mirror of
https://github.com/pseXperiments/icicle.git
synced 2026-01-11 16:38:27 -05:00
Compare commits
2 Commits
new_device
...
rust-inter
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
de1d27d846 | ||
|
|
392c9f8e2e |
48
Cargo.toml
48
Cargo.toml
@@ -1,49 +1,9 @@
|
||||
[package]
|
||||
name = "icicle-utils"
|
||||
[workspace]
|
||||
name = "icicle"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
authors = [ "Ingonyama" ]
|
||||
description = "An implementation of the Ingonyama CUDA Library"
|
||||
homepage = "https://www.ingonyama.com"
|
||||
repository = "https://github.com/ingonyama-zk/icicle"
|
||||
|
||||
[[bench]]
|
||||
name = "ntt"
|
||||
path = "benches/ntt.rs"
|
||||
harness = false
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[[bench]]
|
||||
name = "msm"
|
||||
path = "benches/msm.rs"
|
||||
harness = false
|
||||
members = ["icicle-core", "bls12-381", "bls12-377", "bn254"]
|
||||
|
||||
[dependencies]
|
||||
hex = "*"
|
||||
ark-std = "0.3.0"
|
||||
ark-ff = "0.3.0"
|
||||
ark-poly = "0.3.0"
|
||||
ark-ec = { version = "0.3.0", features = [ "parallel" ] }
|
||||
ark-bls12-381 = "0.3.0"
|
||||
ark-bls12-377 = "0.3.0"
|
||||
ark-bn254 = "0.3.0"
|
||||
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_derive = "1.0"
|
||||
serde_cbor = "0.11.2"
|
||||
|
||||
rustacuda = "0.1"
|
||||
rustacuda_core = "0.1"
|
||||
rustacuda_derive = "0.1"
|
||||
|
||||
rand = "*" #TODO: move rand and ark dependencies to dev once random scalar/point generation is done "natively"
|
||||
|
||||
[build-dependencies]
|
||||
cc = { version = "1.0", features = ["parallel"] }
|
||||
|
||||
[dev-dependencies]
|
||||
"criterion" = "0.4.0"
|
||||
|
||||
[features]
|
||||
default = ["bls12_381"]
|
||||
bls12_381 = ["ark-bls12-381/curve"]
|
||||
g2 = []
|
||||
|
||||
@@ -1,52 +0,0 @@
|
||||
extern crate criterion;
|
||||
|
||||
use criterion::{criterion_group, criterion_main, Criterion};
|
||||
|
||||
use icicle_utils::test_bls12_381::{
|
||||
commit_batch_bls12_381, generate_random_points_bls12_381, set_up_scalars_bls12_381,
|
||||
};
|
||||
use icicle_utils::utils::*;
|
||||
#[cfg(feature = "g2")]
|
||||
use icicle_utils::{commit_batch_g2, field::ExtensionField};
|
||||
|
||||
use rustacuda::prelude::*;
|
||||
|
||||
const LOG_MSM_SIZES: [usize; 1] = [12];
|
||||
const BATCH_SIZES: [usize; 2] = [128, 256];
|
||||
|
||||
fn bench_msm(c: &mut Criterion) {
|
||||
let mut group = c.benchmark_group("MSM");
|
||||
for log_msm_size in LOG_MSM_SIZES {
|
||||
for batch_size in BATCH_SIZES {
|
||||
let msm_size = 1 << log_msm_size;
|
||||
let (scalars, _, _) = set_up_scalars_bls12_381(msm_size, 0, false);
|
||||
let batch_scalars = vec![scalars; batch_size].concat();
|
||||
let mut d_scalars = DeviceBuffer::from_slice(&batch_scalars[..]).unwrap();
|
||||
|
||||
let points = generate_random_points_bls12_381(msm_size, get_rng(None));
|
||||
let batch_points = vec![points; batch_size].concat();
|
||||
let mut d_points = DeviceBuffer::from_slice(&batch_points[..]).unwrap();
|
||||
|
||||
#[cfg(feature = "g2")]
|
||||
let g2_points = generate_random_points::<ExtensionField>(msm_size, get_rng(None));
|
||||
#[cfg(feature = "g2")]
|
||||
let g2_batch_points = vec![g2_points; batch_size].concat();
|
||||
#[cfg(feature = "g2")]
|
||||
let mut d_g2_points = DeviceBuffer::from_slice(&g2_batch_points[..]).unwrap();
|
||||
|
||||
group.sample_size(30).bench_function(
|
||||
&format!("MSM of size 2^{} in batch {}", log_msm_size, batch_size),
|
||||
|b| b.iter(|| commit_batch_bls12_381(&mut d_points, &mut d_scalars, batch_size)),
|
||||
);
|
||||
|
||||
#[cfg(feature = "g2")]
|
||||
group.sample_size(10).bench_function(
|
||||
&format!("G2 MSM of size 2^{} in batch {}", log_msm_size, batch_size),
|
||||
|b| b.iter(|| commit_batch_g2(&mut d_g2_points, &mut d_scalars, batch_size))
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
criterion_group!(msm_benches, bench_msm);
|
||||
criterion_main!(msm_benches);
|
||||
@@ -1,34 +0,0 @@
|
||||
extern crate criterion;
|
||||
|
||||
use criterion::{criterion_group, criterion_main, Criterion};
|
||||
|
||||
use icicle_utils::test_bls12_381::{interpolate_scalars_batch_bls12_381, interpolate_points_batch_bls12_381, set_up_scalars_bls12_381, set_up_points_bls12_381};
|
||||
|
||||
|
||||
const LOG_NTT_SIZES: [usize; 1] = [15];
|
||||
const BATCH_SIZES: [usize; 2] = [8, 16];
|
||||
|
||||
fn bench_ntt(c: &mut Criterion) {
|
||||
let mut group = c.benchmark_group("NTT");
|
||||
for log_ntt_size in LOG_NTT_SIZES {
|
||||
for batch_size in BATCH_SIZES {
|
||||
let ntt_size = 1 << log_ntt_size;
|
||||
let (_, mut d_evals, mut d_domain) = set_up_scalars_bls12_381(ntt_size * batch_size, log_ntt_size, true);
|
||||
let (_, mut d_points_evals, _) = set_up_points_bls12_381(ntt_size * batch_size, log_ntt_size, true);
|
||||
|
||||
group.sample_size(100).bench_function(
|
||||
&format!("Scalar NTT of size 2^{} in batch {}", log_ntt_size, batch_size),
|
||||
|b| b.iter(|| interpolate_scalars_batch_bls12_381(&mut d_evals, &mut d_domain, batch_size))
|
||||
);
|
||||
|
||||
group.sample_size(10).bench_function(
|
||||
&format!("EC NTT of size 2^{} in batch {}", log_ntt_size, batch_size),
|
||||
|b| b.iter(|| interpolate_points_batch_bls12_381(&mut d_points_evals, &mut d_domain, batch_size))
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
criterion_group!(ntt_benches, bench_ntt);
|
||||
criterion_main!(ntt_benches);
|
||||
|
||||
34
bls12-377/Cargo.toml
Normal file
34
bls12-377/Cargo.toml
Normal file
@@ -0,0 +1,34 @@
|
||||
[package]
|
||||
name = "bls12-377"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
authors = [ "Ingonyama" ]
|
||||
|
||||
[dependencies]
|
||||
icicle-core = { path = "../icicle-core" }
|
||||
|
||||
hex = "*"
|
||||
ark-std = "0.3.0"
|
||||
ark-ff = "0.3.0"
|
||||
ark-poly = "0.3.0"
|
||||
ark-ec = { version = "0.3.0", features = [ "parallel" ] }
|
||||
ark-bls12-377 = "0.3.0"
|
||||
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_derive = "1.0"
|
||||
serde_cbor = "0.11.2"
|
||||
|
||||
rustacuda = "0.1"
|
||||
rustacuda_core = "0.1"
|
||||
rustacuda_derive = "0.1"
|
||||
|
||||
rand = "*" #TODO: move rand and ark dependencies to dev once random scalar/point generation is done "natively"
|
||||
|
||||
[build-dependencies]
|
||||
cc = { version = "1.0", features = ["parallel"] }
|
||||
|
||||
[dev-dependencies]
|
||||
"criterion" = "0.4.0"
|
||||
|
||||
[features]
|
||||
g2 = []
|
||||
@@ -23,11 +23,12 @@ fn main() {
|
||||
nvcc.define("G2_DEFINED", None);
|
||||
}
|
||||
nvcc.cuda(true);
|
||||
nvcc.define("FEATURE_BLS12_377", None);
|
||||
nvcc.debug(false);
|
||||
nvcc.flag(&arch);
|
||||
nvcc.flag(&stream);
|
||||
nvcc.files([
|
||||
"./icicle/curves/index.cu",
|
||||
"../icicle-cuda/curves/index.cu",
|
||||
]);
|
||||
nvcc.compile("ingo_icicle"); //TODO: extension??
|
||||
}
|
||||
4
bls12-377/src/basic_structs/field.rs
Normal file
4
bls12-377/src/basic_structs/field.rs
Normal file
@@ -0,0 +1,4 @@
|
||||
pub trait Field<const NUM_LIMBS: usize> {
|
||||
const MODOLUS: [u32;NUM_LIMBS];
|
||||
const LIMBS: usize = NUM_LIMBS;
|
||||
}
|
||||
3
bls12-377/src/basic_structs/mod.rs
Normal file
3
bls12-377/src/basic_structs/mod.rs
Normal file
@@ -0,0 +1,3 @@
|
||||
pub mod field;
|
||||
pub mod scalar;
|
||||
pub mod point;
|
||||
106
bls12-377/src/basic_structs/point.rs
Normal file
106
bls12-377/src/basic_structs/point.rs
Normal file
@@ -0,0 +1,106 @@
|
||||
use std::ffi::c_uint;
|
||||
|
||||
use ark_ec::AffineCurve;
|
||||
use ark_ff::{BigInteger256, PrimeField};
|
||||
use std::mem::transmute;
|
||||
use ark_ff::Field;
|
||||
use icicle_core::utils::{u32_vec_to_u64_vec, u64_vec_to_u32_vec};
|
||||
|
||||
use rustacuda_core::DeviceCopy;
|
||||
use rustacuda_derive::DeviceCopy;
|
||||
|
||||
use super::scalar::{get_fixed_limbs, self};
|
||||
|
||||
|
||||
#[derive(Debug, Clone, Copy, DeviceCopy)]
|
||||
#[repr(C)]
|
||||
pub struct PointT<BF: scalar::ScalarTrait> {
|
||||
pub x: BF,
|
||||
pub y: BF,
|
||||
pub z: BF,
|
||||
}
|
||||
|
||||
impl<BF: DeviceCopy + scalar::ScalarTrait> Default for PointT<BF> {
|
||||
fn default() -> Self {
|
||||
PointT::zero()
|
||||
}
|
||||
}
|
||||
|
||||
impl<BF: DeviceCopy + scalar::ScalarTrait> PointT<BF> {
|
||||
pub fn zero() -> Self {
|
||||
PointT {
|
||||
x: BF::zero(),
|
||||
y: BF::one(),
|
||||
z: BF::zero(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn infinity() -> Self {
|
||||
Self::zero()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Clone, Copy, DeviceCopy)]
|
||||
#[repr(C)]
|
||||
pub struct PointAffineNoInfinityT<BF> {
|
||||
pub x: BF,
|
||||
pub y: BF,
|
||||
}
|
||||
|
||||
impl<BF: scalar::ScalarTrait> Default for PointAffineNoInfinityT<BF> {
|
||||
fn default() -> Self {
|
||||
PointAffineNoInfinityT {
|
||||
x: BF::zero(),
|
||||
y: BF::zero(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<BF: Copy + scalar::ScalarTrait> PointAffineNoInfinityT<BF> {
|
||||
///From u32 limbs x,y
|
||||
pub fn from_limbs(x: &[u32], y: &[u32]) -> Self {
|
||||
PointAffineNoInfinityT {
|
||||
x: BF::from_limbs(x),
|
||||
y: BF::from_limbs(y)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn limbs(&self) -> Vec<u32> {
|
||||
[self.x.limbs(), self.y.limbs()].concat()
|
||||
}
|
||||
|
||||
pub fn to_projective(&self) -> PointT<BF> {
|
||||
PointT {
|
||||
x: self.x,
|
||||
y: self.y,
|
||||
z: BF::one(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<BF: Copy + scalar::ScalarTrait> PointT<BF> {
|
||||
pub fn from_limbs(x: &[u32], y: &[u32], z: &[u32]) -> Self {
|
||||
PointT {
|
||||
x: BF::from_limbs(x),
|
||||
y: BF::from_limbs(y),
|
||||
z: BF::from_limbs(z)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_xy_limbs(value: &[u32]) -> PointT<BF> {
|
||||
let l = value.len();
|
||||
assert_eq!(l, 3 * BF::base_limbs(), "length must be 3 * {}", BF::base_limbs());
|
||||
PointT {
|
||||
x: BF::from_limbs(value[..BF::base_limbs()].try_into().unwrap()),
|
||||
y: BF::from_limbs(value[BF::base_limbs()..BF::base_limbs() * 2].try_into().unwrap()),
|
||||
z: BF::from_limbs(value[BF::base_limbs() * 2..].try_into().unwrap())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_xy_strip_z(&self) -> PointAffineNoInfinityT<BF> {
|
||||
PointAffineNoInfinityT {
|
||||
x: self.x,
|
||||
y: self.y,
|
||||
}
|
||||
}
|
||||
}
|
||||
102
bls12-377/src/basic_structs/scalar.rs
Normal file
102
bls12-377/src/basic_structs/scalar.rs
Normal file
@@ -0,0 +1,102 @@
|
||||
use std::ffi::{c_int, c_uint};
|
||||
use rand::{rngs::StdRng, RngCore, SeedableRng};
|
||||
use rustacuda_core::DeviceCopy;
|
||||
use rustacuda_derive::DeviceCopy;
|
||||
use std::mem::transmute;
|
||||
use rustacuda::prelude::*;
|
||||
use rustacuda_core::DevicePointer;
|
||||
use rustacuda::memory::{DeviceBox, CopyDestination};
|
||||
|
||||
use icicle_core::utils::{u32_vec_to_u64_vec, u64_vec_to_u32_vec};
|
||||
|
||||
use std::marker::PhantomData;
|
||||
use std::convert::TryInto;
|
||||
|
||||
use super::field::{Field, self};
|
||||
|
||||
pub fn get_fixed_limbs<const NUM_LIMBS: usize>(val: &[u32]) -> [u32; NUM_LIMBS] {
|
||||
match val.len() {
|
||||
n if n < NUM_LIMBS => {
|
||||
let mut padded: [u32; NUM_LIMBS] = [0; NUM_LIMBS];
|
||||
padded[..val.len()].copy_from_slice(&val);
|
||||
padded
|
||||
}
|
||||
n if n == NUM_LIMBS => val.try_into().unwrap(),
|
||||
_ => panic!("slice has too many elements"),
|
||||
}
|
||||
}
|
||||
|
||||
pub trait ScalarTrait{
|
||||
fn base_limbs() -> usize;
|
||||
fn zero() -> Self;
|
||||
fn from_limbs(value: &[u32]) -> Self;
|
||||
fn one() -> Self;
|
||||
fn to_bytes_le(&self) -> Vec<u8>;
|
||||
fn limbs(&self) -> &[u32];
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Clone, Copy)]
|
||||
#[repr(C)]
|
||||
pub struct ScalarT<M, const NUM_LIMBS: usize> {
|
||||
pub(crate) phantom: PhantomData<M>,
|
||||
pub(crate) value : [u32; NUM_LIMBS]
|
||||
}
|
||||
|
||||
impl<M, const NUM_LIMBS: usize> ScalarTrait for ScalarT<M, NUM_LIMBS>
|
||||
where
|
||||
M: Field<NUM_LIMBS>,
|
||||
{
|
||||
|
||||
fn base_limbs() -> usize {
|
||||
return NUM_LIMBS;
|
||||
}
|
||||
|
||||
fn zero() -> Self {
|
||||
ScalarT {
|
||||
value: [0u32; NUM_LIMBS],
|
||||
phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
fn from_limbs(value: &[u32]) -> Self {
|
||||
Self {
|
||||
value: get_fixed_limbs(value),
|
||||
phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
fn one() -> Self {
|
||||
let mut s = [0u32; NUM_LIMBS];
|
||||
s[0] = 1;
|
||||
ScalarT { value: s, phantom: PhantomData }
|
||||
}
|
||||
|
||||
fn to_bytes_le(&self) -> Vec<u8> {
|
||||
self.value
|
||||
.iter()
|
||||
.map(|s| s.to_le_bytes().to_vec())
|
||||
.flatten()
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
|
||||
fn limbs(&self) -> &[u32] {
|
||||
&self.value
|
||||
}
|
||||
}
|
||||
|
||||
impl<M, const NUM_LIMBS: usize> ScalarT<M, NUM_LIMBS> where M: field::Field<NUM_LIMBS>{
|
||||
pub fn from_limbs_le(value: &[u32]) -> ScalarT<M,NUM_LIMBS> {
|
||||
Self::from_limbs(value)
|
||||
}
|
||||
|
||||
pub fn from_limbs_be(value: &[u32]) -> ScalarT<M,NUM_LIMBS> {
|
||||
let mut value = value.to_vec();
|
||||
value.reverse();
|
||||
Self::from_limbs_le(&value)
|
||||
}
|
||||
|
||||
// Additional Functions
|
||||
pub fn add(&self, other:ScalarT<M, NUM_LIMBS>) -> ScalarT<M,NUM_LIMBS>{ // overload +
|
||||
return ScalarT{value: [self.value[0] + other.value[0];NUM_LIMBS], phantom: PhantomData };
|
||||
}
|
||||
}
|
||||
62
bls12-377/src/curve_structs.rs
Normal file
62
bls12-377/src/curve_structs.rs
Normal file
@@ -0,0 +1,62 @@
|
||||
use std::ffi::{c_int, c_uint};
|
||||
use rand::{rngs::StdRng, RngCore, SeedableRng};
|
||||
use rustacuda_derive::DeviceCopy;
|
||||
use std::mem::transmute;
|
||||
use rustacuda::prelude::*;
|
||||
use rustacuda_core::DevicePointer;
|
||||
use rustacuda::memory::{DeviceBox, CopyDestination, DeviceCopy};
|
||||
|
||||
use std::marker::PhantomData;
|
||||
use std::convert::TryInto;
|
||||
|
||||
use crate::basic_structs::point::{PointT, PointAffineNoInfinityT};
|
||||
use crate::basic_structs::scalar::ScalarT;
|
||||
use crate::basic_structs::field::Field;
|
||||
|
||||
|
||||
#[derive(Debug, PartialEq, Clone, Copy,DeviceCopy)]
|
||||
#[repr(C)]
|
||||
pub struct ScalarField;
|
||||
impl Field<8> for ScalarField {
|
||||
const MODOLUS: [u32; 8] = [0x0;8];
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Clone, Copy,DeviceCopy)]
|
||||
#[repr(C)]
|
||||
pub struct BaseField;
|
||||
impl Field<12> for BaseField {
|
||||
const MODOLUS: [u32; 12] = [0x0;12];
|
||||
}
|
||||
|
||||
|
||||
pub type Scalar = ScalarT<ScalarField,8>;
|
||||
impl Default for Scalar {
|
||||
fn default() -> Self {
|
||||
Self{value: [0x0;ScalarField::LIMBS], phantom: PhantomData }
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl DeviceCopy for Scalar{}
|
||||
|
||||
|
||||
pub type Base = ScalarT<BaseField,12>;
|
||||
impl Default for Base {
|
||||
fn default() -> Self {
|
||||
Self{value: [0x0;BaseField::LIMBS], phantom: PhantomData }
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl DeviceCopy for Base{}
|
||||
|
||||
pub type Point = PointT<Base>;
|
||||
pub type PointAffineNoInfinity = PointAffineNoInfinityT<Base>;
|
||||
|
||||
extern "C" {
|
||||
fn eq(point1: *const Point, point2: *const Point) -> c_uint;
|
||||
}
|
||||
|
||||
impl PartialEq for Point {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
unsafe { eq(self, other) != 0 }
|
||||
}
|
||||
}
|
||||
798
bls12-377/src/from_cuda.rs
Normal file
798
bls12-377/src/from_cuda.rs
Normal file
@@ -0,0 +1,798 @@
|
||||
use std::ffi::{c_int, c_uint};
|
||||
use ark_std::UniformRand;
|
||||
use rand::{rngs::StdRng, RngCore, SeedableRng};
|
||||
use rustacuda::CudaFlags;
|
||||
use rustacuda::memory::DeviceBox;
|
||||
use rustacuda::prelude::{DeviceBuffer, Device, ContextFlags, Context};
|
||||
use rustacuda_core::DevicePointer;
|
||||
use std::mem::transmute;
|
||||
use crate::basic_structs::scalar::ScalarTrait;
|
||||
use crate::curve_structs::*;
|
||||
use icicle_core::utils::{u32_vec_to_u64_vec, u64_vec_to_u32_vec};
|
||||
use std::marker::PhantomData;
|
||||
use std::convert::TryInto;
|
||||
use ark_bls12_377::{Fq as Fq_BLS12_377, Fr as Fr_BLS12_377, G1Affine as G1Affine_BLS12_377, G1Projective as G1Projective_BLS12_377};
|
||||
use ark_ec::AffineCurve;
|
||||
use ark_ff::{BigInteger384, BigInteger256, PrimeField};
|
||||
use rustacuda::memory::{CopyDestination, DeviceCopy};
|
||||
|
||||
extern "C" {
|
||||
fn msm_cuda(
|
||||
out: *mut Point,
|
||||
points: *const PointAffineNoInfinity,
|
||||
scalars: *const Scalar,
|
||||
count: usize,
|
||||
device_id: usize,
|
||||
) -> c_uint;
|
||||
|
||||
fn msm_batch_cuda(
|
||||
out: *mut Point,
|
||||
points: *const PointAffineNoInfinity,
|
||||
scalars: *const Scalar,
|
||||
batch_size: usize,
|
||||
msm_size: usize,
|
||||
device_id: usize,
|
||||
) -> c_uint;
|
||||
|
||||
fn commit_cuda(
|
||||
d_out: DevicePointer<Point>,
|
||||
d_scalars: DevicePointer<Scalar>,
|
||||
d_points: DevicePointer<PointAffineNoInfinity>,
|
||||
count: usize,
|
||||
device_id: usize,
|
||||
) -> c_uint;
|
||||
|
||||
fn commit_batch_cuda(
|
||||
d_out: DevicePointer<Point>,
|
||||
d_scalars: DevicePointer<Scalar>,
|
||||
d_points: DevicePointer<PointAffineNoInfinity>,
|
||||
count: usize,
|
||||
batch_size: usize,
|
||||
device_id: usize,
|
||||
) -> c_uint;
|
||||
|
||||
fn build_domain_cuda(domain_size: usize, logn: usize, inverse: bool, device_id: usize) -> DevicePointer<Scalar>;
|
||||
|
||||
fn ntt_cuda(inout: *mut Scalar, n: usize, inverse: bool, device_id: usize) -> c_int;
|
||||
|
||||
fn ecntt_cuda(inout: *mut Point, n: usize, inverse: bool, device_id: usize) -> c_int;
|
||||
|
||||
fn ntt_batch_cuda(
|
||||
inout: *mut Scalar,
|
||||
arr_size: usize,
|
||||
n: usize,
|
||||
inverse: bool,
|
||||
) -> c_int;
|
||||
|
||||
fn ecntt_batch_cuda(inout: *mut Point, arr_size: usize, n: usize, inverse: bool) -> c_int;
|
||||
|
||||
fn interpolate_scalars_cuda(
|
||||
d_out: DevicePointer<Scalar>,
|
||||
d_evaluations: DevicePointer<Scalar>,
|
||||
d_domain: DevicePointer<Scalar>,
|
||||
n: usize,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn interpolate_scalars_batch_cuda(
|
||||
d_out: DevicePointer<Scalar>,
|
||||
d_evaluations: DevicePointer<Scalar>,
|
||||
d_domain: DevicePointer<Scalar>,
|
||||
n: usize,
|
||||
batch_size: usize,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn interpolate_points_cuda(
|
||||
d_out: DevicePointer<Point>,
|
||||
d_evaluations: DevicePointer<Point>,
|
||||
d_domain: DevicePointer<Scalar>,
|
||||
n: usize,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn interpolate_points_batch_cuda(
|
||||
d_out: DevicePointer<Point>,
|
||||
d_evaluations: DevicePointer<Point>,
|
||||
d_domain: DevicePointer<Scalar>,
|
||||
n: usize,
|
||||
batch_size: usize,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn evaluate_scalars_cuda(
|
||||
d_out: DevicePointer<Scalar>,
|
||||
d_coefficients: DevicePointer<Scalar>,
|
||||
d_domain: DevicePointer<Scalar>,
|
||||
domain_size: usize,
|
||||
n: usize,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn evaluate_scalars_batch_cuda(
|
||||
d_out: DevicePointer<Scalar>,
|
||||
d_coefficients: DevicePointer<Scalar>,
|
||||
d_domain: DevicePointer<Scalar>,
|
||||
domain_size: usize,
|
||||
n: usize,
|
||||
batch_size: usize,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn evaluate_points_cuda(
|
||||
d_out: DevicePointer<Point>,
|
||||
d_coefficients: DevicePointer<Point>,
|
||||
d_domain: DevicePointer<Scalar>,
|
||||
domain_size: usize,
|
||||
n: usize,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn evaluate_points_batch_cuda(
|
||||
d_out: DevicePointer<Point>,
|
||||
d_coefficients: DevicePointer<Point>,
|
||||
d_domain: DevicePointer<Scalar>,
|
||||
domain_size: usize,
|
||||
n: usize,
|
||||
batch_size: usize,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn evaluate_scalars_on_coset_cuda(
|
||||
d_out: DevicePointer<Scalar>,
|
||||
d_coefficients: DevicePointer<Scalar>,
|
||||
d_domain: DevicePointer<Scalar>,
|
||||
domain_size: usize,
|
||||
n: usize,
|
||||
coset_powers: DevicePointer<Scalar>,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn evaluate_scalars_on_coset_batch_cuda(
|
||||
d_out: DevicePointer<Scalar>,
|
||||
d_coefficients: DevicePointer<Scalar>,
|
||||
d_domain: DevicePointer<Scalar>,
|
||||
domain_size: usize,
|
||||
n: usize,
|
||||
batch_size: usize,
|
||||
coset_powers: DevicePointer<Scalar>,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn evaluate_points_on_coset_cuda(
|
||||
d_out: DevicePointer<Point>,
|
||||
d_coefficients: DevicePointer<Point>,
|
||||
d_domain: DevicePointer<Scalar>,
|
||||
domain_size: usize,
|
||||
n: usize,
|
||||
coset_powers: DevicePointer<Scalar>,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn evaluate_points_on_coset_batch_cuda(
|
||||
d_out: DevicePointer<Point>,
|
||||
d_coefficients: DevicePointer<Point>,
|
||||
d_domain: DevicePointer<Scalar>,
|
||||
domain_size: usize,
|
||||
n: usize,
|
||||
batch_size: usize,
|
||||
coset_powers: DevicePointer<Scalar>,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn reverse_order_scalars_cuda(
|
||||
d_arr: DevicePointer<Scalar>,
|
||||
n: usize,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn reverse_order_scalars_batch_cuda(
|
||||
d_arr: DevicePointer<Scalar>,
|
||||
n: usize,
|
||||
batch_size: usize,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn reverse_order_points_cuda(
|
||||
d_arr: DevicePointer<Point>,
|
||||
n: usize,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn reverse_order_points_batch_cuda(
|
||||
d_arr: DevicePointer<Point>,
|
||||
n: usize,
|
||||
batch_size: usize,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn vec_mod_mult_point(
|
||||
inout: *mut Point,
|
||||
scalars: *const Scalar,
|
||||
n_elements: usize,
|
||||
device_id: usize,
|
||||
) -> c_int;
|
||||
|
||||
fn vec_mod_mult_scalar(
|
||||
inout: *mut Scalar,
|
||||
scalars: *const Scalar,
|
||||
n_elements: usize,
|
||||
device_id: usize,
|
||||
) -> c_int;
|
||||
|
||||
fn matrix_vec_mod_mult(
|
||||
matrix_flattened: *const Scalar,
|
||||
input: *const Scalar,
|
||||
output: *mut Scalar,
|
||||
n_elements: usize,
|
||||
device_id: usize,
|
||||
) -> c_int;
|
||||
}
|
||||
|
||||
pub fn msm(points: &[PointAffineNoInfinity], scalars: &[Scalar], device_id: usize) -> Point {
|
||||
let count = points.len();
|
||||
if count != scalars.len() {
|
||||
todo!("variable length")
|
||||
}
|
||||
|
||||
let mut ret = Point::zero();
|
||||
unsafe {
|
||||
msm_cuda(
|
||||
&mut ret as *mut _ as *mut Point,
|
||||
points as *const _ as *const PointAffineNoInfinity,
|
||||
scalars as *const _ as *const Scalar,
|
||||
scalars.len(),
|
||||
device_id,
|
||||
)
|
||||
};
|
||||
|
||||
ret
|
||||
}
|
||||
|
||||
pub fn msm_batch(
|
||||
points: &[PointAffineNoInfinity],
|
||||
scalars: &[Scalar],
|
||||
batch_size: usize,
|
||||
device_id: usize,
|
||||
) -> Vec<Point> {
|
||||
let count = points.len();
|
||||
if count != scalars.len() {
|
||||
todo!("variable length")
|
||||
}
|
||||
|
||||
let mut ret = vec![Point::zero(); batch_size];
|
||||
|
||||
unsafe {
|
||||
msm_batch_cuda(
|
||||
&mut ret[0] as *mut _ as *mut Point,
|
||||
points as *const _ as *const PointAffineNoInfinity,
|
||||
scalars as *const _ as *const Scalar,
|
||||
batch_size,
|
||||
count / batch_size,
|
||||
device_id,
|
||||
)
|
||||
};
|
||||
|
||||
ret
|
||||
}
|
||||
|
||||
pub fn commit(
|
||||
points: &mut DeviceBuffer<PointAffineNoInfinity>,
|
||||
scalars: &mut DeviceBuffer<Scalar>,
|
||||
) -> DeviceBox<Point> {
|
||||
let mut res = DeviceBox::new(&Point::zero()).unwrap();
|
||||
unsafe {
|
||||
commit_cuda(
|
||||
res.as_device_ptr(),
|
||||
scalars.as_device_ptr(),
|
||||
points.as_device_ptr(),
|
||||
scalars.len(),
|
||||
0,
|
||||
);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn commit_batch(
|
||||
points: &mut DeviceBuffer<PointAffineNoInfinity>,
|
||||
scalars: &mut DeviceBuffer<Scalar>,
|
||||
batch_size: usize,
|
||||
) -> DeviceBuffer<Point> {
|
||||
let mut res = unsafe { DeviceBuffer::uninitialized(batch_size).unwrap() };
|
||||
unsafe {
|
||||
commit_batch_cuda(
|
||||
res.as_device_ptr(),
|
||||
scalars.as_device_ptr(),
|
||||
points.as_device_ptr(),
|
||||
scalars.len() / batch_size,
|
||||
batch_size,
|
||||
0,
|
||||
);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
/// Compute an in-place NTT on the input data.
|
||||
fn ntt_internal(values: &mut [Scalar], device_id: usize, inverse: bool) -> i32 {
|
||||
let ret_code = unsafe {
|
||||
ntt_cuda(
|
||||
values as *mut _ as *mut Scalar,
|
||||
values.len(),
|
||||
inverse,
|
||||
device_id,
|
||||
)
|
||||
};
|
||||
ret_code
|
||||
}
|
||||
|
||||
pub fn ntt(values: &mut [Scalar], device_id: usize) {
|
||||
ntt_internal(values, device_id, false);
|
||||
}
|
||||
|
||||
pub fn intt(values: &mut [Scalar], device_id: usize) {
|
||||
ntt_internal(values, device_id, true);
|
||||
}
|
||||
|
||||
/// Compute an in-place NTT on the input data.
|
||||
fn ntt_internal_batch(
|
||||
values: &mut [Scalar],
|
||||
device_id: usize,
|
||||
batch_size: usize,
|
||||
inverse: bool,
|
||||
) -> i32 {
|
||||
unsafe {
|
||||
ntt_batch_cuda(
|
||||
values as *mut _ as *mut Scalar,
|
||||
values.len(),
|
||||
batch_size,
|
||||
inverse,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn ntt_batch(values: &mut [Scalar], batch_size: usize, device_id: usize) {
|
||||
ntt_internal_batch(values, 0, batch_size, false);
|
||||
}
|
||||
|
||||
pub fn intt_batch(values: &mut [Scalar], batch_size: usize, device_id: usize) {
|
||||
ntt_internal_batch(values, 0, batch_size, true);
|
||||
}
|
||||
|
||||
/// Compute an in-place ECNTT on the input data.
|
||||
fn ecntt_internal(values: &mut [Point], inverse: bool, device_id: usize) -> i32 {
|
||||
unsafe {
|
||||
ecntt_cuda(
|
||||
values as *mut _ as *mut Point,
|
||||
values.len(),
|
||||
inverse,
|
||||
device_id,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn ecntt(values: &mut [Point], device_id: usize) {
|
||||
ecntt_internal(values, false, device_id);
|
||||
}
|
||||
|
||||
/// Compute an in-place iECNTT on the input data.
|
||||
pub fn iecntt(values: &mut [Point], device_id: usize) {
|
||||
ecntt_internal(values, true, device_id);
|
||||
}
|
||||
|
||||
/// Compute an in-place ECNTT on the input data.
|
||||
fn ecntt_internal_batch(
|
||||
values: &mut [Point],
|
||||
device_id: usize,
|
||||
batch_size: usize,
|
||||
inverse: bool,
|
||||
) -> i32 {
|
||||
unsafe {
|
||||
ecntt_batch_cuda(
|
||||
values as *mut _ as *mut Point,
|
||||
values.len(),
|
||||
batch_size,
|
||||
inverse,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn ecntt_batch(values: &mut [Point], batch_size: usize, device_id: usize) {
|
||||
ecntt_internal_batch(values, 0, batch_size, false);
|
||||
}
|
||||
|
||||
/// Compute an in-place iECNTT on the input data.
|
||||
pub fn iecntt_batch(values: &mut [Point], batch_size: usize, device_id: usize) {
|
||||
ecntt_internal_batch(values, 0, batch_size, true);
|
||||
}
|
||||
|
||||
pub fn build_domain(domain_size: usize, logn: usize, inverse: bool) -> DeviceBuffer<Scalar> {
|
||||
unsafe {
|
||||
DeviceBuffer::from_raw_parts(build_domain_cuda(
|
||||
domain_size,
|
||||
logn,
|
||||
inverse,
|
||||
0
|
||||
), domain_size)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
pub fn reverse_order_scalars(
|
||||
d_scalars: &mut DeviceBuffer<Scalar>,
|
||||
) {
|
||||
unsafe { reverse_order_scalars_cuda(
|
||||
d_scalars.as_device_ptr(),
|
||||
d_scalars.len(),
|
||||
0
|
||||
); }
|
||||
}
|
||||
|
||||
pub fn reverse_order_scalars_batch(
|
||||
d_scalars: &mut DeviceBuffer<Scalar>,
|
||||
batch_size: usize,
|
||||
) {
|
||||
unsafe { reverse_order_scalars_batch_cuda(
|
||||
d_scalars.as_device_ptr(),
|
||||
d_scalars.len() / batch_size,
|
||||
batch_size,
|
||||
0
|
||||
); }
|
||||
}
|
||||
|
||||
pub fn reverse_order_points(
|
||||
d_points: &mut DeviceBuffer<Point>,
|
||||
) {
|
||||
unsafe { reverse_order_points_cuda(
|
||||
d_points.as_device_ptr(),
|
||||
d_points.len(),
|
||||
0
|
||||
); }
|
||||
}
|
||||
|
||||
pub fn reverse_order_points_batch(
|
||||
d_points: &mut DeviceBuffer<Point>,
|
||||
batch_size: usize,
|
||||
) {
|
||||
unsafe { reverse_order_points_batch_cuda(
|
||||
d_points.as_device_ptr(),
|
||||
d_points.len() / batch_size,
|
||||
batch_size,
|
||||
0
|
||||
); }
|
||||
}
|
||||
|
||||
pub fn interpolate_scalars(
|
||||
d_evaluations: &mut DeviceBuffer<Scalar>,
|
||||
d_domain: &mut DeviceBuffer<Scalar>
|
||||
) -> DeviceBuffer<Scalar> {
|
||||
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
|
||||
unsafe { interpolate_scalars_cuda(
|
||||
res.as_device_ptr(),
|
||||
d_evaluations.as_device_ptr(),
|
||||
d_domain.as_device_ptr(),
|
||||
d_domain.len(),
|
||||
0
|
||||
) };
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn interpolate_scalars_batch(
|
||||
d_evaluations: &mut DeviceBuffer<Scalar>,
|
||||
d_domain: &mut DeviceBuffer<Scalar>,
|
||||
batch_size: usize,
|
||||
) -> DeviceBuffer<Scalar> {
|
||||
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
|
||||
unsafe { interpolate_scalars_batch_cuda(
|
||||
res.as_device_ptr(),
|
||||
d_evaluations.as_device_ptr(),
|
||||
d_domain.as_device_ptr(),
|
||||
d_domain.len(),
|
||||
batch_size,
|
||||
0
|
||||
) };
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn interpolate_points(
|
||||
d_evaluations: &mut DeviceBuffer<Point>,
|
||||
d_domain: &mut DeviceBuffer<Scalar>,
|
||||
) -> DeviceBuffer<Point> {
|
||||
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
|
||||
unsafe { interpolate_points_cuda(
|
||||
res.as_device_ptr(),
|
||||
d_evaluations.as_device_ptr(),
|
||||
d_domain.as_device_ptr(),
|
||||
d_domain.len(),
|
||||
0
|
||||
) };
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn interpolate_points_batch(
|
||||
d_evaluations: &mut DeviceBuffer<Point>,
|
||||
d_domain: &mut DeviceBuffer<Scalar>,
|
||||
batch_size: usize,
|
||||
) -> DeviceBuffer<Point> {
|
||||
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
|
||||
unsafe { interpolate_points_batch_cuda(
|
||||
res.as_device_ptr(),
|
||||
d_evaluations.as_device_ptr(),
|
||||
d_domain.as_device_ptr(),
|
||||
d_domain.len(),
|
||||
batch_size,
|
||||
0
|
||||
) };
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn evaluate_scalars(
|
||||
d_coefficients: &mut DeviceBuffer<Scalar>,
|
||||
d_domain: &mut DeviceBuffer<Scalar>,
|
||||
) -> DeviceBuffer<Scalar> {
|
||||
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
|
||||
unsafe {
|
||||
evaluate_scalars_cuda(
|
||||
res.as_device_ptr(),
|
||||
d_coefficients.as_device_ptr(),
|
||||
d_domain.as_device_ptr(),
|
||||
d_domain.len(),
|
||||
d_coefficients.len(),
|
||||
0
|
||||
);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn evaluate_scalars_batch(
|
||||
d_coefficients: &mut DeviceBuffer<Scalar>,
|
||||
d_domain: &mut DeviceBuffer<Scalar>,
|
||||
batch_size: usize,
|
||||
) -> DeviceBuffer<Scalar> {
|
||||
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
|
||||
unsafe {
|
||||
evaluate_scalars_batch_cuda(
|
||||
res.as_device_ptr(),
|
||||
d_coefficients.as_device_ptr(),
|
||||
d_domain.as_device_ptr(),
|
||||
d_domain.len(),
|
||||
d_coefficients.len() / batch_size,
|
||||
batch_size,
|
||||
0
|
||||
);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn evaluate_points(
|
||||
d_coefficients: &mut DeviceBuffer<Point>,
|
||||
d_domain: &mut DeviceBuffer<Scalar>,
|
||||
) -> DeviceBuffer<Point> {
|
||||
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
|
||||
unsafe {
|
||||
evaluate_points_cuda(
|
||||
res.as_device_ptr(),
|
||||
d_coefficients.as_device_ptr(),
|
||||
d_domain.as_device_ptr(),
|
||||
d_domain.len(),
|
||||
d_coefficients.len(),
|
||||
0
|
||||
);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn evaluate_points_batch(
|
||||
d_coefficients: &mut DeviceBuffer<Point>,
|
||||
d_domain: &mut DeviceBuffer<Scalar>,
|
||||
batch_size: usize,
|
||||
) -> DeviceBuffer<Point> {
|
||||
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
|
||||
unsafe {
|
||||
evaluate_points_batch_cuda(
|
||||
res.as_device_ptr(),
|
||||
d_coefficients.as_device_ptr(),
|
||||
d_domain.as_device_ptr(),
|
||||
d_domain.len(),
|
||||
d_coefficients.len() / batch_size,
|
||||
batch_size,
|
||||
0
|
||||
);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn evaluate_scalars_on_coset(
|
||||
d_coefficients: &mut DeviceBuffer<Scalar>,
|
||||
d_domain: &mut DeviceBuffer<Scalar>,
|
||||
coset_powers: &mut DeviceBuffer<Scalar>,
|
||||
) -> DeviceBuffer<Scalar> {
|
||||
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
|
||||
unsafe {
|
||||
evaluate_scalars_on_coset_cuda(
|
||||
res.as_device_ptr(),
|
||||
d_coefficients.as_device_ptr(),
|
||||
d_domain.as_device_ptr(),
|
||||
d_domain.len(),
|
||||
d_coefficients.len(),
|
||||
coset_powers.as_device_ptr(),
|
||||
0
|
||||
);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn evaluate_scalars_on_coset_batch(
|
||||
d_coefficients: &mut DeviceBuffer<Scalar>,
|
||||
d_domain: &mut DeviceBuffer<Scalar>,
|
||||
batch_size: usize,
|
||||
coset_powers: &mut DeviceBuffer<Scalar>,
|
||||
) -> DeviceBuffer<Scalar> {
|
||||
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
|
||||
unsafe {
|
||||
evaluate_scalars_on_coset_batch_cuda(
|
||||
res.as_device_ptr(),
|
||||
d_coefficients.as_device_ptr(),
|
||||
d_domain.as_device_ptr(),
|
||||
d_domain.len(),
|
||||
d_coefficients.len() / batch_size,
|
||||
batch_size,
|
||||
coset_powers.as_device_ptr(),
|
||||
0
|
||||
);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn evaluate_points_on_coset(
|
||||
d_coefficients: &mut DeviceBuffer<Point>,
|
||||
d_domain: &mut DeviceBuffer<Scalar>,
|
||||
coset_powers: &mut DeviceBuffer<Scalar>,
|
||||
) -> DeviceBuffer<Point> {
|
||||
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
|
||||
unsafe {
|
||||
evaluate_points_on_coset_cuda(
|
||||
res.as_device_ptr(),
|
||||
d_coefficients.as_device_ptr(),
|
||||
d_domain.as_device_ptr(),
|
||||
d_domain.len(),
|
||||
d_coefficients.len(),
|
||||
coset_powers.as_device_ptr(),
|
||||
0
|
||||
);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn evaluate_points_on_coset_batch(
|
||||
d_coefficients: &mut DeviceBuffer<Point>,
|
||||
d_domain: &mut DeviceBuffer<Scalar>,
|
||||
batch_size: usize,
|
||||
coset_powers: &mut DeviceBuffer<Scalar>,
|
||||
) -> DeviceBuffer<Point> {
|
||||
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
|
||||
unsafe {
|
||||
evaluate_points_on_coset_batch_cuda(
|
||||
res.as_device_ptr(),
|
||||
d_coefficients.as_device_ptr(),
|
||||
d_domain.as_device_ptr(),
|
||||
d_domain.len(),
|
||||
d_coefficients.len() / batch_size,
|
||||
batch_size,
|
||||
coset_powers.as_device_ptr(),
|
||||
0
|
||||
);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn multp_vec(a: &mut [Point], b: &[Scalar], device_id: usize) {
|
||||
assert_eq!(a.len(), b.len());
|
||||
unsafe {
|
||||
vec_mod_mult_point(
|
||||
a as *mut _ as *mut Point,
|
||||
b as *const _ as *const Scalar,
|
||||
a.len(),
|
||||
device_id,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn mult_sc_vec(a: &mut [Scalar], b: &[Scalar], device_id: usize) {
|
||||
assert_eq!(a.len(), b.len());
|
||||
unsafe {
|
||||
vec_mod_mult_scalar(
|
||||
a as *mut _ as *mut Scalar,
|
||||
b as *const _ as *const Scalar,
|
||||
a.len(),
|
||||
device_id,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Multiply a matrix by a scalar:
|
||||
// `a` - flattenned matrix;
|
||||
// `b` - vector to multiply `a` by;
|
||||
pub fn mult_matrix_by_vec(a: &[Scalar], b: &[Scalar], device_id: usize) -> Vec<Scalar> {
|
||||
let mut c = Vec::with_capacity(b.len());
|
||||
for i in 0..b.len() {
|
||||
c.push(Scalar::zero());
|
||||
}
|
||||
unsafe {
|
||||
matrix_vec_mod_mult(
|
||||
a as *const _ as *const Scalar,
|
||||
b as *const _ as *const Scalar,
|
||||
c.as_mut_slice() as *mut _ as *mut Scalar,
|
||||
b.len(),
|
||||
device_id,
|
||||
);
|
||||
}
|
||||
c
|
||||
}
|
||||
|
||||
pub fn clone_buffer<T: DeviceCopy>(buf: &mut DeviceBuffer<T>) -> DeviceBuffer<T> {
|
||||
let mut buf_cpy = unsafe { DeviceBuffer::uninitialized(buf.len()).unwrap() };
|
||||
unsafe { buf_cpy.copy_from(buf) };
|
||||
return buf_cpy;
|
||||
}
|
||||
|
||||
pub fn get_rng(seed: Option<u64>) -> Box<dyn RngCore> {
|
||||
let rng: Box<dyn RngCore> = match seed {
|
||||
Some(seed) => Box::new(StdRng::seed_from_u64(seed)),
|
||||
None => Box::new(rand::thread_rng()),
|
||||
};
|
||||
rng
|
||||
}
|
||||
|
||||
fn set_up_device() {
|
||||
// Set up the context, load the module, and create a stream to run kernels in.
|
||||
rustacuda::init(CudaFlags::empty()).unwrap();
|
||||
let device = Device::get_device(0).unwrap();
|
||||
let _ctx = Context::create_and_push(ContextFlags::MAP_HOST | ContextFlags::SCHED_AUTO, device).unwrap();
|
||||
}
|
||||
|
||||
pub fn generate_random_points(
|
||||
count: usize,
|
||||
mut rng: Box<dyn RngCore>,
|
||||
) -> Vec<PointAffineNoInfinity> {
|
||||
(0..count)
|
||||
.map(|_| Point::from_ark(G1Projective_BLS12_377::rand(&mut rng)).to_xy_strip_z())
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn generate_random_points_proj(count: usize, mut rng: Box<dyn RngCore>) -> Vec<Point> {
|
||||
(0..count)
|
||||
.map(|_| Point::from_ark(G1Projective_BLS12_377::rand(&mut rng)))
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn generate_random_scalars(count: usize, mut rng: Box<dyn RngCore>) -> Vec<Scalar> {
|
||||
(0..count)
|
||||
.map(|_| Scalar::from_ark(Fr_BLS12_377::rand(&mut rng).into_repr()))
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn set_up_points(test_size: usize, log_domain_size: usize, inverse: bool) -> (Vec<Point>, DeviceBuffer<Point>, DeviceBuffer<Scalar>) {
|
||||
set_up_device();
|
||||
|
||||
let d_domain = build_domain(1 << log_domain_size, log_domain_size, inverse);
|
||||
|
||||
let seed = Some(0); // fix the rng to get two equal scalar
|
||||
let vector = generate_random_points_proj(test_size, get_rng(seed));
|
||||
let mut vector_mut = vector.clone();
|
||||
|
||||
let mut d_vector = DeviceBuffer::from_slice(&vector[..]).unwrap();
|
||||
(vector_mut, d_vector, d_domain)
|
||||
}
|
||||
|
||||
pub fn set_up_scalars(test_size: usize, log_domain_size: usize, inverse: bool) -> (Vec<Scalar>, DeviceBuffer<Scalar>, DeviceBuffer<Scalar>) {
|
||||
set_up_device();
|
||||
|
||||
let d_domain = build_domain(1 << log_domain_size, log_domain_size, inverse);
|
||||
|
||||
let seed = Some(0); // fix the rng to get two equal scalars
|
||||
let mut vector_mut = generate_random_scalars(test_size, get_rng(seed));
|
||||
|
||||
let mut d_vector = DeviceBuffer::from_slice(&vector_mut[..]).unwrap();
|
||||
(vector_mut, d_vector, d_domain)
|
||||
}
|
||||
|
||||
4
bls12-377/src/lib.rs
Normal file
4
bls12-377/src/lib.rs
Normal file
@@ -0,0 +1,4 @@
|
||||
pub mod test_bls12_377;
|
||||
pub mod basic_structs;
|
||||
pub mod from_cuda;
|
||||
pub mod curve_structs;
|
||||
816
bls12-377/src/test_bls12_377.rs
Normal file
816
bls12-377/src/test_bls12_377.rs
Normal file
@@ -0,0 +1,816 @@
|
||||
use std::ffi::{c_int, c_uint};
|
||||
use ark_std::UniformRand;
|
||||
use rand::{rngs::StdRng, RngCore, SeedableRng};
|
||||
use rustacuda::CudaFlags;
|
||||
use rustacuda::memory::DeviceBox;
|
||||
use rustacuda::prelude::{DeviceBuffer, Device, ContextFlags, Context};
|
||||
use rustacuda_core::DevicePointer;
|
||||
use std::mem::transmute;
|
||||
pub use crate::basic_structs::scalar::ScalarTrait;
|
||||
pub use crate::curve_structs::*;
|
||||
use icicle_core::utils::{u32_vec_to_u64_vec, u64_vec_to_u32_vec};
|
||||
use std::marker::PhantomData;
|
||||
use std::convert::TryInto;
|
||||
use ark_bls12_377::{Fq as Fq_BLS12_377, Fr as Fr_BLS12_377, G1Affine as G1Affine_BLS12_377, G1Projective as G1Projective_BLS12_377};
|
||||
use ark_ec::AffineCurve;
|
||||
use ark_ff::{BigInteger384, BigInteger256, PrimeField};
|
||||
use rustacuda::memory::{CopyDestination, DeviceCopy};
|
||||
|
||||
|
||||
impl Scalar {
    /// Packs the scalar's u32 limbs into an ark `BigInteger256`.
    /// NOTE(review): named "254" (presumably the field's bit size) while the
    /// returned type is 256-bit — confirm before renaming.
    pub fn to_biginteger254(&self) -> BigInteger256 {
        BigInteger256::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap())
    }

    /// Same limb repacking as `to_biginteger254`: returns the raw big integer,
    /// with no field-element conversion performed here.
    pub fn to_ark(&self) -> BigInteger256 {
        BigInteger256::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap())
    }

    /// Builds a scalar from a raw `BigInteger256` by splitting its u64 limbs
    /// into u32 limbs.
    pub fn from_biginteger256(ark: BigInteger256) -> Self {
        Self{ value: u64_vec_to_u32_vec(&ark.0).try_into().unwrap(), phantom : PhantomData}
    }

    /// Reinterprets the scalar's bytes as a `BigInteger256` without any limb
    /// conversion — relies on both types having identical size and layout.
    pub fn to_biginteger256_transmute(&self) -> BigInteger256 {
        unsafe { transmute(*self) }
    }

    /// Inverse of `to_biginteger256_transmute`: bit-for-bit reinterpretation.
    pub fn from_biginteger_transmute(v: BigInteger256) -> Scalar {
        Scalar{ value: unsafe{ transmute(v)}, phantom : PhantomData }
    }

    /// Reinterprets the bytes directly as an ark `Fr` element — assumes the
    /// limbs are already in ark's internal representation; TODO confirm.
    pub fn to_ark_transmute(&self) -> Fr_BLS12_377 {
        unsafe { std::mem::transmute(*self) }
    }

    /// Inverse of `to_ark_transmute`: bit-for-bit copy of an ark `Fr`.
    pub fn from_ark_transmute(v: &Fr_BLS12_377) -> Scalar {
        unsafe { std::mem::transmute_copy(v) }
    }

    /// Wraps the limbs in `Fr::new`.
    /// NOTE(review): whether this performs a mod-p reduction or treats the
    /// limbs as ark's internal representation depends on ark 0.3 internals —
    /// confirm before relying on the name.
    pub fn to_ark_mod_p(&self) -> Fr_BLS12_377 {
        Fr_BLS12_377::new(BigInteger256::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap()))
    }

    /// Treats the limbs as a canonical integer and converts via `from_repr`;
    /// panics if the value is not a valid field element.
    pub fn to_ark_repr(&self) -> Fr_BLS12_377 {
        Fr_BLS12_377::from_repr(BigInteger256::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap())).unwrap()
    }

    /// Builds a scalar from a raw ark big integer (limb repacking only).
    pub fn from_ark(v: BigInteger256) -> Scalar {
        Self { value : u64_vec_to_u32_vec(&v.0).try_into().unwrap(), phantom: PhantomData}
    }

}
|
||||
|
||||
impl Base {
    /// Packs the base-field element's u32 limbs into an ark `BigInteger384`
    /// (raw integer form, no field conversion).
    pub fn to_ark(&self) -> BigInteger384 {
        BigInteger384::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap())
    }

    /// Builds a `Base` from a raw ark `BigInteger384` by splitting its u64
    /// limbs into u32 limbs.
    pub fn from_ark(ark: BigInteger384) -> Self {
        Self::from_limbs(&u64_vec_to_u32_vec(&ark.0))
    }
}
|
||||
|
||||
|
||||
impl Point {
    /// Converts to an ark projective point by normalizing through the affine
    /// form first.
    pub fn to_ark(&self) -> G1Projective_BLS12_377 {
        self.to_ark_affine().into_projective()
    }

    /// Normalizes the homogeneous (x, y, z) coordinates into an ark affine
    /// point as (x/z, y/z). Panics if z is zero (not invertible).
    pub fn to_ark_affine(&self) -> G1Affine_BLS12_377 {
        //TODO: generic conversion
        use ark_ff::Field;
        use std::ops::Mul;
        let proj_x_field = Fq_BLS12_377::from_le_bytes_mod_order(&self.x.to_bytes_le());
        let proj_y_field = Fq_BLS12_377::from_le_bytes_mod_order(&self.y.to_bytes_le());
        let proj_z_field = Fq_BLS12_377::from_le_bytes_mod_order(&self.z.to_bytes_le());
        let inverse_z = proj_z_field.inverse().unwrap();
        let aff_x = proj_x_field.mul(inverse_z);
        let aff_y = proj_y_field.mul(inverse_z);
        G1Affine_BLS12_377::new(aff_x, aff_y, false)
    }

    /// Converts an ark projective point into this crate's representation with
    /// z normalized to one.
    /// NOTE(review): the z^-2 / z^-3 factors suggest ark's coordinates are
    /// Jacobian here — confirm against ark-ec 0.3 before modifying.
    pub fn from_ark(ark: G1Projective_BLS12_377) -> Point {
        use ark_ff::Field;
        let z_inv = ark.z.inverse().unwrap();
        let z_invsq = z_inv * z_inv;
        let z_invq3 = z_invsq * z_inv;
        Point {
            x: Base::from_ark((ark.x * z_invsq).into_repr()),
            y: Base::from_ark((ark.y * z_invq3).into_repr()),
            z: Base::one(),
        }
    }
}
|
||||
|
||||
impl PointAffineNoInfinity {

    /// Builds an ark affine point wrapping the limbs with `Fq::new` (no
    /// `from_repr` validation/conversion — compare with `to_ark_repr`).
    pub fn to_ark(&self) -> G1Affine_BLS12_377 {
        G1Affine_BLS12_377::new(Fq_BLS12_377::new(self.x.to_ark()), Fq_BLS12_377::new(self.y.to_ark()), false)
    }

    /// Builds an ark affine point treating the limbs as canonical integers via
    /// `from_repr`; panics if a coordinate is not a valid field element.
    pub fn to_ark_repr(&self) -> G1Affine_BLS12_377 {
        G1Affine_BLS12_377::new(
            Fq_BLS12_377::from_repr(self.x.to_ark()).unwrap(),
            Fq_BLS12_377::from_repr(self.y.to_ark()).unwrap(),
            false,
        )
    }

    /// Converts an ark affine point into this type; the point-at-infinity flag
    /// is discarded (callers must not pass infinity — type name's contract).
    pub fn from_ark(p: &G1Affine_BLS12_377) -> Self {
        PointAffineNoInfinity {
            x: Base::from_ark(p.x.into_repr()),
            y: Base::from_ark(p.y.into_repr()),
        }
    }
}
|
||||
|
||||
impl Point {
    /// Drops the z coordinate after normalizing through the ark affine form;
    /// panics if z is zero (via `to_ark_affine`).
    pub fn to_affine(&self) -> PointAffineNoInfinity {
        let ark_affine = self.to_ark_affine();
        PointAffineNoInfinity {
            x: Base::from_ark(ark_affine.x.into_repr()),
            y: Base::from_ark(ark_affine.y.into_repr()),
        }
    }
}
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) mod tests_bls12_377 {
|
||||
use std::ops::Add;
|
||||
use ark_bls12_377::{Fr, G1Affine, G1Projective};
|
||||
use ark_ec::{msm::VariableBaseMSM, AffineCurve, ProjectiveCurve};
|
||||
use ark_ff::{FftField, Field, Zero, PrimeField};
|
||||
use ark_std::UniformRand;
|
||||
use rustacuda::prelude::{DeviceBuffer, CopyDestination};
|
||||
use crate::curve_structs::{Point, Scalar, Base};
|
||||
use crate::basic_structs::scalar::ScalarTrait;
|
||||
use crate::from_cuda::{generate_random_points, get_rng, generate_random_scalars, msm, msm_batch, set_up_scalars, commit, commit_batch, ntt, intt, generate_random_points_proj, ecntt, iecntt, ntt_batch, ecntt_batch, iecntt_batch, intt_batch, reverse_order_scalars_batch, interpolate_scalars_batch, set_up_points, reverse_order_points, interpolate_points, reverse_order_points_batch, interpolate_points_batch, evaluate_scalars, interpolate_scalars, reverse_order_scalars, evaluate_points, build_domain, evaluate_scalars_on_coset, evaluate_points_on_coset, mult_matrix_by_vec, mult_sc_vec, multp_vec,evaluate_scalars_batch, evaluate_points_batch, evaluate_scalars_on_coset_batch, evaluate_points_on_coset_batch};
|
||||
|
||||
fn random_points_ark_proj(nof_elements: usize) -> Vec<G1Projective> {
|
||||
let mut rng = ark_std::rand::thread_rng();
|
||||
let mut points_ga: Vec<G1Projective> = Vec::new();
|
||||
for _ in 0..nof_elements {
|
||||
let aff = G1Projective::rand(&mut rng);
|
||||
points_ga.push(aff);
|
||||
}
|
||||
points_ga
|
||||
}
|
||||
|
||||
    /// Naive O(n^2) reference (i)ECNTT over ark points, used to validate the
    /// GPU implementation. When `inverse` is true, uses the inverse root of
    /// unity and applies the final 1/size scaling.
    fn ecntt_arc_naive(
        points: &Vec<G1Projective>,
        size: usize,
        inverse: bool,
    ) -> Vec<G1Projective> {
        let mut result: Vec<G1Projective> = Vec::new();
        for _ in 0..size {
            result.push(G1Projective::zero());
        }
        // Forward transform uses the size-th root of unity; inverse uses its
        // field inverse.
        let rou: Fr;
        if !inverse {
            rou = Fr::get_root_of_unity(size).unwrap();
        } else {
            rou = Fr::inverse(&Fr::get_root_of_unity(size).unwrap()).unwrap();
        }
        // result[k] = sum over l of rou^(l*k) * points[l]
        for k in 0..size {
            for l in 0..size {
                let pow: [u64; 1] = [(l * k).try_into().unwrap()];
                let mul_rou = Fr::pow(&rou, &pow);
                result[k] = result[k].add(points[l].into_affine().mul(mul_rou));
            }
        }
        if inverse {
            let size2 = size as u64;
            // The inverse transform divides every output by the domain size.
            for k in 0..size {
                let multfactor = Fr::inverse(&Fr::from(size2)).unwrap();
                result[k] = result[k].into_affine().mul(multfactor);
            }
        }
        return result;
    }
|
||||
|
||||
fn check_eq(points: &Vec<G1Projective>, points2: &Vec<G1Projective>) -> bool {
|
||||
let mut eq = true;
|
||||
for i in 0..points.len() {
|
||||
if points2[i].ne(&points[i]) {
|
||||
eq = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
return eq;
|
||||
}
|
||||
|
||||
    /// Round-trips random points through the naive forward/inverse ECNTT:
    /// the forward transform must change the data and the inverse must
    /// restore the originals.
    fn test_naive_ark_ecntt(size: usize) {
        let points = random_points_ark_proj(size);
        let result1: Vec<G1Projective> = ecntt_arc_naive(&points, size, false);
        let result2: Vec<G1Projective> = ecntt_arc_naive(&result1, size, true);
        // Inverse output must differ from the forward output...
        assert!(!check_eq(&result2, &result1));
        // ...and equal the original input.
        assert!(check_eq(&result2, &points));
    }
|
||||
|
||||
    /// Compares the GPU MSM against arkworks' `VariableBaseMSM` reference for
    /// two input sizes, checking all conversion paths of the result.
    #[test]
    fn test_msm() {
        let test_sizes = [6, 9];

        for pow2 in test_sizes {
            let count = 1 << pow2;
            let seed = None; // set Some to provide seed
            let points = generate_random_points(count, get_rng(seed));
            let scalars = generate_random_scalars(count, get_rng(seed));

            let msm_result = msm(&points, &scalars, 0);

            let point_r_ark: Vec<_> = points.iter().map(|x| x.to_ark_repr()).collect();
            let scalars_r_ark: Vec<_> = scalars.iter().map(|x| x.to_ark()).collect();

            let msm_result_ark = VariableBaseMSM::multi_scalar_mul(&point_r_ark, &scalars_r_ark);

            // Same result through the affine, projective, and round-trip paths.
            assert_eq!(msm_result.to_ark_affine(), msm_result_ark);
            assert_eq!(msm_result.to_ark(), msm_result_ark);
            assert_eq!(
                msm_result.to_ark_affine(),
                Point::from_ark(msm_result_ark).to_ark_affine()
            );
        }
    }
|
||||
|
||||
    /// Compares batched GPU MSM against per-chunk arkworks MSMs over a grid of
    /// batch sizes and MSM sizes.
    #[test]
    fn test_batch_msm() {
        for batch_pow2 in [2, 4] {
            for pow2 in [4, 6] {
                let msm_size = 1 << pow2;
                let batch_size = 1 << batch_pow2;
                let seed = None; // set Some to provide seed
                let points_batch = generate_random_points(msm_size * batch_size, get_rng(seed));
                let scalars_batch = generate_random_scalars(msm_size * batch_size, get_rng(seed));

                let point_r_ark: Vec<_> = points_batch.iter().map(|x| x.to_ark_repr()).collect();
                let scalars_r_ark: Vec<_> = scalars_batch.iter().map(|x| x.to_ark()).collect();

                // Reference: one arkworks MSM per contiguous chunk.
                let expected: Vec<_> = point_r_ark
                    .chunks(msm_size)
                    .zip(scalars_r_ark.chunks(msm_size))
                    .map(|p| Point::from_ark(VariableBaseMSM::multi_scalar_mul(p.0, p.1)))
                    .collect();

                let result = msm_batch(&points_batch, &scalars_batch, batch_size, 0);

                assert_eq!(result, expected);
            }
        }
    }
|
||||
|
||||
    /// Checks that the device-resident `commit` equals the host-side `msm`
    /// and that neither degenerates to the zero point.
    #[test]
    fn test_commit() {
        let test_size = 1 << 8;
        let seed = Some(0);
        let (mut scalars, mut d_scalars, _) = set_up_scalars(test_size, 0, false);
        let mut points = generate_random_points(test_size, get_rng(seed));
        let mut d_points = DeviceBuffer::from_slice(&points[..]).unwrap();

        let msm_result = msm(&points, &scalars, 0);
        let mut d_commit_result = commit(&mut d_points, &mut d_scalars);
        let mut h_commit_result = Point::zero();
        d_commit_result.copy_to(&mut h_commit_result).unwrap();

        assert_eq!(msm_result, h_commit_result);
        assert_ne!(msm_result, Point::zero());
        assert_ne!(h_commit_result, Point::zero());
    }
|
||||
|
||||
    /// Checks that the device-resident batched commit equals the host-side
    /// batched MSM, with no zero results.
    #[test]
    fn test_batch_commit() {
        let batch_size = 4;
        let test_size = 1 << 12;
        let seed = Some(0);
        let (scalars, mut d_scalars, _) = set_up_scalars(test_size * batch_size, 0, false);
        let points = generate_random_points(test_size * batch_size, get_rng(seed));
        let mut d_points = DeviceBuffer::from_slice(&points[..]).unwrap();

        let msm_result = msm_batch(&points, &scalars, batch_size, 0);
        let mut d_commit_result = commit_batch(&mut d_points, &mut d_scalars, batch_size);
        let mut h_commit_result: Vec<Point> = (0..batch_size).map(|_| Point::zero()).collect();
        d_commit_result.copy_to(&mut h_commit_result[..]).unwrap();

        assert_eq!(msm_result, h_commit_result);
        for h in h_commit_result {
            assert_ne!(h, Point::zero());
        }
    }
|
||||
|
||||
    /// Round-trips scalars through GPU NTT/INTT and points through GPU
    /// ECNTT/IECNTT, cross-checking the point transforms against the naive
    /// arkworks reference.
    #[test]
    fn test_ntt() {
        //NTT
        let seed = None; //some value to fix the rng
        let test_size = 1 << 3;

        let scalars = generate_random_scalars(test_size, get_rng(seed));

        let mut ntt_result = scalars.clone();
        ntt(&mut ntt_result, 0);

        // Forward transform must change the data.
        assert_ne!(ntt_result, scalars);

        let mut intt_result = ntt_result.clone();

        intt(&mut intt_result, 0);

        // Inverse transform must restore the original scalars.
        assert_eq!(intt_result, scalars);

        //ECNTT
        let points_proj = generate_random_points_proj(test_size, get_rng(seed));

        test_naive_ark_ecntt(test_size);

        assert!(points_proj[0].to_ark().into_affine().is_on_curve());

        //naive ark
        let points_proj_ark = points_proj
            .iter()
            .map(|p| p.to_ark())
            .collect::<Vec<G1Projective>>();

        let ecntt_result_naive = ecntt_arc_naive(&points_proj_ark, points_proj_ark.len(), false);

        let iecntt_result_naive = ecntt_arc_naive(&ecntt_result_naive, points_proj_ark.len(), true);

        assert_eq!(points_proj_ark, iecntt_result_naive);

        //ingo gpu
        let mut ecntt_result = points_proj.to_vec();
        ecntt(&mut ecntt_result, 0);

        assert_ne!(ecntt_result, points_proj);

        let mut iecntt_result = ecntt_result.clone();
        iecntt(&mut iecntt_result, 0);

        // Both the naive round-trip and the GPU round-trip must land back on
        // the original points (compared in affine form).
        assert_eq!(
            iecntt_result_naive,
            points_proj
                .iter()
                .map(|p| p.to_ark_affine())
                .collect::<Vec<G1Affine>>()
        );
        assert_eq!(
            iecntt_result
                .iter()
                .map(|p| p.to_ark_affine())
                .collect::<Vec<G1Affine>>(),
            points_proj
                .iter()
                .map(|p| p.to_ark_affine())
                .collect::<Vec<G1Affine>>()
        );
    }
|
||||
|
||||
    /// Checks that a batched GPU (i)NTT / (i)ECNTT over contiguous chunks
    /// equals running the single-transform on each chunk independently, and
    /// that inverse transforms restore the inputs.
    #[test]
    fn test_ntt_batch() {
        //NTT
        let seed = None; //some value to fix the rng
        let test_size = 1 << 5;
        let batches = 4;

        let scalars_batch: Vec<Scalar> =
            generate_random_scalars(test_size * batches, get_rng(seed));

        // Split the flat batch into per-chunk vectors for the reference run.
        let mut scalar_vec_of_vec: Vec<Vec<Scalar>> = Vec::new();

        for i in 0..batches {
            scalar_vec_of_vec.push(scalars_batch[i * test_size..(i + 1) * test_size].to_vec());
        }

        let mut ntt_result = scalars_batch.clone();

        // do batch ntt
        ntt_batch(&mut ntt_result, test_size, 0);

        let mut ntt_result_vec_of_vec = Vec::new();

        // do ntt for every chunk
        for i in 0..batches {
            ntt_result_vec_of_vec.push(scalar_vec_of_vec[i].clone());
            ntt(&mut ntt_result_vec_of_vec[i], 0);
        }

        // check that the ntt of each vec of scalars is equal to the intt of the specific batch
        for i in 0..batches {
            assert_eq!(
                ntt_result_vec_of_vec[i],
                ntt_result[i * test_size..(i + 1) * test_size]
            );
        }

        // check that ntt output is different from input
        assert_ne!(ntt_result, scalars_batch);

        let mut intt_result = ntt_result.clone();

        // do batch intt
        intt_batch(&mut intt_result, test_size, 0);

        let mut intt_result_vec_of_vec = Vec::new();

        // do intt for every chunk
        for i in 0..batches {
            intt_result_vec_of_vec.push(ntt_result_vec_of_vec[i].clone());
            intt(&mut intt_result_vec_of_vec[i], 0);
        }

        // check that the intt of each vec of scalars is equal to the intt of the specific batch
        for i in 0..batches {
            assert_eq!(
                intt_result_vec_of_vec[i],
                intt_result[i * test_size..(i + 1) * test_size]
            );
        }

        // The batched inverse must restore the original flat input.
        assert_eq!(intt_result, scalars_batch);

        // //ECNTT
        let points_proj = generate_random_points_proj(test_size * batches, get_rng(seed));

        let mut points_vec_of_vec: Vec<Vec<Point>> = Vec::new();

        for i in 0..batches {
            points_vec_of_vec.push(points_proj[i * test_size..(i + 1) * test_size].to_vec());
        }

        let mut ntt_result_points = points_proj.clone();

        // do batch ecintt
        ecntt_batch(&mut ntt_result_points, test_size, 0);

        let mut ntt_result_points_vec_of_vec = Vec::new();

        for i in 0..batches {
            ntt_result_points_vec_of_vec.push(points_vec_of_vec[i].clone());
            ecntt(&mut ntt_result_points_vec_of_vec[i], 0);
        }

        for i in 0..batches {
            assert_eq!(
                ntt_result_points_vec_of_vec[i],
                ntt_result_points[i * test_size..(i + 1) * test_size]
            );
        }

        assert_ne!(ntt_result_points, points_proj);

        let mut intt_result_points = ntt_result_points.clone();

        // do batch ecintt
        iecntt_batch(&mut intt_result_points, test_size, 0);

        let mut intt_result_points_vec_of_vec = Vec::new();

        // do ecintt for every chunk
        for i in 0..batches {
            intt_result_points_vec_of_vec.push(ntt_result_points_vec_of_vec[i].clone());
            iecntt(&mut intt_result_points_vec_of_vec[i], 0);
        }

        // check that the ecintt of each vec of scalars is equal to the intt of the specific batch
        for i in 0..batches {
            assert_eq!(
                intt_result_points_vec_of_vec[i],
                intt_result_points[i * test_size..(i + 1) * test_size]
            );
        }

        assert_eq!(intt_result_points, points_proj);
    }
|
||||
|
||||
    /// Checks GPU scalar interpolation (bit-reverse + interpolate) against the
    /// host-side INTT of the same evaluations.
    #[test]
    fn test_scalar_interpolation() {
        let log_test_size = 7;
        let test_size = 1 << log_test_size;
        let (mut evals_mut, mut d_evals, mut d_domain) = set_up_scalars(test_size, log_test_size, true);

        reverse_order_scalars(&mut d_evals);
        let mut d_coeffs = interpolate_scalars(&mut d_evals, &mut d_domain);
        // Host reference: in-place inverse NTT of the same evaluations.
        intt(&mut evals_mut, 0);
        let mut h_coeffs: Vec<Scalar> = (0..test_size).map(|_| Scalar::zero()).collect();
        d_coeffs.copy_to(&mut h_coeffs[..]).unwrap();

        assert_eq!(h_coeffs, evals_mut);
    }
|
||||
|
||||
    /// Batched variant of `test_scalar_interpolation`: device batch
    /// interpolation must match the host batch INTT.
    #[test]
    fn test_scalar_batch_interpolation() {
        let batch_size = 4;
        let log_test_size = 10;
        let test_size = 1 << log_test_size;
        let (mut evals_mut, mut d_evals, mut d_domain) = set_up_scalars(test_size * batch_size, log_test_size, true);

        reverse_order_scalars_batch(&mut d_evals, batch_size);
        let mut d_coeffs = interpolate_scalars_batch(&mut d_evals, &mut d_domain, batch_size);
        // Host reference: batched in-place inverse NTT.
        intt_batch(&mut evals_mut, test_size, 0);
        let mut h_coeffs: Vec<Scalar> = (0..test_size * batch_size).map(|_| Scalar::zero()).collect();
        d_coeffs.copy_to(&mut h_coeffs[..]).unwrap();

        assert_eq!(h_coeffs, evals_mut);
    }
|
||||
|
||||
    /// Checks GPU point interpolation against the host-side IECNTT and that no
    /// resulting coefficient is the zero point.
    #[test]
    fn test_point_interpolation() {
        let log_test_size = 6;
        let test_size = 1 << log_test_size;
        let (mut evals_mut, mut d_evals, mut d_domain) = set_up_points(test_size, log_test_size, true);

        reverse_order_points(&mut d_evals);
        let mut d_coeffs = interpolate_points(&mut d_evals, &mut d_domain);
        // Host reference: in-place inverse ECNTT of the same evaluations.
        iecntt(&mut evals_mut[..], 0);
        let mut h_coeffs: Vec<Point> = (0..test_size).map(|_| Point::zero()).collect();
        d_coeffs.copy_to(&mut h_coeffs[..]).unwrap();

        assert_eq!(h_coeffs, *evals_mut);
        for h in h_coeffs.iter() {
            assert_ne!(*h, Point::zero());
        }
    }
|
||||
|
||||
    /// Batched variant of `test_point_interpolation`: device batch
    /// interpolation must match the host batch IECNTT, with no zero points.
    #[test]
    fn test_point_batch_interpolation() {
        let batch_size = 4;
        let log_test_size = 6;
        let test_size = 1 << log_test_size;
        let (mut evals_mut, mut d_evals, mut d_domain) = set_up_points(test_size * batch_size, log_test_size, true);

        reverse_order_points_batch(&mut d_evals, batch_size);
        let mut d_coeffs = interpolate_points_batch(&mut d_evals, &mut d_domain, batch_size);
        // Host reference: batched in-place inverse ECNTT.
        iecntt_batch(&mut evals_mut[..], test_size, 0);
        let mut h_coeffs: Vec<Point> = (0..test_size * batch_size).map(|_| Point::zero()).collect();
        d_coeffs.copy_to(&mut h_coeffs[..]).unwrap();

        assert_eq!(h_coeffs, *evals_mut);
        for h in h_coeffs.iter() {
            assert_ne!(*h, Point::zero());
        }
    }
|
||||
|
||||
    /// Evaluates a low-degree polynomial on a larger domain, interpolates it
    /// back, and expects the original coefficients followed by zero padding.
    #[test]
    fn test_scalar_evaluation() {
        let log_test_domain_size = 8;
        let coeff_size = 1 << 6;
        let (h_coeffs, mut d_coeffs, mut d_domain) = set_up_scalars(coeff_size, log_test_domain_size, false);
        let (_, _, mut d_domain_inv) = set_up_scalars(0, log_test_domain_size, true);

        let mut d_evals = evaluate_scalars(&mut d_coeffs, &mut d_domain);
        let mut d_coeffs_domain = interpolate_scalars(&mut d_evals, &mut d_domain_inv);
        let mut h_coeffs_domain: Vec<Scalar> = (0..1 << log_test_domain_size).map(|_| Scalar::zero()).collect();
        d_coeffs_domain.copy_to(&mut h_coeffs_domain[..]).unwrap();

        // Leading coefficients must round-trip unchanged...
        assert_eq!(h_coeffs, h_coeffs_domain[..coeff_size]);
        // ...and the remainder of the domain must interpolate to zero.
        for i in coeff_size.. (1 << log_test_domain_size) {
            assert_eq!(Scalar::zero(), h_coeffs_domain[i]);
        }
    }
|
||||
|
||||
    /// Batched variant of `test_scalar_evaluation`: each chunk must round-trip
    /// to its original coefficients plus zero padding.
    #[test]
    fn test_scalar_batch_evaluation() {
        let batch_size = 6;
        let log_test_domain_size = 8;
        let domain_size = 1 << log_test_domain_size;
        let coeff_size = 1 << 6;
        let (h_coeffs, mut d_coeffs, mut d_domain) = set_up_scalars(coeff_size * batch_size, log_test_domain_size, false);
        let (_, _, mut d_domain_inv) = set_up_scalars(0, log_test_domain_size, true);

        let mut d_evals = evaluate_scalars_batch(&mut d_coeffs, &mut d_domain, batch_size);
        let mut d_coeffs_domain = interpolate_scalars_batch(&mut d_evals, &mut d_domain_inv, batch_size);
        let mut h_coeffs_domain: Vec<Scalar> = (0..domain_size * batch_size).map(|_| Scalar::zero()).collect();
        d_coeffs_domain.copy_to(&mut h_coeffs_domain[..]).unwrap();

        for j in 0..batch_size {
            // Per-chunk: original coefficients back, then zeros.
            assert_eq!(h_coeffs[j * coeff_size..(j + 1) * coeff_size], h_coeffs_domain[j * domain_size..j * domain_size + coeff_size]);
            for i in coeff_size..domain_size {
                assert_eq!(Scalar::zero(), h_coeffs_domain[j * domain_size + i]);
            }
        }
    }
|
||||
|
||||
    /// Point analogue of `test_scalar_evaluation`: evaluate, interpolate back,
    /// expect original points plus zero padding, with no zero among the
    /// round-tripped points.
    #[test]
    fn test_point_evaluation() {
        let log_test_domain_size = 7;
        let coeff_size = 1 << 7;
        let (h_coeffs, mut d_coeffs, mut d_domain) = set_up_points(coeff_size, log_test_domain_size, false);
        let (_, _, mut d_domain_inv) = set_up_points(0, log_test_domain_size, true);

        let mut d_evals = evaluate_points(&mut d_coeffs, &mut d_domain);
        let mut d_coeffs_domain = interpolate_points(&mut d_evals, &mut d_domain_inv);
        let mut h_coeffs_domain: Vec<Point> = (0..1 << log_test_domain_size).map(|_| Point::zero()).collect();
        d_coeffs_domain.copy_to(&mut h_coeffs_domain[..]).unwrap();

        assert_eq!(h_coeffs[..], h_coeffs_domain[..coeff_size]);
        for i in coeff_size..(1 << log_test_domain_size) {
            assert_eq!(Point::zero(), h_coeffs_domain[i]);
        }
        for i in 0..coeff_size {
            assert_ne!(h_coeffs_domain[i], Point::zero());
        }
    }
|
||||
|
||||
    /// Batched variant of `test_point_evaluation`: each chunk must round-trip
    /// to its original points plus zero padding, with no zero points among
    /// the round-tripped coefficients.
    #[test]
    fn test_point_batch_evaluation() {
        let batch_size = 4;
        let log_test_domain_size = 6;
        let domain_size = 1 << log_test_domain_size;
        let coeff_size = 1 << 5;
        let (h_coeffs, mut d_coeffs, mut d_domain) = set_up_points(coeff_size * batch_size, log_test_domain_size, false);
        let (_, _, mut d_domain_inv) = set_up_points(0, log_test_domain_size, true);

        let mut d_evals = evaluate_points_batch(&mut d_coeffs, &mut d_domain, batch_size);
        let mut d_coeffs_domain = interpolate_points_batch(&mut d_evals, &mut d_domain_inv, batch_size);
        let mut h_coeffs_domain: Vec<Point> = (0..domain_size * batch_size).map(|_| Point::zero()).collect();
        d_coeffs_domain.copy_to(&mut h_coeffs_domain[..]).unwrap();

        for j in 0..batch_size {
            // Per-chunk: original points back, then zeros.
            assert_eq!(h_coeffs[j * coeff_size..(j + 1) * coeff_size], h_coeffs_domain[j * domain_size..(j * domain_size + coeff_size)]);
            for i in coeff_size..domain_size {
                assert_eq!(Point::zero(), h_coeffs_domain[j * domain_size + i]);
            }
            for i in j * domain_size..(j * domain_size + coeff_size) {
                assert_ne!(h_coeffs_domain[i], Point::zero());
            }
        }
    }
|
||||
|
||||
    /// Checks that evaluating on the coset generated by 1 (trivial coset
    /// powers) gives exactly the plain subgroup evaluations.
    #[test]
    fn test_scalar_evaluation_on_trivial_coset() {
        // checks that the evaluations on the subgroup is the same as on the coset generated by 1
        let log_test_domain_size = 8;
        let coeff_size = 1 << 6;
        let (_, mut d_coeffs, mut d_domain) = set_up_scalars(coeff_size, log_test_domain_size, false);
        let (_, _, mut d_domain_inv) = set_up_scalars(coeff_size, log_test_domain_size, true);
        // A "coset" of all-ones powers: log size 0 builds the trivial domain.
        let mut d_trivial_coset_powers = build_domain(1 << log_test_domain_size, 0, false);

        let mut d_evals = evaluate_scalars(&mut d_coeffs, &mut d_domain);
        let mut h_coeffs: Vec<Scalar> = (0..1 << log_test_domain_size).map(|_| Scalar::zero()).collect();
        d_evals.copy_to(&mut h_coeffs[..]).unwrap();
        let mut d_evals_coset = evaluate_scalars_on_coset(&mut d_coeffs, &mut d_domain, &mut d_trivial_coset_powers);
        let mut h_evals_coset: Vec<Scalar> = (0..1 << log_test_domain_size).map(|_| Scalar::zero()).collect();
        d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();

        assert_eq!(h_coeffs, h_evals_coset);
    }
|
||||
|
||||
#[test]
fn test_scalar_evaluation_on_coset() {
    // checks that evaluating a polynomial on a subgroup and its coset is the same as evaluating on a 2x larger subgroup
    let log_test_size = 8;
    let test_size = 1 << log_test_size;
    let (_, mut d_coeffs, mut d_domain) = set_up_scalars(test_size, log_test_size, false);
    // Domain of twice the size: its even powers form the subgroup, odd powers the coset.
    let (_, _, mut d_large_domain) = set_up_scalars(0, log_test_size + 1, false);
    let mut d_coset_powers = build_domain(test_size, log_test_size + 1, false);

    let mut d_evals_large = evaluate_scalars(&mut d_coeffs, &mut d_large_domain);
    let mut h_evals_large: Vec<Scalar> = (0..2 * test_size).map(|_| Scalar::zero()).collect();
    d_evals_large.copy_to(&mut h_evals_large[..]).unwrap();
    let mut d_evals = evaluate_scalars(&mut d_coeffs, &mut d_domain);
    let mut h_evals: Vec<Scalar> = (0..test_size).map(|_| Scalar::zero()).collect();
    d_evals.copy_to(&mut h_evals[..]).unwrap();
    let mut d_evals_coset = evaluate_scalars_on_coset(&mut d_coeffs, &mut d_domain, &mut d_coset_powers);
    let mut h_evals_coset: Vec<Scalar> = (0..test_size).map(|_| Scalar::zero()).collect();
    d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();

    // Large-domain evaluations split into subgroup half and coset half.
    // NOTE(review): this assumes the large evaluation is laid out as
    // [subgroup | coset] rather than interleaved — the point-test below
    // checks interleaved indices (2*i, 2*i+1); confirm the kernel's layout.
    assert_eq!(h_evals[..], h_evals_large[..test_size]);
    assert_eq!(h_evals_coset[..], h_evals_large[test_size..2 * test_size]);
}
|
||||
|
||||
#[test]
fn test_scalar_batch_evaluation_on_coset() {
    // checks that evaluating a polynomial on a subgroup and its coset is the same as evaluating on a 2x larger subgroup
    let batch_size = 4;
    let log_test_size = 6;
    let test_size = 1 << log_test_size;
    let (_, mut d_coeffs, mut d_domain) = set_up_scalars(test_size * batch_size, log_test_size, false);
    let (_, _, mut d_large_domain) = set_up_scalars(0, log_test_size + 1, false);
    let mut d_coset_powers = build_domain(test_size, log_test_size + 1, false);

    let mut d_evals_large = evaluate_scalars_batch(&mut d_coeffs, &mut d_large_domain, batch_size);
    let mut h_evals_large: Vec<Scalar> = (0..2 * test_size * batch_size).map(|_| Scalar::zero()).collect();
    d_evals_large.copy_to(&mut h_evals_large[..]).unwrap();
    let mut d_evals = evaluate_scalars_batch(&mut d_coeffs, &mut d_domain, batch_size);
    let mut h_evals: Vec<Scalar> = (0..test_size * batch_size).map(|_| Scalar::zero()).collect();
    d_evals.copy_to(&mut h_evals[..]).unwrap();
    let mut d_evals_coset = evaluate_scalars_on_coset_batch(&mut d_coeffs, &mut d_domain, batch_size, &mut d_coset_powers);
    let mut h_evals_coset: Vec<Scalar> = (0..test_size * batch_size).map(|_| Scalar::zero()).collect();
    d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();

    // Each batch slot of the large evaluation holds [subgroup | coset] halves.
    for i in 0..batch_size {
        assert_eq!(h_evals_large[2 * i * test_size..(2 * i + 1) * test_size], h_evals[i * test_size..(i + 1) * test_size]);
        assert_eq!(h_evals_large[(2 * i + 1) * test_size..(2 * i + 2) * test_size], h_evals_coset[i * test_size..(i + 1) * test_size]);
    }
}
|
||||
|
||||
#[test]
fn test_point_evaluation_on_coset() {
    // checks that evaluating a polynomial on a subgroup and its coset is the same as evaluating on a 2x larger subgroup
    let log_test_size = 8;
    let test_size = 1 << log_test_size;
    let (_, mut d_coeffs, mut d_domain) = set_up_points(test_size, log_test_size, false);
    let (_, _, mut d_large_domain) = set_up_points(0, log_test_size + 1, false);
    let mut d_coset_powers = build_domain(test_size, log_test_size + 1, false);

    let mut d_evals_large = evaluate_points(&mut d_coeffs, &mut d_large_domain);
    let mut h_evals_large: Vec<Point> = (0..2 * test_size).map(|_| Point::zero()).collect();
    d_evals_large.copy_to(&mut h_evals_large[..]).unwrap();
    let mut d_evals = evaluate_points(&mut d_coeffs, &mut d_domain);
    let mut h_evals: Vec<Point> = (0..test_size).map(|_| Point::zero()).collect();
    d_evals.copy_to(&mut h_evals[..]).unwrap();
    let mut d_evals_coset = evaluate_points_on_coset(&mut d_coeffs, &mut d_domain, &mut d_coset_powers);
    let mut h_evals_coset: Vec<Point> = (0..test_size).map(|_| Point::zero()).collect();
    d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();

    assert_eq!(h_evals[..], h_evals_large[..test_size]);
    assert_eq!(h_evals_coset[..], h_evals_large[test_size..2 * test_size]);
    // Sanity: no evaluation collapsed to the identity point.
    // NOTE(review): the indices 2*i / 2*i+1 assume an interleaved layout,
    // while the slice asserts above assume [subgroup | coset] halves —
    // both only hold together because all entries are checked non-zero.
    for i in 0..test_size {
        assert_ne!(h_evals[i], Point::zero());
        assert_ne!(h_evals_coset[i], Point::zero());
        assert_ne!(h_evals_large[2 * i], Point::zero());
        assert_ne!(h_evals_large[2 * i + 1], Point::zero());
    }
}
|
||||
|
||||
#[test]
fn test_point_batch_evaluation_on_coset() {
    // checks that evaluating a polynomial on a subgroup and its coset is the same as evaluating on a 2x larger subgroup
    let batch_size = 2;
    let log_test_size = 6;
    let test_size = 1 << log_test_size;
    let (_, mut d_coeffs, mut d_domain) = set_up_points(test_size * batch_size, log_test_size, false);
    let (_, _, mut d_large_domain) = set_up_points(0, log_test_size + 1, false);
    let mut d_coset_powers = build_domain(test_size, log_test_size + 1, false);

    let mut d_evals_large = evaluate_points_batch(&mut d_coeffs, &mut d_large_domain, batch_size);
    let mut h_evals_large: Vec<Point> = (0..2 * test_size * batch_size).map(|_| Point::zero()).collect();
    d_evals_large.copy_to(&mut h_evals_large[..]).unwrap();
    let mut d_evals = evaluate_points_batch(&mut d_coeffs, &mut d_domain, batch_size);
    let mut h_evals: Vec<Point> = (0..test_size * batch_size).map(|_| Point::zero()).collect();
    d_evals.copy_to(&mut h_evals[..]).unwrap();
    let mut d_evals_coset = evaluate_points_on_coset_batch(&mut d_coeffs, &mut d_domain, batch_size, &mut d_coset_powers);
    let mut h_evals_coset: Vec<Point> = (0..test_size * batch_size).map(|_| Point::zero()).collect();
    d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();

    // Per batch slot: large evaluation splits into subgroup and coset halves.
    for i in 0..batch_size {
        assert_eq!(h_evals_large[2 * i * test_size..(2 * i + 1) * test_size], h_evals[i * test_size..(i + 1) * test_size]);
        assert_eq!(h_evals_large[(2 * i + 1) * test_size..(2 * i + 2) * test_size], h_evals_coset[i * test_size..(i + 1) * test_size]);
    }
    // Sanity: no entry collapsed to the identity (indices 2*i / 2*i+1 simply
    // cover all of h_evals_large since it has 2 * test_size * batch_size entries).
    for i in 0..test_size * batch_size {
        assert_ne!(h_evals[i], Point::zero());
        assert_ne!(h_evals_coset[i], Point::zero());
        assert_ne!(h_evals_large[2 * i], Point::zero());
        assert_ne!(h_evals_large[2 * i + 1], Point::zero());
    }
}
|
||||
|
||||
// testing matrix multiplication by comparing the result of FFT with the naive multiplication by the DFT matrix
#[test]
fn test_matrix_multiplication() {
    let seed = None; // some value to fix the rng
    let test_size = 1 << 5;
    // Build the DFT matrix from arkworks' root of unity: M[row][col] = rou^(row*col).
    let rou = Fr::get_root_of_unity(test_size).unwrap();
    let matrix_flattened: Vec<Scalar> = (0..test_size).map(
        |row_num| { (0..test_size).map(
            |col_num| {
                let pow: [u64; 1] = [(row_num * col_num).try_into().unwrap()];
                Scalar::from_ark(Fr::pow(&rou, &pow).into_repr())
            }).collect::<Vec<Scalar>>()
        }).flatten().collect::<Vec<_>>();
    let vector: Vec<Scalar> = generate_random_scalars(test_size, get_rng(seed));

    // Naive O(n^2) DFT vs the NTT of the same vector.
    let result = mult_matrix_by_vec(&matrix_flattened, &vector, 0);
    let mut ntt_result = vector.clone();
    ntt(&mut ntt_result, 0);

    // we don't use the same roots of unity as arkworks, so the results are permutations
    // of one another and the only guaranteed fixed scalars are the following ones:
    assert_eq!(result[0], ntt_result[0]);
    assert_eq!(result[test_size >> 1], ntt_result[test_size >> 1]);
}
|
||||
|
||||
#[test]
#[allow(non_snake_case)]
fn test_vec_scalar_mul() {
    // Element-wise scalar multiplication in place:
    // [1, 1, 0] * [1, 0, 0] must yield [1, 0, 0].
    let one = Scalar::one();
    let zero = Scalar::zero();
    let expected = [one, zero, zero];
    let mut inout = [one, one, zero];
    mult_sc_vec(&mut inout, &expected, 0);
    assert_eq!(inout, expected);
}
|
||||
|
||||
#[test]
#[allow(non_snake_case)]
fn test_vec_point_mul() {
    // A placeholder point with all-one coordinates (not necessarily on-curve).
    let dummy_one = Point {
        x: Base::one(),
        y: Base::one(),
        z: Base::one(),
    };

    // Multiplying element-wise by [1, 0, 0] keeps the first point and
    // zeroes out the remaining entries.
    let scalars = [Scalar::one(), Scalar::zero(), Scalar::zero()];
    let expected = [dummy_one, Point::zero(), Point::zero()];
    let mut inout = [dummy_one, dummy_one, Point::zero()];
    multp_vec(&mut inout, &scalars, 0);
    assert_eq!(inout, expected);
}
|
||||
}
|
||||
34
bls12-381/Cargo.toml
Normal file
34
bls12-381/Cargo.toml
Normal file
@@ -0,0 +1,34 @@
|
||||
# Manifest for the BLS12-381 bindings over the shared icicle CUDA core.
[package]
name = "bls12-381"
version = "0.1.0"
edition = "2021"
authors = [ "Ingonyama" ]

[dependencies]
# Curve-agnostic shared helpers (limb conversion utilities, etc.).
icicle-core = { path = "../icicle-core" }

hex = "*"
# arkworks crates provide reference implementations / test vectors.
ark-std = "0.3.0"
ark-ff = "0.3.0"
ark-poly = "0.3.0"
ark-ec = { version = "0.3.0", features = [ "parallel" ] }
ark-bls12-381 = "0.3.0"

serde = { version = "1.0", features = ["derive"] }
serde_derive = "1.0"
serde_cbor = "0.11.2"

# CUDA FFI layer (device buffers, pointers, derive for DeviceCopy).
rustacuda = "0.1"
rustacuda_core = "0.1"
rustacuda_derive = "0.1"

rand = "*" #TODO: move rand and ark dependencies to dev once random scalar/point generation is done "natively"

[build-dependencies]
# Drives nvcc to compile the CUDA kernels (see build.rs).
cc = { version = "1.0", features = ["parallel"] }

[dev-dependencies]
"criterion" = "0.4.0"

[features]
# Enables G2 support in the CUDA build (defines G2_DEFINED).
g2 = []
|
||||
36
bls12-381/build.rs
Normal file
36
bls12-381/build.rs
Normal file
@@ -0,0 +1,36 @@
|
||||
use std::env;
|
||||
|
||||
fn main() {
|
||||
//TODO: check cargo features selected
|
||||
//TODO: can conflict/duplicate with make ?
|
||||
|
||||
println!("cargo:rerun-if-env-changed=CXXFLAGS");
|
||||
println!("cargo:rerun-if-changed=./icicle");
|
||||
|
||||
let arch_type = env::var("ARCH_TYPE").unwrap_or(String::from("native"));
|
||||
let stream_type = env::var("DEFAULT_STREAM").unwrap_or(String::from("legacy"));
|
||||
|
||||
let mut arch = String::from("-arch=");
|
||||
arch.push_str(&arch_type);
|
||||
let mut stream = String::from("-default-stream=");
|
||||
stream.push_str(&stream_type);
|
||||
|
||||
let mut nvcc = cc::Build::new();
|
||||
|
||||
println!("Compiling icicle library using arch: {}", &arch);
|
||||
|
||||
if cfg!(feature = "g2") {
|
||||
nvcc.define("G2_DEFINED", None);
|
||||
}
|
||||
nvcc.cuda(true);
|
||||
nvcc.define("FEATURE_BLS12_381", None);
|
||||
nvcc.debug(false);
|
||||
nvcc.flag(&arch);
|
||||
nvcc.flag(&stream);
|
||||
nvcc.shared_flag(false);
|
||||
// nvcc.static_flag(true);
|
||||
nvcc.files([
|
||||
"../icicle-cuda/curves/index.cu",
|
||||
]);
|
||||
nvcc.compile("ingo_icicle"); //TODO: extension??
|
||||
}
|
||||
4
bls12-381/src/basic_structs/field.rs
Normal file
4
bls12-381/src/basic_structs/field.rs
Normal file
@@ -0,0 +1,4 @@
|
||||
/// Marker trait for a finite field described by its modulus as u32 limbs.
/// `NUM_LIMBS` fixes the limb count at the type level.
pub trait Field<const NUM_LIMBS: usize> {
    // NOTE(review): "MODOLUS" is a misspelling of MODULUS; renaming would
    // break the impls in curve_structs.rs, so it is only flagged here.
    // Current impls set it to all zeros (placeholder).
    const MODOLUS: [u32;NUM_LIMBS];
    // Limb count re-exported as an associated const.
    const LIMBS: usize = NUM_LIMBS;
}
|
||||
3
bls12-381/src/basic_structs/mod.rs
Normal file
3
bls12-381/src/basic_structs/mod.rs
Normal file
@@ -0,0 +1,3 @@
|
||||
pub mod field;
|
||||
pub mod scalar;
|
||||
pub mod point;
|
||||
106
bls12-381/src/basic_structs/point.rs
Normal file
106
bls12-381/src/basic_structs/point.rs
Normal file
@@ -0,0 +1,106 @@
|
||||
use std::ffi::c_uint;
|
||||
|
||||
use ark_ec::AffineCurve;
|
||||
use ark_ff::{BigInteger256, PrimeField};
|
||||
use std::mem::transmute;
|
||||
use ark_ff::Field;
|
||||
use icicle_core::utils::{u32_vec_to_u64_vec, u64_vec_to_u32_vec};
|
||||
|
||||
use rustacuda_core::DeviceCopy;
|
||||
use rustacuda_derive::DeviceCopy;
|
||||
|
||||
use super::scalar::{get_fixed_limbs, self};
|
||||
|
||||
|
||||
/// Projective curve point with coordinates in base field `BF`.
/// `#[repr(C)]` so the layout matches the CUDA-side struct.
#[derive(Debug, Clone, Copy, DeviceCopy)]
#[repr(C)]
pub struct PointT<BF: scalar::ScalarTrait> {
    pub x: BF,
    pub y: BF,
    pub z: BF,
}
|
||||
|
||||
impl<BF: DeviceCopy + scalar::ScalarTrait> Default for PointT<BF> {
    /// The default point is the point at infinity (`zero()`).
    fn default() -> Self {
        PointT::zero()
    }
}
|
||||
|
||||
impl<BF: DeviceCopy + scalar::ScalarTrait> PointT<BF> {
    /// The point at infinity in projective coordinates: (0 : 1 : 0).
    pub fn zero() -> Self {
        PointT {
            x: BF::zero(),
            y: BF::one(),
            z: BF::zero(),
        }
    }

    /// Alias for `zero()` — the group identity.
    pub fn infinity() -> Self {
        Self::zero()
    }
}
|
||||
|
||||
/// Affine curve point without an infinity flag; (0, 0) conventionally
/// stands in for the identity. `#[repr(C)]` for CUDA interop.
#[derive(Debug, PartialEq, Clone, Copy, DeviceCopy)]
#[repr(C)]
pub struct PointAffineNoInfinityT<BF> {
    pub x: BF,
    pub y: BF,
}
|
||||
|
||||
impl<BF: scalar::ScalarTrait> Default for PointAffineNoInfinityT<BF> {
    /// Defaults to (0, 0), the conventional "no point" encoding here.
    fn default() -> Self {
        PointAffineNoInfinityT {
            x: BF::zero(),
            y: BF::zero(),
        }
    }
}
|
||||
|
||||
impl<BF: Copy + scalar::ScalarTrait> PointAffineNoInfinityT<BF> {
    ///From u32 limbs x,y
    pub fn from_limbs(x: &[u32], y: &[u32]) -> Self {
        PointAffineNoInfinityT {
            x: BF::from_limbs(x),
            y: BF::from_limbs(y)
        }
    }

    /// Concatenated u32 limbs: x limbs followed by y limbs.
    pub fn limbs(&self) -> Vec<u32> {
        [self.x.limbs(), self.y.limbs()].concat()
    }

    /// Lifts to projective coordinates by setting z = 1.
    pub fn to_projective(&self) -> PointT<BF> {
        PointT {
            x: self.x,
            y: self.y,
            z: BF::one(),
        }
    }
}
|
||||
|
||||
impl<BF: Copy + scalar::ScalarTrait> PointT<BF> {
    /// Builds a projective point from three u32 limb slices.
    pub fn from_limbs(x: &[u32], y: &[u32], z: &[u32]) -> Self {
        PointT {
            x: BF::from_limbs(x),
            y: BF::from_limbs(y),
            z: BF::from_limbs(z)
        }
    }

    /// Builds a projective point from one flat slice of exactly
    /// 3 * base_limbs() u32 values laid out as [x | y | z]. Panics otherwise.
    pub fn from_xy_limbs(value: &[u32]) -> PointT<BF> {
        let l = value.len();
        assert_eq!(l, 3 * BF::base_limbs(), "length must be 3 * {}", BF::base_limbs());
        PointT {
            x: BF::from_limbs(value[..BF::base_limbs()].try_into().unwrap()),
            y: BF::from_limbs(value[BF::base_limbs()..BF::base_limbs() * 2].try_into().unwrap()),
            z: BF::from_limbs(value[BF::base_limbs() * 2..].try_into().unwrap())
        }
    }

    /// Drops z without normalizing — NOT a projective-to-affine conversion
    /// unless z == 1; it simply copies x and y as-is.
    pub fn to_xy_strip_z(&self) -> PointAffineNoInfinityT<BF> {
        PointAffineNoInfinityT {
            x: self.x,
            y: self.y,
        }
    }
}
|
||||
102
bls12-381/src/basic_structs/scalar.rs
Normal file
102
bls12-381/src/basic_structs/scalar.rs
Normal file
@@ -0,0 +1,102 @@
|
||||
use std::ffi::{c_int, c_uint};
|
||||
use rand::{rngs::StdRng, RngCore, SeedableRng};
|
||||
use rustacuda_core::DeviceCopy;
|
||||
use rustacuda_derive::DeviceCopy;
|
||||
use std::mem::transmute;
|
||||
use rustacuda::prelude::*;
|
||||
use rustacuda_core::DevicePointer;
|
||||
use rustacuda::memory::{DeviceBox, CopyDestination};
|
||||
|
||||
use icicle_core::utils::{u32_vec_to_u64_vec, u64_vec_to_u32_vec};
|
||||
|
||||
use std::marker::PhantomData;
|
||||
use std::convert::TryInto;
|
||||
|
||||
use super::field::{Field, self};
|
||||
|
||||
/// Copies `val` into a fixed-size limb array, zero-padding on the right.
///
/// Panics with "slice has too many elements" when `val` is longer than
/// `NUM_LIMBS`.
pub fn get_fixed_limbs<const NUM_LIMBS: usize>(val: &[u32]) -> [u32; NUM_LIMBS] {
    if val.len() > NUM_LIMBS {
        panic!("slice has too many elements");
    }
    let mut limbs = [0u32; NUM_LIMBS];
    limbs[..val.len()].copy_from_slice(val);
    limbs
}
|
||||
|
||||
/// Minimal interface for fixed-width limb-based field elements: limb count,
/// additive/multiplicative identities, and limb/byte (de)serialization.
pub trait ScalarTrait{
    // Number of u32 limbs in the representation.
    fn base_limbs() -> usize;
    // Additive identity (all limbs zero).
    fn zero() -> Self;
    // Construct from little-endian u32 limbs, zero-padded to base_limbs().
    fn from_limbs(value: &[u32]) -> Self;
    // Multiplicative identity (limb 0 == 1, rest zero).
    fn one() -> Self;
    // Little-endian byte serialization of the limbs.
    fn to_bytes_le(&self) -> Vec<u8>;
    // Borrow the raw limbs.
    fn limbs(&self) -> &[u32];
}
|
||||
|
||||
/// Field element stored as `NUM_LIMBS` little-endian u32 limbs, tagged by a
/// zero-sized field marker `M`. `#[repr(C)]` so only `value` occupies memory,
/// matching the CUDA layout; `PhantomData<M>` carries the field type.
#[derive(Debug, PartialEq, Clone, Copy)]
#[repr(C)]
pub struct ScalarT<M, const NUM_LIMBS: usize> {
    pub(crate) phantom: PhantomData<M>,
    pub(crate) value : [u32; NUM_LIMBS]
}
|
||||
|
||||
impl<M, const NUM_LIMBS: usize> ScalarTrait for ScalarT<M, NUM_LIMBS>
where
    M: Field<NUM_LIMBS>,
{

    /// Limb count, fixed by the const generic.
    fn base_limbs() -> usize {
        return NUM_LIMBS;
    }

    /// Additive identity: all limbs zero.
    fn zero() -> Self {
        ScalarT {
            value: [0u32; NUM_LIMBS],
            phantom: PhantomData,
        }
    }

    /// Construct from little-endian limbs, zero-padded (panics if too long;
    /// see get_fixed_limbs).
    fn from_limbs(value: &[u32]) -> Self {
        Self {
            value: get_fixed_limbs(value),
            phantom: PhantomData,
        }
    }

    /// Multiplicative identity: limb 0 == 1, rest zero. Note this is the
    /// plain (non-Montgomery) representation of 1.
    fn one() -> Self {
        let mut s = [0u32; NUM_LIMBS];
        s[0] = 1;
        ScalarT { value: s, phantom: PhantomData }
    }

    /// Little-endian byte serialization: each limb's LE bytes in limb order.
    fn to_bytes_le(&self) -> Vec<u8> {
        self.value
            .iter()
            .map(|s| s.to_le_bytes().to_vec())
            .flatten()
            .collect::<Vec<_>>()
    }

    /// Borrow the raw little-endian limbs.
    fn limbs(&self) -> &[u32] {
        &self.value
    }
}
|
||||
|
||||
impl<M, const NUM_LIMBS: usize> ScalarT<M, NUM_LIMBS> where M: field::Field<NUM_LIMBS>{
|
||||
pub fn from_limbs_le(value: &[u32]) -> ScalarT<M,NUM_LIMBS> {
|
||||
Self::from_limbs(value)
|
||||
}
|
||||
|
||||
pub fn from_limbs_be(value: &[u32]) -> ScalarT<M,NUM_LIMBS> {
|
||||
let mut value = value.to_vec();
|
||||
value.reverse();
|
||||
Self::from_limbs_le(&value)
|
||||
}
|
||||
|
||||
// Additional Functions
|
||||
pub fn add(&self, other:ScalarT<M, NUM_LIMBS>) -> ScalarT<M,NUM_LIMBS>{ // overload +
|
||||
return ScalarT{value: [self.value[0] + other.value[0];NUM_LIMBS], phantom: PhantomData };
|
||||
}
|
||||
}
|
||||
62
bls12-381/src/curve_structs.rs
Normal file
62
bls12-381/src/curve_structs.rs
Normal file
@@ -0,0 +1,62 @@
|
||||
use std::ffi::{c_int, c_uint};
|
||||
use rand::{rngs::StdRng, RngCore, SeedableRng};
|
||||
use rustacuda_derive::DeviceCopy;
|
||||
use std::mem::transmute;
|
||||
use rustacuda::prelude::*;
|
||||
use rustacuda_core::DevicePointer;
|
||||
use rustacuda::memory::{DeviceBox, CopyDestination, DeviceCopy};
|
||||
|
||||
use std::marker::PhantomData;
|
||||
use std::convert::TryInto;
|
||||
|
||||
use crate::basic_structs::point::{PointT, PointAffineNoInfinityT};
|
||||
use crate::basic_structs::scalar::ScalarT;
|
||||
use crate::basic_structs::field::Field;
|
||||
|
||||
|
||||
/// Zero-sized marker for the BLS12-381 scalar field (8 u32 limbs = 256 bits).
#[derive(Debug, PartialEq, Clone, Copy,DeviceCopy)]
#[repr(C)]
pub struct ScalarField;
impl Field<8> for ScalarField {
    // Placeholder modulus (all zeros) — the real modulus lives on the CUDA
    // side; nothing in the Rust code reads this yet.
    const MODOLUS: [u32; 8] = [0x0;8];
}
|
||||
|
||||
/// Zero-sized marker for the BLS12-381 base field (12 u32 limbs = 384 bits).
#[derive(Debug, PartialEq, Clone, Copy,DeviceCopy)]
#[repr(C)]
pub struct BaseField;
impl Field<12> for BaseField {
    // Placeholder modulus (all zeros), same caveat as ScalarField.
    const MODOLUS: [u32; 12] = [0x0;12];
}
|
||||
|
||||
|
||||
// Concrete scalar-field element: 8 little-endian u32 limbs.
pub type Scalar = ScalarT<ScalarField,8>;
impl Default for Scalar {
    // All-zero limbs (additive identity).
    fn default() -> Self {
        Self{value: [0x0;ScalarField::LIMBS], phantom: PhantomData }
    }
}

// SAFETY: Scalar is #[repr(C)] plain-old-data (a u32 array plus a
// zero-sized PhantomData), so a bitwise device copy is sound.
unsafe impl DeviceCopy for Scalar{}
|
||||
|
||||
|
||||
// Concrete base-field element: 12 little-endian u32 limbs.
pub type Base = ScalarT<BaseField,12>;
impl Default for Base {
    // All-zero limbs (additive identity).
    fn default() -> Self {
        Self{value: [0x0;BaseField::LIMBS], phantom: PhantomData }
    }
}

// SAFETY: Base is #[repr(C)] plain-old-data, same reasoning as Scalar.
unsafe impl DeviceCopy for Base{}

// Curve point aliases used throughout the FFI layer.
pub type Point = PointT<Base>;
pub type PointAffineNoInfinity = PointAffineNoInfinityT<Base>;
|
||||
|
||||
extern "C" {
    // CUDA-side projective-point equality (handles differing z coordinates);
    // returns nonzero when equal.
    fn eq(point1: *const Point, point2: *const Point) -> c_uint;
}
|
||||
|
||||
impl PartialEq for Point {
    /// Delegates projective equality to the CUDA `eq` kernel wrapper.
    fn eq(&self, other: &Self) -> bool {
        // SAFETY: both references are valid, properly aligned #[repr(C)]
        // Points for the duration of the call.
        unsafe { eq(self, other) != 0 }
    }
}
|
||||
798
bls12-381/src/from_cuda.rs
Normal file
798
bls12-381/src/from_cuda.rs
Normal file
@@ -0,0 +1,798 @@
|
||||
use std::ffi::{c_int, c_uint};
|
||||
use ark_std::UniformRand;
|
||||
use rand::{rngs::StdRng, RngCore, SeedableRng};
|
||||
use rustacuda::CudaFlags;
|
||||
use rustacuda::memory::DeviceBox;
|
||||
use rustacuda::prelude::{DeviceBuffer, Device, ContextFlags, Context};
|
||||
use rustacuda_core::DevicePointer;
|
||||
use std::mem::transmute;
|
||||
use crate::basic_structs::scalar::ScalarTrait;
|
||||
use crate::curve_structs::*;
|
||||
use icicle_core::utils::{u32_vec_to_u64_vec, u64_vec_to_u32_vec};
|
||||
use std::marker::PhantomData;
|
||||
use std::convert::TryInto;
|
||||
use ark_bls12_381::{Fq as Fq_BLS12_381, Fr as Fr_BLS12_381, G1Affine as G1Affine_BLS12_381, G1Projective as G1Projective_BLS12_381};
|
||||
use ark_ec::AffineCurve;
|
||||
use ark_ff::{BigInteger384, BigInteger256, PrimeField};
|
||||
use rustacuda::memory::{CopyDestination, DeviceCopy};
|
||||
|
||||
// Raw FFI into the icicle CUDA library (compiled by build.rs as ingo_icicle).
// Convention: `DevicePointer`/device-prefixed (`d_*`) arguments reference
// device memory; raw pointers reference host memory. Integer returns are
// status codes (presumably 0 on success — confirm against the C++ side).
extern "C" {
    // --- Multi-scalar multiplication (host-memory inputs) ---
    fn msm_cuda(
        out: *mut Point,
        points: *const PointAffineNoInfinity,
        scalars: *const Scalar,
        count: usize,
        device_id: usize,
    ) -> c_uint;

    fn msm_batch_cuda(
        out: *mut Point,
        points: *const PointAffineNoInfinity,
        scalars: *const Scalar,
        batch_size: usize,
        msm_size: usize,
        device_id: usize,
    ) -> c_uint;

    // --- MSM on device-resident buffers (commitments) ---
    fn commit_cuda(
        d_out: DevicePointer<Point>,
        d_scalars: DevicePointer<Scalar>,
        d_points: DevicePointer<PointAffineNoInfinity>,
        count: usize,
        device_id: usize,
    ) -> c_uint;

    fn commit_batch_cuda(
        d_out: DevicePointer<Point>,
        d_scalars: DevicePointer<Scalar>,
        d_points: DevicePointer<PointAffineNoInfinity>,
        count: usize,
        batch_size: usize,
        device_id: usize,
    ) -> c_uint;

    // Allocates and returns a device-resident twiddle-factor domain; the
    // caller takes ownership of the returned device pointer.
    fn build_domain_cuda(domain_size: usize, logn: usize, inverse: bool, device_id: usize) -> DevicePointer<Scalar>;

    // --- In-place (EC)NTT over host memory ---
    fn ntt_cuda(inout: *mut Scalar, n: usize, inverse: bool, device_id: usize) -> c_int;

    fn ecntt_cuda(inout: *mut Point, n: usize, inverse: bool, device_id: usize) -> c_int;

    // Note: call sites in this file pass the total array length as
    // `arr_size` and the batch count as `n`; no device_id parameter.
    fn ntt_batch_cuda(
        inout: *mut Scalar,
        arr_size: usize,
        n: usize,
        inverse: bool,
    ) -> c_int;

    fn ecntt_batch_cuda(inout: *mut Point, arr_size: usize, n: usize, inverse: bool) -> c_int;

    // --- Interpolation (inverse NTT) on device buffers ---
    fn interpolate_scalars_cuda(
        d_out: DevicePointer<Scalar>,
        d_evaluations: DevicePointer<Scalar>,
        d_domain: DevicePointer<Scalar>,
        n: usize,
        device_id: usize
    ) -> c_int;

    fn interpolate_scalars_batch_cuda(
        d_out: DevicePointer<Scalar>,
        d_evaluations: DevicePointer<Scalar>,
        d_domain: DevicePointer<Scalar>,
        n: usize,
        batch_size: usize,
        device_id: usize
    ) -> c_int;

    fn interpolate_points_cuda(
        d_out: DevicePointer<Point>,
        d_evaluations: DevicePointer<Point>,
        d_domain: DevicePointer<Scalar>,
        n: usize,
        device_id: usize
    ) -> c_int;

    fn interpolate_points_batch_cuda(
        d_out: DevicePointer<Point>,
        d_evaluations: DevicePointer<Point>,
        d_domain: DevicePointer<Scalar>,
        n: usize,
        batch_size: usize,
        device_id: usize
    ) -> c_int;

    // --- Evaluation (forward NTT) on device buffers; n coefficients are
    // evaluated over a domain of domain_size points ---
    fn evaluate_scalars_cuda(
        d_out: DevicePointer<Scalar>,
        d_coefficients: DevicePointer<Scalar>,
        d_domain: DevicePointer<Scalar>,
        domain_size: usize,
        n: usize,
        device_id: usize
    ) -> c_int;

    fn evaluate_scalars_batch_cuda(
        d_out: DevicePointer<Scalar>,
        d_coefficients: DevicePointer<Scalar>,
        d_domain: DevicePointer<Scalar>,
        domain_size: usize,
        n: usize,
        batch_size: usize,
        device_id: usize
    ) -> c_int;

    fn evaluate_points_cuda(
        d_out: DevicePointer<Point>,
        d_coefficients: DevicePointer<Point>,
        d_domain: DevicePointer<Scalar>,
        domain_size: usize,
        n: usize,
        device_id: usize
    ) -> c_int;

    fn evaluate_points_batch_cuda(
        d_out: DevicePointer<Point>,
        d_coefficients: DevicePointer<Point>,
        d_domain: DevicePointer<Scalar>,
        domain_size: usize,
        n: usize,
        batch_size: usize,
        device_id: usize
    ) -> c_int;

    // --- Evaluation on a coset (domain shifted by coset_powers) ---
    fn evaluate_scalars_on_coset_cuda(
        d_out: DevicePointer<Scalar>,
        d_coefficients: DevicePointer<Scalar>,
        d_domain: DevicePointer<Scalar>,
        domain_size: usize,
        n: usize,
        coset_powers: DevicePointer<Scalar>,
        device_id: usize
    ) -> c_int;

    fn evaluate_scalars_on_coset_batch_cuda(
        d_out: DevicePointer<Scalar>,
        d_coefficients: DevicePointer<Scalar>,
        d_domain: DevicePointer<Scalar>,
        domain_size: usize,
        n: usize,
        batch_size: usize,
        coset_powers: DevicePointer<Scalar>,
        device_id: usize
    ) -> c_int;

    fn evaluate_points_on_coset_cuda(
        d_out: DevicePointer<Point>,
        d_coefficients: DevicePointer<Point>,
        d_domain: DevicePointer<Scalar>,
        domain_size: usize,
        n: usize,
        coset_powers: DevicePointer<Scalar>,
        device_id: usize
    ) -> c_int;

    fn evaluate_points_on_coset_batch_cuda(
        d_out: DevicePointer<Point>,
        d_coefficients: DevicePointer<Point>,
        d_domain: DevicePointer<Scalar>,
        domain_size: usize,
        n: usize,
        batch_size: usize,
        coset_powers: DevicePointer<Scalar>,
        device_id: usize
    ) -> c_int;

    // --- In-place bit-reversal reordering of device arrays ---
    fn reverse_order_scalars_cuda(
        d_arr: DevicePointer<Scalar>,
        n: usize,
        device_id: usize
    ) -> c_int;

    fn reverse_order_scalars_batch_cuda(
        d_arr: DevicePointer<Scalar>,
        n: usize,
        batch_size: usize,
        device_id: usize
    ) -> c_int;

    fn reverse_order_points_cuda(
        d_arr: DevicePointer<Point>,
        n: usize,
        device_id: usize
    ) -> c_int;

    fn reverse_order_points_batch_cuda(
        d_arr: DevicePointer<Point>,
        n: usize,
        batch_size: usize,
        device_id: usize
    ) -> c_int;

    // --- Element-wise / matrix modular multiplication (host memory) ---
    fn vec_mod_mult_point(
        inout: *mut Point,
        scalars: *const Scalar,
        n_elements: usize,
        device_id: usize,
    ) -> c_int;

    fn vec_mod_mult_scalar(
        inout: *mut Scalar,
        scalars: *const Scalar,
        n_elements: usize,
        device_id: usize,
    ) -> c_int;

    fn matrix_vec_mod_mult(
        matrix_flattened: *const Scalar,
        input: *const Scalar,
        output: *mut Scalar,
        n_elements: usize,
        device_id: usize,
    ) -> c_int;
}
|
||||
|
||||
/// Multi-scalar multiplication: sum(scalars[i] * points[i]) on the GPU.
/// `points` and `scalars` must have equal length (mismatch currently
/// panics via todo!). The c_uint return of msm_cuda is discarded.
pub fn msm(points: &[PointAffineNoInfinity], scalars: &[Scalar], device_id: usize) -> Point {
    let count = points.len();
    if count != scalars.len() {
        todo!("variable length")
    }

    let mut ret = Point::zero();
    // SAFETY: slices outlive the call; lengths passed match `scalars`.
    unsafe {
        msm_cuda(
            &mut ret as *mut _ as *mut Point,
            points as *const _ as *const PointAffineNoInfinity,
            scalars as *const _ as *const Scalar,
            scalars.len(),
            device_id,
        )
    };

    ret
}
|
||||
|
||||
/// Batched MSM: `batch_size` independent MSMs of `count / batch_size`
/// elements each, laid out contiguously in `points`/`scalars`.
/// Returns one result point per batch. The c_uint status is discarded.
pub fn msm_batch(
    points: &[PointAffineNoInfinity],
    scalars: &[Scalar],
    batch_size: usize,
    device_id: usize,
) -> Vec<Point> {
    let count = points.len();
    if count != scalars.len() {
        todo!("variable length")
    }

    let mut ret = vec![Point::zero(); batch_size];

    // SAFETY: `ret` holds batch_size elements; inputs are contiguous and
    // outlive the call. Note count must be divisible by batch_size.
    unsafe {
        msm_batch_cuda(
            &mut ret[0] as *mut _ as *mut Point,
            points as *const _ as *const PointAffineNoInfinity,
            scalars as *const _ as *const Scalar,
            batch_size,
            count / batch_size,
            device_id,
        )
    };

    ret
}
|
||||
|
||||
/// MSM over device-resident buffers (polynomial commitment); the result
/// stays on the device. Always uses device 0; status code is discarded.
pub fn commit(
    points: &mut DeviceBuffer<PointAffineNoInfinity>,
    scalars: &mut DeviceBuffer<Scalar>,
) -> DeviceBox<Point> {
    let mut res = DeviceBox::new(&Point::zero()).unwrap();
    // SAFETY: all device pointers come from live rustacuda allocations.
    unsafe {
        commit_cuda(
            res.as_device_ptr(),
            scalars.as_device_ptr(),
            points.as_device_ptr(),
            scalars.len(),
            0,
        );
    }
    return res;
}
|
||||
|
||||
/// Batched device-side MSM: `batch_size` commitments over contiguous
/// sub-buffers of `scalars.len() / batch_size` elements each.
/// Returns a device buffer of batch_size points; uses device 0.
pub fn commit_batch(
    points: &mut DeviceBuffer<PointAffineNoInfinity>,
    scalars: &mut DeviceBuffer<Scalar>,
    batch_size: usize,
) -> DeviceBuffer<Point> {
    // SAFETY: uninitialized is fine — the kernel writes every element.
    let mut res = unsafe { DeviceBuffer::uninitialized(batch_size).unwrap() };
    unsafe {
        commit_batch_cuda(
            res.as_device_ptr(),
            scalars.as_device_ptr(),
            points.as_device_ptr(),
            scalars.len() / batch_size,
            batch_size,
            0,
        );
    }
    return res;
}
|
||||
|
||||
/// Compute an in-place NTT on the input data.
/// Returns the CUDA status code from ntt_cuda (callers currently ignore it).
fn ntt_internal(values: &mut [Scalar], device_id: usize, inverse: bool) -> i32 {
    // SAFETY: `values` is a valid mutable slice for the duration of the call.
    let ret_code = unsafe {
        ntt_cuda(
            values as *mut _ as *mut Scalar,
            values.len(),
            inverse,
            device_id,
        )
    };
    ret_code
}
|
||||
|
||||
/// In-place forward NTT; the FFI status code is discarded.
pub fn ntt(values: &mut [Scalar], device_id: usize) {
    ntt_internal(values, device_id, false);
}
|
||||
|
||||
/// In-place inverse NTT; the FFI status code is discarded.
pub fn intt(values: &mut [Scalar], device_id: usize) {
    ntt_internal(values, device_id, true);
}
|
||||
|
||||
/// Compute an in-place NTT on the input data.
/// Batched variant: `values` holds `batch_size` contiguous transforms.
/// NOTE: `device_id` is accepted but never used — ntt_batch_cuda takes no
/// device argument.
fn ntt_internal_batch(
    values: &mut [Scalar],
    device_id: usize,
    batch_size: usize,
    inverse: bool,
) -> i32 {
    // SAFETY: `values` is a valid mutable slice for the duration of the call.
    unsafe {
        ntt_batch_cuda(
            values as *mut _ as *mut Scalar,
            values.len(),
            batch_size,
            inverse,
        )
    }
}
|
||||
|
||||
pub fn ntt_batch(values: &mut [Scalar], batch_size: usize, device_id: usize) {
|
||||
ntt_internal_batch(values, 0, batch_size, false);
|
||||
}
|
||||
|
||||
pub fn intt_batch(values: &mut [Scalar], batch_size: usize, device_id: usize) {
|
||||
ntt_internal_batch(values, 0, batch_size, true);
|
||||
}
|
||||
|
||||
/// Compute an in-place ECNTT on the input data.
/// Returns the CUDA status code (callers currently ignore it).
fn ecntt_internal(values: &mut [Point], inverse: bool, device_id: usize) -> i32 {
    // SAFETY: `values` is a valid mutable slice for the duration of the call.
    unsafe {
        ecntt_cuda(
            values as *mut _ as *mut Point,
            values.len(),
            inverse,
            device_id,
        )
    }
}
|
||||
|
||||
/// In-place forward ECNTT; the FFI status code is discarded.
pub fn ecntt(values: &mut [Point], device_id: usize) {
    ecntt_internal(values, false, device_id);
}
|
||||
|
||||
/// Compute an in-place iECNTT on the input data.
/// The FFI status code is discarded.
pub fn iecntt(values: &mut [Point], device_id: usize) {
    ecntt_internal(values, true, device_id);
}
|
||||
|
||||
/// Compute an in-place ECNTT on the input data.
/// Batched variant: `values` holds `batch_size` contiguous transforms.
/// NOTE: `device_id` is accepted but never used — ecntt_batch_cuda takes
/// no device argument.
fn ecntt_internal_batch(
    values: &mut [Point],
    device_id: usize,
    batch_size: usize,
    inverse: bool,
) -> i32 {
    // SAFETY: `values` is a valid mutable slice for the duration of the call.
    unsafe {
        ecntt_batch_cuda(
            values as *mut _ as *mut Point,
            values.len(),
            batch_size,
            inverse,
        )
    }
}
|
||||
|
||||
pub fn ecntt_batch(values: &mut [Point], batch_size: usize, device_id: usize) {
|
||||
ecntt_internal_batch(values, 0, batch_size, false);
|
||||
}
|
||||
|
||||
/// Compute an in-place iECNTT on the input data.
|
||||
pub fn iecntt_batch(values: &mut [Point], batch_size: usize, device_id: usize) {
|
||||
ecntt_internal_batch(values, 0, batch_size, true);
|
||||
}
|
||||
|
||||
/// Build an evaluation domain of `domain_size` roots of unity on the device and
/// return it as a device-resident buffer. `inverse` selects the inverse-root domain.
/// NOTE(review): assumes `build_domain_cuda` returns a pointer to exactly
/// `domain_size` device elements that this buffer may take ownership of — confirm.
pub fn build_domain(domain_size: usize, logn: usize, inverse: bool) -> DeviceBuffer<Scalar> {
    unsafe {
        // Wrap the raw device allocation produced by the CUDA side; the returned
        // DeviceBuffer will free it on drop.
        DeviceBuffer::from_raw_parts(build_domain_cuda(
            domain_size,
            logn,
            inverse,
            0
        ), domain_size)
    }
}
|
||||
|
||||
|
||||
/// Reorder `d_scalars` in place on the device into bit-reversed index order
/// (the permutation required before/after the NTT kernels).
pub fn reverse_order_scalars(
    d_scalars: &mut DeviceBuffer<Scalar>,
) {
    unsafe { reverse_order_scalars_cuda(
        d_scalars.as_device_ptr(),
        d_scalars.len(),
        0
    ); }
}
|
||||
|
||||
/// Bit-reverse-reorder each of the `batch_size` contiguous scalar sub-vectors
/// in `d_scalars`, in place on the device.
pub fn reverse_order_scalars_batch(
    d_scalars: &mut DeviceBuffer<Scalar>,
    batch_size: usize,
) {
    unsafe { reverse_order_scalars_batch_cuda(
        d_scalars.as_device_ptr(),
        // Per-batch vector length; assumes len() is divisible by batch_size.
        d_scalars.len() / batch_size,
        batch_size,
        0
    ); }
}
|
||||
|
||||
/// Reorder `d_points` in place on the device into bit-reversed index order.
pub fn reverse_order_points(
    d_points: &mut DeviceBuffer<Point>,
) {
    unsafe { reverse_order_points_cuda(
        d_points.as_device_ptr(),
        d_points.len(),
        0
    ); }
}
|
||||
|
||||
/// Bit-reverse-reorder each of the `batch_size` contiguous point sub-vectors
/// in `d_points`, in place on the device.
pub fn reverse_order_points_batch(
    d_points: &mut DeviceBuffer<Point>,
    batch_size: usize,
) {
    unsafe { reverse_order_points_batch_cuda(
        d_points.as_device_ptr(),
        // Per-batch vector length; assumes len() is divisible by batch_size.
        d_points.len() / batch_size,
        batch_size,
        0
    ); }
}
|
||||
|
||||
pub fn interpolate_scalars(
|
||||
d_evaluations: &mut DeviceBuffer<Scalar>,
|
||||
d_domain: &mut DeviceBuffer<Scalar>
|
||||
) -> DeviceBuffer<Scalar> {
|
||||
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
|
||||
unsafe { interpolate_scalars_cuda(
|
||||
res.as_device_ptr(),
|
||||
d_evaluations.as_device_ptr(),
|
||||
d_domain.as_device_ptr(),
|
||||
d_domain.len(),
|
||||
0
|
||||
) };
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn interpolate_scalars_batch(
|
||||
d_evaluations: &mut DeviceBuffer<Scalar>,
|
||||
d_domain: &mut DeviceBuffer<Scalar>,
|
||||
batch_size: usize,
|
||||
) -> DeviceBuffer<Scalar> {
|
||||
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
|
||||
unsafe { interpolate_scalars_batch_cuda(
|
||||
res.as_device_ptr(),
|
||||
d_evaluations.as_device_ptr(),
|
||||
d_domain.as_device_ptr(),
|
||||
d_domain.len(),
|
||||
batch_size,
|
||||
0
|
||||
) };
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn interpolate_points(
|
||||
d_evaluations: &mut DeviceBuffer<Point>,
|
||||
d_domain: &mut DeviceBuffer<Scalar>,
|
||||
) -> DeviceBuffer<Point> {
|
||||
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
|
||||
unsafe { interpolate_points_cuda(
|
||||
res.as_device_ptr(),
|
||||
d_evaluations.as_device_ptr(),
|
||||
d_domain.as_device_ptr(),
|
||||
d_domain.len(),
|
||||
0
|
||||
) };
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn interpolate_points_batch(
|
||||
d_evaluations: &mut DeviceBuffer<Point>,
|
||||
d_domain: &mut DeviceBuffer<Scalar>,
|
||||
batch_size: usize,
|
||||
) -> DeviceBuffer<Point> {
|
||||
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
|
||||
unsafe { interpolate_points_batch_cuda(
|
||||
res.as_device_ptr(),
|
||||
d_evaluations.as_device_ptr(),
|
||||
d_domain.as_device_ptr(),
|
||||
d_domain.len(),
|
||||
batch_size,
|
||||
0
|
||||
) };
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn evaluate_scalars(
|
||||
d_coefficients: &mut DeviceBuffer<Scalar>,
|
||||
d_domain: &mut DeviceBuffer<Scalar>,
|
||||
) -> DeviceBuffer<Scalar> {
|
||||
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
|
||||
unsafe {
|
||||
evaluate_scalars_cuda(
|
||||
res.as_device_ptr(),
|
||||
d_coefficients.as_device_ptr(),
|
||||
d_domain.as_device_ptr(),
|
||||
d_domain.len(),
|
||||
d_coefficients.len(),
|
||||
0
|
||||
);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn evaluate_scalars_batch(
|
||||
d_coefficients: &mut DeviceBuffer<Scalar>,
|
||||
d_domain: &mut DeviceBuffer<Scalar>,
|
||||
batch_size: usize,
|
||||
) -> DeviceBuffer<Scalar> {
|
||||
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
|
||||
unsafe {
|
||||
evaluate_scalars_batch_cuda(
|
||||
res.as_device_ptr(),
|
||||
d_coefficients.as_device_ptr(),
|
||||
d_domain.as_device_ptr(),
|
||||
d_domain.len(),
|
||||
d_coefficients.len() / batch_size,
|
||||
batch_size,
|
||||
0
|
||||
);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn evaluate_points(
|
||||
d_coefficients: &mut DeviceBuffer<Point>,
|
||||
d_domain: &mut DeviceBuffer<Scalar>,
|
||||
) -> DeviceBuffer<Point> {
|
||||
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
|
||||
unsafe {
|
||||
evaluate_points_cuda(
|
||||
res.as_device_ptr(),
|
||||
d_coefficients.as_device_ptr(),
|
||||
d_domain.as_device_ptr(),
|
||||
d_domain.len(),
|
||||
d_coefficients.len(),
|
||||
0
|
||||
);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn evaluate_points_batch(
|
||||
d_coefficients: &mut DeviceBuffer<Point>,
|
||||
d_domain: &mut DeviceBuffer<Scalar>,
|
||||
batch_size: usize,
|
||||
) -> DeviceBuffer<Point> {
|
||||
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
|
||||
unsafe {
|
||||
evaluate_points_batch_cuda(
|
||||
res.as_device_ptr(),
|
||||
d_coefficients.as_device_ptr(),
|
||||
d_domain.as_device_ptr(),
|
||||
d_domain.len(),
|
||||
d_coefficients.len() / batch_size,
|
||||
batch_size,
|
||||
0
|
||||
);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn evaluate_scalars_on_coset(
|
||||
d_coefficients: &mut DeviceBuffer<Scalar>,
|
||||
d_domain: &mut DeviceBuffer<Scalar>,
|
||||
coset_powers: &mut DeviceBuffer<Scalar>,
|
||||
) -> DeviceBuffer<Scalar> {
|
||||
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
|
||||
unsafe {
|
||||
evaluate_scalars_on_coset_cuda(
|
||||
res.as_device_ptr(),
|
||||
d_coefficients.as_device_ptr(),
|
||||
d_domain.as_device_ptr(),
|
||||
d_domain.len(),
|
||||
d_coefficients.len(),
|
||||
coset_powers.as_device_ptr(),
|
||||
0
|
||||
);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn evaluate_scalars_on_coset_batch(
|
||||
d_coefficients: &mut DeviceBuffer<Scalar>,
|
||||
d_domain: &mut DeviceBuffer<Scalar>,
|
||||
batch_size: usize,
|
||||
coset_powers: &mut DeviceBuffer<Scalar>,
|
||||
) -> DeviceBuffer<Scalar> {
|
||||
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
|
||||
unsafe {
|
||||
evaluate_scalars_on_coset_batch_cuda(
|
||||
res.as_device_ptr(),
|
||||
d_coefficients.as_device_ptr(),
|
||||
d_domain.as_device_ptr(),
|
||||
d_domain.len(),
|
||||
d_coefficients.len() / batch_size,
|
||||
batch_size,
|
||||
coset_powers.as_device_ptr(),
|
||||
0
|
||||
);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn evaluate_points_on_coset(
|
||||
d_coefficients: &mut DeviceBuffer<Point>,
|
||||
d_domain: &mut DeviceBuffer<Scalar>,
|
||||
coset_powers: &mut DeviceBuffer<Scalar>,
|
||||
) -> DeviceBuffer<Point> {
|
||||
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
|
||||
unsafe {
|
||||
evaluate_points_on_coset_cuda(
|
||||
res.as_device_ptr(),
|
||||
d_coefficients.as_device_ptr(),
|
||||
d_domain.as_device_ptr(),
|
||||
d_domain.len(),
|
||||
d_coefficients.len(),
|
||||
coset_powers.as_device_ptr(),
|
||||
0
|
||||
);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn evaluate_points_on_coset_batch(
|
||||
d_coefficients: &mut DeviceBuffer<Point>,
|
||||
d_domain: &mut DeviceBuffer<Scalar>,
|
||||
batch_size: usize,
|
||||
coset_powers: &mut DeviceBuffer<Scalar>,
|
||||
) -> DeviceBuffer<Point> {
|
||||
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
|
||||
unsafe {
|
||||
evaluate_points_on_coset_batch_cuda(
|
||||
res.as_device_ptr(),
|
||||
d_coefficients.as_device_ptr(),
|
||||
d_domain.as_device_ptr(),
|
||||
d_domain.len(),
|
||||
d_coefficients.len() / batch_size,
|
||||
batch_size,
|
||||
coset_powers.as_device_ptr(),
|
||||
0
|
||||
);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
/// Element-wise point-by-scalar multiplication: `a[i] = a[i] * b[i]`, in place.
///
/// # Panics
/// Panics if `a` and `b` have different lengths.
pub fn multp_vec(a: &mut [Point], b: &[Scalar], device_id: usize) {
    assert_eq!(a.len(), b.len());
    unsafe {
        vec_mod_mult_point(
            a as *mut _ as *mut Point,
            b as *const _ as *const Scalar,
            a.len(),
            device_id,
        );
    }
}
|
||||
|
||||
/// Element-wise modular scalar multiplication: `a[i] = a[i] * b[i]`, in place.
///
/// # Panics
/// Panics if `a` and `b` have different lengths.
pub fn mult_sc_vec(a: &mut [Scalar], b: &[Scalar], device_id: usize) {
    assert_eq!(a.len(), b.len());
    unsafe {
        vec_mod_mult_scalar(
            a as *mut _ as *mut Scalar,
            b as *const _ as *const Scalar,
            a.len(),
            device_id,
        );
    }
}
|
||||
|
||||
// Multiply a matrix by a scalar:
|
||||
// `a` - flattenned matrix;
|
||||
// `b` - vector to multiply `a` by;
|
||||
pub fn mult_matrix_by_vec(a: &[Scalar], b: &[Scalar], device_id: usize) -> Vec<Scalar> {
|
||||
let mut c = Vec::with_capacity(b.len());
|
||||
for i in 0..b.len() {
|
||||
c.push(Scalar::zero());
|
||||
}
|
||||
unsafe {
|
||||
matrix_vec_mod_mult(
|
||||
a as *const _ as *const Scalar,
|
||||
b as *const _ as *const Scalar,
|
||||
c.as_mut_slice() as *mut _ as *mut Scalar,
|
||||
b.len(),
|
||||
device_id,
|
||||
);
|
||||
}
|
||||
c
|
||||
}
|
||||
|
||||
pub fn clone_buffer<T: DeviceCopy>(buf: &mut DeviceBuffer<T>) -> DeviceBuffer<T> {
|
||||
let mut buf_cpy = unsafe { DeviceBuffer::uninitialized(buf.len()).unwrap() };
|
||||
unsafe { buf_cpy.copy_from(buf) };
|
||||
return buf_cpy;
|
||||
}
|
||||
|
||||
pub fn get_rng(seed: Option<u64>) -> Box<dyn RngCore> {
|
||||
let rng: Box<dyn RngCore> = match seed {
|
||||
Some(seed) => Box::new(StdRng::seed_from_u64(seed)),
|
||||
None => Box::new(rand::thread_rng()),
|
||||
};
|
||||
rng
|
||||
}
|
||||
|
||||
/// Initialize CUDA and push a context for device 0.
/// NOTE(review): `_ctx` is dropped when this function returns, which in
/// rustacuda destroys/pops the context — confirm the kernels called afterwards
/// still have a live context (this may rely on driver-level caching).
fn set_up_device() {
    // Set up the context, load the module, and create a stream to run kernels in.
    rustacuda::init(CudaFlags::empty()).unwrap();
    let device = Device::get_device(0).unwrap();
    let _ctx = Context::create_and_push(ContextFlags::MAP_HOST | ContextFlags::SCHED_AUTO, device).unwrap();
}
|
||||
|
||||
/// Generate `count` random affine curve points (without the infinity flag),
/// drawn via arkworks and converted to the library's representation.
pub fn generate_random_points(
    count: usize,
    mut rng: Box<dyn RngCore>,
) -> Vec<PointAffineNoInfinity> {
    (0..count)
        .map(|_| Point::from_ark(G1Projective_BLS12_381::rand(&mut rng)).to_xy_strip_z())
        .collect()
}
|
||||
|
||||
/// Generate `count` random projective curve points via arkworks.
pub fn generate_random_points_proj(count: usize, mut rng: Box<dyn RngCore>) -> Vec<Point> {
    (0..count)
        .map(|_| Point::from_ark(G1Projective_BLS12_381::rand(&mut rng)))
        .collect()
}
|
||||
|
||||
/// Generate `count` random scalar-field elements via arkworks.
pub fn generate_random_scalars(count: usize, mut rng: Box<dyn RngCore>) -> Vec<Scalar> {
    (0..count)
        .map(|_| Scalar::from_ark(Fr_BLS12_381::rand(&mut rng).into_repr()))
        .collect()
}
|
||||
|
||||
pub fn set_up_points(test_size: usize, log_domain_size: usize, inverse: bool) -> (Vec<Point>, DeviceBuffer<Point>, DeviceBuffer<Scalar>) {
|
||||
set_up_device();
|
||||
|
||||
let d_domain = build_domain(1 << log_domain_size, log_domain_size, inverse);
|
||||
|
||||
let seed = Some(0); // fix the rng to get two equal scalar
|
||||
let vector = generate_random_points_proj(test_size, get_rng(seed));
|
||||
let mut vector_mut = vector.clone();
|
||||
|
||||
let mut d_vector = DeviceBuffer::from_slice(&vector[..]).unwrap();
|
||||
(vector_mut, d_vector, d_domain)
|
||||
}
|
||||
|
||||
pub fn set_up_scalars(test_size: usize, log_domain_size: usize, inverse: bool) -> (Vec<Scalar>, DeviceBuffer<Scalar>, DeviceBuffer<Scalar>) {
|
||||
set_up_device();
|
||||
|
||||
let d_domain = build_domain(1 << log_domain_size, log_domain_size, inverse);
|
||||
|
||||
let seed = Some(0); // fix the rng to get two equal scalars
|
||||
let mut vector_mut = generate_random_scalars(test_size, get_rng(seed));
|
||||
|
||||
let mut d_vector = DeviceBuffer::from_slice(&vector_mut[..]).unwrap();
|
||||
(vector_mut, d_vector, d_domain)
|
||||
}
|
||||
|
||||
4
bls12-381/src/lib.rs
Normal file
4
bls12-381/src/lib.rs
Normal file
@@ -0,0 +1,4 @@
|
||||
// Crate modules for the BLS12-381 curve bindings.
pub mod test_bls12_381;
pub mod basic_structs;
pub mod from_cuda;
pub mod curve_structs;
|
||||
816
bls12-381/src/test_bls12_381.rs
Normal file
816
bls12-381/src/test_bls12_381.rs
Normal file
@@ -0,0 +1,816 @@
|
||||
use std::ffi::{c_int, c_uint};
|
||||
use ark_std::UniformRand;
|
||||
use rand::{rngs::StdRng, RngCore, SeedableRng};
|
||||
use rustacuda::CudaFlags;
|
||||
use rustacuda::memory::DeviceBox;
|
||||
use rustacuda::prelude::{DeviceBuffer, Device, ContextFlags, Context};
|
||||
use rustacuda_core::DevicePointer;
|
||||
use std::mem::transmute;
|
||||
pub use crate::basic_structs::scalar::ScalarTrait;
|
||||
pub use crate::curve_structs::*;
|
||||
use icicle_core::utils::{u32_vec_to_u64_vec, u64_vec_to_u32_vec};
|
||||
use std::marker::PhantomData;
|
||||
use std::convert::TryInto;
|
||||
use ark_bls12_381::{Fq as Fq_BLS12_381, Fr as Fr_BLS12_381, G1Affine as G1Affine_BLS12_381, G1Projective as G1Projective_BLS12_381};
|
||||
use ark_ec::AffineCurve;
|
||||
use ark_ff::{BigInteger384, BigInteger256, PrimeField};
|
||||
use rustacuda::memory::{CopyDestination, DeviceCopy};
|
||||
|
||||
|
||||
impl Scalar {
    /// Reinterpret the u32 limbs as an ark `BigInteger256`.
    /// NOTE(review): the name says 254 (presumably the field's bit size) but the
    /// return type is `BigInteger256` — confirm the naming is intentional.
    pub fn to_biginteger254(&self) -> BigInteger256 {
        BigInteger256::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap())
    }

    /// Convert the limbs to an ark `BigInteger256` (no Montgomery conversion).
    pub fn to_ark(&self) -> BigInteger256 {
        BigInteger256::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap())
    }

    /// Build a `Scalar` from an ark `BigInteger256` by splitting its u64 limbs.
    pub fn from_biginteger256(ark: BigInteger256) -> Self {
        Self{ value: u64_vec_to_u32_vec(&ark.0).try_into().unwrap(), phantom : PhantomData}
    }

    /// Bit-cast to `BigInteger256`.
    // assumes Scalar and BigInteger256 have identical size/layout — TODO confirm
    pub fn to_biginteger256_transmute(&self) -> BigInteger256 {
        unsafe { transmute(*self) }
    }

    /// Bit-cast from `BigInteger256`.
    pub fn from_biginteger_transmute(v: BigInteger256) -> Scalar {
        Scalar{ value: unsafe{ transmute(v)}, phantom : PhantomData }
    }

    /// Bit-cast to `Fr_BLS12_381`.
    // NOTE(review): valid only if the limbs are already in ark's internal
    // (Montgomery) representation — confirm against the CUDA side.
    pub fn to_ark_transmute(&self) -> Fr_BLS12_381 {
        unsafe { std::mem::transmute(*self) }
    }

    /// Bit-cast from `Fr_BLS12_381` (copies the field element's raw limbs).
    pub fn from_ark_transmute(v: &Fr_BLS12_381) -> Scalar {
        unsafe { std::mem::transmute_copy(v) }
    }

    /// Interpret the limbs directly as an `Fr` value (no reduction/validation).
    pub fn to_ark_mod_p(&self) -> Fr_BLS12_381 {
        Fr_BLS12_381::new(BigInteger256::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap()))
    }

    /// Interpret the limbs as a canonical representative and convert via
    /// `from_repr` (panics if the value is not a valid field element).
    pub fn to_ark_repr(&self) -> Fr_BLS12_381 {
        Fr_BLS12_381::from_repr(BigInteger256::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap())).unwrap()
    }

    /// Build a `Scalar` from an ark big integer by splitting its u64 limbs.
    pub fn from_ark(v: BigInteger256) -> Scalar {
        Self { value : u64_vec_to_u32_vec(&v.0).try_into().unwrap(), phantom: PhantomData}
    }

}
|
||||
|
||||
impl Base {
    /// Reinterpret the u32 limbs as an ark `BigInteger384` (base-field width).
    pub fn to_ark(&self) -> BigInteger384 {
        BigInteger384::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap())
    }

    /// Build a `Base` from an ark `BigInteger384` by splitting its u64 limbs.
    pub fn from_ark(ark: BigInteger384) -> Self {
        Self::from_limbs(&u64_vec_to_u32_vec(&ark.0))
    }
}
|
||||
|
||||
|
||||
impl Point {
    /// Convert to an ark projective point (via the affine form).
    pub fn to_ark(&self) -> G1Projective_BLS12_381 {
        self.to_ark_affine().into_projective()
    }

    /// Convert to an ark affine point by normalizing the projective coordinates.
    ///
    /// # Panics
    /// Panics if `z` is zero (the point at infinity has no affine form here).
    pub fn to_ark_affine(&self) -> G1Affine_BLS12_381 {
        //TODO: generic conversion
        use ark_ff::Field;
        use std::ops::Mul;
        let proj_x_field = Fq_BLS12_381::from_le_bytes_mod_order(&self.x.to_bytes_le());
        let proj_y_field = Fq_BLS12_381::from_le_bytes_mod_order(&self.y.to_bytes_le());
        let proj_z_field = Fq_BLS12_381::from_le_bytes_mod_order(&self.z.to_bytes_le());
        // Standard projective normalization: (x/z, y/z).
        let inverse_z = proj_z_field.inverse().unwrap();
        let aff_x = proj_x_field.mul(inverse_z);
        let aff_y = proj_y_field.mul(inverse_z);
        G1Affine_BLS12_381::new(aff_x, aff_y, false)
    }

    /// Build a `Point` from an ark projective point, normalized to z = 1.
    ///
    /// NOTE(review): this divides by z^2 and z^3 (Jacobian normalization, ark's
    /// internal representation), while `to_ark_affine` divides by z once
    /// (standard projective) — confirm the two coordinate systems are matched
    /// on the CUDA side.
    ///
    /// # Panics
    /// Panics if `ark.z` is zero (point at infinity).
    pub fn from_ark(ark: G1Projective_BLS12_381) -> Point {
        use ark_ff::Field;
        let z_inv = ark.z.inverse().unwrap();
        let z_invsq = z_inv * z_inv;
        let z_invq3 = z_invsq * z_inv;
        Point {
            x: Base::from_ark((ark.x * z_invsq).into_repr()),
            y: Base::from_ark((ark.y * z_invq3).into_repr()),
            z: Base::one(),
        }
    }
}
|
||||
|
||||
impl PointAffineNoInfinity {

    /// Convert to an ark affine point, treating the limbs as ark's internal
    /// representation (`Fq::new`, no Montgomery conversion).
    pub fn to_ark(&self) -> G1Affine_BLS12_381 {
        G1Affine_BLS12_381::new(Fq_BLS12_381::new(self.x.to_ark()), Fq_BLS12_381::new(self.y.to_ark()), false)
    }

    /// Convert to an ark affine point, treating the limbs as a canonical
    /// representative (`from_repr`; panics on an invalid field element).
    pub fn to_ark_repr(&self) -> G1Affine_BLS12_381 {
        G1Affine_BLS12_381::new(
            Fq_BLS12_381::from_repr(self.x.to_ark()).unwrap(),
            Fq_BLS12_381::from_repr(self.y.to_ark()).unwrap(),
            false,
        )
    }

    /// Build from an ark affine point (coordinates taken via `into_repr`).
    pub fn from_ark(p: &G1Affine_BLS12_381) -> Self {
        PointAffineNoInfinity {
            x: Base::from_ark(p.x.into_repr()),
            y: Base::from_ark(p.y.into_repr()),
        }
    }
}
|
||||
|
||||
impl Point {
    /// Normalize to affine coordinates, round-tripping through ark.
    ///
    /// # Panics
    /// Panics if the point is at infinity (via `to_ark_affine`'s z-inverse).
    pub fn to_affine(&self) -> PointAffineNoInfinity {
        let ark_affine = self.to_ark_affine();
        PointAffineNoInfinity {
            x: Base::from_ark(ark_affine.x.into_repr()),
            y: Base::from_ark(ark_affine.y.into_repr()),
        }
    }
}
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) mod tests_bls12_381 {
|
||||
use std::ops::Add;
|
||||
use ark_bls12_381::{Fr, G1Affine, G1Projective};
|
||||
use ark_ec::{msm::VariableBaseMSM, AffineCurve, ProjectiveCurve};
|
||||
use ark_ff::{FftField, Field, Zero, PrimeField};
|
||||
use ark_std::UniformRand;
|
||||
use rustacuda::prelude::{DeviceBuffer, CopyDestination};
|
||||
use crate::curve_structs::{Point, Scalar, Base};
|
||||
use crate::basic_structs::scalar::ScalarTrait;
|
||||
use crate::from_cuda::{generate_random_points, get_rng, generate_random_scalars, msm, msm_batch, set_up_scalars, commit, commit_batch, ntt, intt, generate_random_points_proj, ecntt, iecntt, ntt_batch, ecntt_batch, iecntt_batch, intt_batch, reverse_order_scalars_batch, interpolate_scalars_batch, set_up_points, reverse_order_points, interpolate_points, reverse_order_points_batch, interpolate_points_batch, evaluate_scalars, interpolate_scalars, reverse_order_scalars, evaluate_points, build_domain, evaluate_scalars_on_coset, evaluate_points_on_coset, mult_matrix_by_vec, mult_sc_vec, multp_vec,evaluate_scalars_batch, evaluate_points_batch, evaluate_scalars_on_coset_batch, evaluate_points_on_coset_batch};
|
||||
|
||||
fn random_points_ark_proj(nof_elements: usize) -> Vec<G1Projective> {
|
||||
let mut rng = ark_std::rand::thread_rng();
|
||||
let mut points_ga: Vec<G1Projective> = Vec::new();
|
||||
for _ in 0..nof_elements {
|
||||
let aff = G1Projective::rand(&mut rng);
|
||||
points_ga.push(aff);
|
||||
}
|
||||
points_ga
|
||||
}
|
||||
|
||||
/// Naive O(n^2) reference ECNTT over ark types, used to cross-check the GPU
/// kernels: result[k] = sum_l points[l] * rou^(l*k), with rou the size-th root
/// of unity (or its inverse), and a final 1/size scaling for the inverse
/// transform.
fn ecntt_arc_naive(
    points: &Vec<G1Projective>,
    size: usize,
    inverse: bool,
) -> Vec<G1Projective> {
    let mut result: Vec<G1Projective> = Vec::new();
    for _ in 0..size {
        result.push(G1Projective::zero());
    }
    // Forward uses the root of unity; inverse uses its field inverse.
    let rou: Fr;
    if !inverse {
        rou = Fr::get_root_of_unity(size).unwrap();
    } else {
        rou = Fr::inverse(&Fr::get_root_of_unity(size).unwrap()).unwrap();
    }
    // Direct DFT double loop: O(size^2) scalar multiplications.
    for k in 0..size {
        for l in 0..size {
            let pow: [u64; 1] = [(l * k).try_into().unwrap()];
            let mul_rou = Fr::pow(&rou, &pow);
            result[k] = result[k].add(points[l].into_affine().mul(mul_rou));
        }
    }
    // Inverse transform is scaled by 1/size.
    if inverse {
        let size2 = size as u64;
        for k in 0..size {
            let multfactor = Fr::inverse(&Fr::from(size2)).unwrap();
            result[k] = result[k].into_affine().mul(multfactor);
        }
    }
    return result;
}
|
||||
|
||||
/// True iff the first `points.len()` entries of both vectors are pairwise equal.
/// (Indexes `points2` with `points`' length, like the original — panics if
/// `points2` is shorter.)
fn check_eq(points: &Vec<G1Projective>, points2: &Vec<G1Projective>) -> bool {
    (0..points.len()).all(|i| points2[i].eq(&points[i]))
}
|
||||
|
||||
/// Sanity-check the naive reference ECNTT: forward then inverse must round-trip
/// back to the input, and the forward output must differ from it.
fn test_naive_ark_ecntt(size: usize) {
    let points = random_points_ark_proj(size);
    let result1: Vec<G1Projective> = ecntt_arc_naive(&points, size, false);
    let result2: Vec<G1Projective> = ecntt_arc_naive(&result1, size, true);
    // Forward output differs from the round-tripped input...
    assert!(!check_eq(&result2, &result1));
    // ...and the inverse recovers the original points.
    assert!(check_eq(&result2, &points));
}
|
||||
|
||||
/// GPU MSM must agree with arkworks' VariableBaseMSM for several sizes.
#[test]
fn test_msm() {
    let test_sizes = [6, 9];

    for pow2 in test_sizes {
        let count = 1 << pow2;
        let seed = None; // set Some to provide seed
        let points = generate_random_points(count, get_rng(seed));
        let scalars = generate_random_scalars(count, get_rng(seed));

        let msm_result = msm(&points, &scalars, 0);

        // Reference computation on the CPU with arkworks.
        let point_r_ark: Vec<_> = points.iter().map(|x| x.to_ark_repr()).collect();
        let scalars_r_ark: Vec<_> = scalars.iter().map(|x| x.to_ark()).collect();

        let msm_result_ark = VariableBaseMSM::multi_scalar_mul(&point_r_ark, &scalars_r_ark);

        assert_eq!(msm_result.to_ark_affine(), msm_result_ark);
        assert_eq!(msm_result.to_ark(), msm_result_ark);
        // Conversion round-trip consistency.
        assert_eq!(
            msm_result.to_ark_affine(),
            Point::from_ark(msm_result_ark).to_ark_affine()
        );
    }
}
|
||||
|
||||
/// Batched GPU MSM must agree with per-chunk arkworks MSM across several
/// batch sizes and MSM sizes.
#[test]
fn test_batch_msm() {
    for batch_pow2 in [2, 4] {
        for pow2 in [4, 6] {
            let msm_size = 1 << pow2;
            let batch_size = 1 << batch_pow2;
            let seed = None; // set Some to provide seed
            let points_batch = generate_random_points(msm_size * batch_size, get_rng(seed));
            let scalars_batch = generate_random_scalars(msm_size * batch_size, get_rng(seed));

            let point_r_ark: Vec<_> = points_batch.iter().map(|x| x.to_ark_repr()).collect();
            let scalars_r_ark: Vec<_> = scalars_batch.iter().map(|x| x.to_ark()).collect();

            // Reference: one arkworks MSM per msm_size chunk.
            let expected: Vec<_> = point_r_ark
                .chunks(msm_size)
                .zip(scalars_r_ark.chunks(msm_size))
                .map(|p| Point::from_ark(VariableBaseMSM::multi_scalar_mul(p.0, p.1)))
                .collect();

            let result = msm_batch(&points_batch, &scalars_batch, batch_size, 0);

            assert_eq!(result, expected);
        }
    }
}
|
||||
|
||||
/// `commit` on device buffers must equal the host-side `msm` on the same data,
/// and the result must be non-trivial.
#[test]
fn test_commit() {
    let test_size = 1 << 8;
    let seed = Some(0);
    let (mut scalars, mut d_scalars, _) = set_up_scalars(test_size, 0, false);
    let mut points = generate_random_points(test_size, get_rng(seed));
    let mut d_points = DeviceBuffer::from_slice(&points[..]).unwrap();

    let msm_result = msm(&points, &scalars, 0);
    let mut d_commit_result = commit(&mut d_points, &mut d_scalars);
    // Copy the single commitment point back to the host.
    let mut h_commit_result = Point::zero();
    d_commit_result.copy_to(&mut h_commit_result).unwrap();

    assert_eq!(msm_result, h_commit_result);
    assert_ne!(msm_result, Point::zero());
    assert_ne!(h_commit_result, Point::zero());
}
|
||||
|
||||
/// `commit_batch` on device buffers must equal the host-side `msm_batch`,
/// and every per-batch commitment must be non-trivial.
#[test]
fn test_batch_commit() {
    let batch_size = 4;
    let test_size = 1 << 12;
    let seed = Some(0);
    let (scalars, mut d_scalars, _) = set_up_scalars(test_size * batch_size, 0, false);
    let points = generate_random_points(test_size * batch_size, get_rng(seed));
    let mut d_points = DeviceBuffer::from_slice(&points[..]).unwrap();

    let msm_result = msm_batch(&points, &scalars, batch_size, 0);
    let mut d_commit_result = commit_batch(&mut d_points, &mut d_scalars, batch_size);
    // Copy one commitment point per batch back to the host.
    let mut h_commit_result: Vec<Point> = (0..batch_size).map(|_| Point::zero()).collect();
    d_commit_result.copy_to(&mut h_commit_result[..]).unwrap();

    assert_eq!(msm_result, h_commit_result);
    for h in h_commit_result {
        assert_ne!(h, Point::zero());
    }
}
|
||||
|
||||
/// GPU NTT/iNTT round-trip on scalars, plus GPU ECNTT/iECNTT round-trip on
/// points cross-checked against the naive ark reference implementation.
#[test]
fn test_ntt() {
    //NTT
    let seed = None; //some value to fix the rng
    let test_size = 1 << 3;

    let scalars = generate_random_scalars(test_size, get_rng(seed));

    let mut ntt_result = scalars.clone();
    ntt(&mut ntt_result, 0);

    // Forward transform must change the data...
    assert_ne!(ntt_result, scalars);

    let mut intt_result = ntt_result.clone();

    intt(&mut intt_result, 0);

    // ...and the inverse must recover the original scalars.
    assert_eq!(intt_result, scalars);

    //ECNTT
    let points_proj = generate_random_points_proj(test_size, get_rng(seed));

    test_naive_ark_ecntt(test_size);

    assert!(points_proj[0].to_ark().into_affine().is_on_curve());

    //naive ark
    let points_proj_ark = points_proj
        .iter()
        .map(|p| p.to_ark())
        .collect::<Vec<G1Projective>>();

    let ecntt_result_naive = ecntt_arc_naive(&points_proj_ark, points_proj_ark.len(), false);

    let iecntt_result_naive = ecntt_arc_naive(&ecntt_result_naive, points_proj_ark.len(), true);

    assert_eq!(points_proj_ark, iecntt_result_naive);

    //ingo gpu
    let mut ecntt_result = points_proj.to_vec();
    ecntt(&mut ecntt_result, 0);

    assert_ne!(ecntt_result, points_proj);

    let mut iecntt_result = ecntt_result.clone();
    iecntt(&mut iecntt_result, 0);

    // Naive round-trip result agrees with the original points (affine form).
    assert_eq!(
        iecntt_result_naive,
        points_proj
            .iter()
            .map(|p| p.to_ark_affine())
            .collect::<Vec<G1Affine>>()
    );
    // GPU round-trip result agrees with the original points (affine form).
    assert_eq!(
        iecntt_result
            .iter()
            .map(|p| p.to_ark_affine())
            .collect::<Vec<G1Affine>>(),
        points_proj
            .iter()
            .map(|p| p.to_ark_affine())
            .collect::<Vec<G1Affine>>()
    );
}
|
||||
|
||||
/// Batched NTT/iNTT (scalars) and ECNTT/iECNTT (points) must match the
/// per-chunk single transforms, and each inverse must round-trip the batch
/// back to the input.
#[test]
fn test_ntt_batch() {
    //NTT
    let seed = None; //some value to fix the rng
    let test_size = 1 << 5;
    let batches = 4;

    let scalars_batch: Vec<Scalar> =
        generate_random_scalars(test_size * batches, get_rng(seed));

    // Split the flat batch into per-chunk vectors for the reference transforms.
    let mut scalar_vec_of_vec: Vec<Vec<Scalar>> = Vec::new();

    for i in 0..batches {
        scalar_vec_of_vec.push(scalars_batch[i * test_size..(i + 1) * test_size].to_vec());
    }

    let mut ntt_result = scalars_batch.clone();

    // do batch ntt
    ntt_batch(&mut ntt_result, test_size, 0);

    let mut ntt_result_vec_of_vec = Vec::new();

    // do ntt for every chunk
    for i in 0..batches {
        ntt_result_vec_of_vec.push(scalar_vec_of_vec[i].clone());
        ntt(&mut ntt_result_vec_of_vec[i], 0);
    }

    // check that the ntt of each vec of scalars is equal to the intt of the specific batch
    for i in 0..batches {
        assert_eq!(
            ntt_result_vec_of_vec[i],
            ntt_result[i * test_size..(i + 1) * test_size]
        );
    }

    // check that ntt output is different from input
    assert_ne!(ntt_result, scalars_batch);

    let mut intt_result = ntt_result.clone();

    // do batch intt
    intt_batch(&mut intt_result, test_size, 0);

    let mut intt_result_vec_of_vec = Vec::new();

    // do intt for every chunk
    for i in 0..batches {
        intt_result_vec_of_vec.push(ntt_result_vec_of_vec[i].clone());
        intt(&mut intt_result_vec_of_vec[i], 0);
    }

    // check that the intt of each vec of scalars is equal to the intt of the specific batch
    for i in 0..batches {
        assert_eq!(
            intt_result_vec_of_vec[i],
            intt_result[i * test_size..(i + 1) * test_size]
        );
    }

    // Batched inverse recovers the original batch.
    assert_eq!(intt_result, scalars_batch);

    // //ECNTT
    let points_proj = generate_random_points_proj(test_size * batches, get_rng(seed));

    let mut points_vec_of_vec: Vec<Vec<Point>> = Vec::new();

    for i in 0..batches {
        points_vec_of_vec.push(points_proj[i * test_size..(i + 1) * test_size].to_vec());
    }

    let mut ntt_result_points = points_proj.clone();

    // do batch ecintt
    ecntt_batch(&mut ntt_result_points, test_size, 0);

    let mut ntt_result_points_vec_of_vec = Vec::new();

    for i in 0..batches {
        ntt_result_points_vec_of_vec.push(points_vec_of_vec[i].clone());
        ecntt(&mut ntt_result_points_vec_of_vec[i], 0);
    }

    for i in 0..batches {
        assert_eq!(
            ntt_result_points_vec_of_vec[i],
            ntt_result_points[i * test_size..(i + 1) * test_size]
        );
    }

    assert_ne!(ntt_result_points, points_proj);

    let mut intt_result_points = ntt_result_points.clone();

    // do batch ecintt
    iecntt_batch(&mut intt_result_points, test_size, 0);

    let mut intt_result_points_vec_of_vec = Vec::new();

    // do ecintt for every chunk
    for i in 0..batches {
        intt_result_points_vec_of_vec.push(ntt_result_points_vec_of_vec[i].clone());
        iecntt(&mut intt_result_points_vec_of_vec[i], 0);
    }

    // check that the ecintt of each vec of scalars is equal to the intt of the specific batch
    for i in 0..batches {
        assert_eq!(
            intt_result_points_vec_of_vec[i],
            intt_result_points[i * test_size..(i + 1) * test_size]
        );
    }

    assert_eq!(intt_result_points, points_proj);
}
|
||||
|
||||
/// Device-side interpolation (bit-reverse + interpolate) must match the
/// host-side iNTT of the same evaluations.
#[test]
fn test_scalar_interpolation() {
    let log_test_size = 7;
    let test_size = 1 << log_test_size;
    let (mut evals_mut, mut d_evals, mut d_domain) = set_up_scalars(test_size, log_test_size, true);

    // The interpolation kernel expects bit-reversed input ordering.
    reverse_order_scalars(&mut d_evals);
    let mut d_coeffs = interpolate_scalars(&mut d_evals, &mut d_domain);
    // Reference: host-side iNTT of the same evaluations.
    intt(&mut evals_mut, 0);
    let mut h_coeffs: Vec<Scalar> = (0..test_size).map(|_| Scalar::zero()).collect();
    d_coeffs.copy_to(&mut h_coeffs[..]).unwrap();

    assert_eq!(h_coeffs, evals_mut);
}
|
||||
|
||||
/// Batched device-side interpolation must match the host-side batched iNTT.
#[test]
fn test_scalar_batch_interpolation() {
    let batch_size = 4;
    let log_test_size = 10;
    let test_size = 1 << log_test_size;
    let (mut evals_mut, mut d_evals, mut d_domain) = set_up_scalars(test_size * batch_size, log_test_size, true);

    // The interpolation kernel expects bit-reversed input ordering per batch.
    reverse_order_scalars_batch(&mut d_evals, batch_size);
    let mut d_coeffs = interpolate_scalars_batch(&mut d_evals, &mut d_domain, batch_size);
    // Reference: host-side batched iNTT of the same evaluations.
    intt_batch(&mut evals_mut, test_size, 0);
    let mut h_coeffs: Vec<Scalar> = (0..test_size * batch_size).map(|_| Scalar::zero()).collect();
    d_coeffs.copy_to(&mut h_coeffs[..]).unwrap();

    assert_eq!(h_coeffs, evals_mut);
}
|
||||
|
||||
/// Device-side point interpolation must match the host-side iECNTT, and
/// produce no zero points.
#[test]
fn test_point_interpolation() {
    let log_test_size = 6;
    let test_size = 1 << log_test_size;
    let (mut evals_mut, mut d_evals, mut d_domain) = set_up_points(test_size, log_test_size, true);

    // The interpolation kernel expects bit-reversed input ordering.
    reverse_order_points(&mut d_evals);
    let mut d_coeffs = interpolate_points(&mut d_evals, &mut d_domain);
    // Reference: host-side iECNTT of the same evaluations.
    iecntt(&mut evals_mut[..], 0);
    let mut h_coeffs: Vec<Point> = (0..test_size).map(|_| Point::zero()).collect();
    d_coeffs.copy_to(&mut h_coeffs[..]).unwrap();

    assert_eq!(h_coeffs, *evals_mut);
    for h in h_coeffs.iter() {
        assert_ne!(*h, Point::zero());
    }
}
|
||||
|
||||
/// Batched device-side point interpolation must match the host-side batched
/// iECNTT, and produce no zero points.
#[test]
fn test_point_batch_interpolation() {
    let batch_size = 4;
    let log_test_size = 6;
    let test_size = 1 << log_test_size;
    let (mut evals_mut, mut d_evals, mut d_domain) = set_up_points(test_size * batch_size, log_test_size, true);

    // The interpolation kernel expects bit-reversed input ordering per batch.
    reverse_order_points_batch(&mut d_evals, batch_size);
    let mut d_coeffs = interpolate_points_batch(&mut d_evals, &mut d_domain, batch_size);
    // Reference: host-side batched iECNTT of the same evaluations.
    iecntt_batch(&mut evals_mut[..], test_size, 0);
    let mut h_coeffs: Vec<Point> = (0..test_size * batch_size).map(|_| Point::zero()).collect();
    d_coeffs.copy_to(&mut h_coeffs[..]).unwrap();

    assert_eq!(h_coeffs, *evals_mut);
    for h in h_coeffs.iter() {
        assert_ne!(*h, Point::zero());
    }
}
|
||||
|
||||
#[test]
|
||||
fn test_scalar_evaluation() {
|
||||
let log_test_domain_size = 8;
|
||||
let coeff_size = 1 << 6;
|
||||
let (h_coeffs, mut d_coeffs, mut d_domain) = set_up_scalars(coeff_size, log_test_domain_size, false);
|
||||
let (_, _, mut d_domain_inv) = set_up_scalars(0, log_test_domain_size, true);
|
||||
|
||||
let mut d_evals = evaluate_scalars(&mut d_coeffs, &mut d_domain);
|
||||
let mut d_coeffs_domain = interpolate_scalars(&mut d_evals, &mut d_domain_inv);
|
||||
let mut h_coeffs_domain: Vec<Scalar> = (0..1 << log_test_domain_size).map(|_| Scalar::zero()).collect();
|
||||
d_coeffs_domain.copy_to(&mut h_coeffs_domain[..]).unwrap();
|
||||
|
||||
assert_eq!(h_coeffs, h_coeffs_domain[..coeff_size]);
|
||||
for i in coeff_size.. (1 << log_test_domain_size) {
|
||||
assert_eq!(Scalar::zero(), h_coeffs_domain[i]);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_scalar_batch_evaluation() {
|
||||
let batch_size = 6;
|
||||
let log_test_domain_size = 8;
|
||||
let domain_size = 1 << log_test_domain_size;
|
||||
let coeff_size = 1 << 6;
|
||||
let (h_coeffs, mut d_coeffs, mut d_domain) = set_up_scalars(coeff_size * batch_size, log_test_domain_size, false);
|
||||
let (_, _, mut d_domain_inv) = set_up_scalars(0, log_test_domain_size, true);
|
||||
|
||||
let mut d_evals = evaluate_scalars_batch(&mut d_coeffs, &mut d_domain, batch_size);
|
||||
let mut d_coeffs_domain = interpolate_scalars_batch(&mut d_evals, &mut d_domain_inv, batch_size);
|
||||
let mut h_coeffs_domain: Vec<Scalar> = (0..domain_size * batch_size).map(|_| Scalar::zero()).collect();
|
||||
d_coeffs_domain.copy_to(&mut h_coeffs_domain[..]).unwrap();
|
||||
|
||||
for j in 0..batch_size {
|
||||
assert_eq!(h_coeffs[j * coeff_size..(j + 1) * coeff_size], h_coeffs_domain[j * domain_size..j * domain_size + coeff_size]);
|
||||
for i in coeff_size..domain_size {
|
||||
assert_eq!(Scalar::zero(), h_coeffs_domain[j * domain_size + i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_point_evaluation() {
|
||||
let log_test_domain_size = 7;
|
||||
let coeff_size = 1 << 7;
|
||||
let (h_coeffs, mut d_coeffs, mut d_domain) = set_up_points(coeff_size, log_test_domain_size, false);
|
||||
let (_, _, mut d_domain_inv) = set_up_points(0, log_test_domain_size, true);
|
||||
|
||||
let mut d_evals = evaluate_points(&mut d_coeffs, &mut d_domain);
|
||||
let mut d_coeffs_domain = interpolate_points(&mut d_evals, &mut d_domain_inv);
|
||||
let mut h_coeffs_domain: Vec<Point> = (0..1 << log_test_domain_size).map(|_| Point::zero()).collect();
|
||||
d_coeffs_domain.copy_to(&mut h_coeffs_domain[..]).unwrap();
|
||||
|
||||
assert_eq!(h_coeffs[..], h_coeffs_domain[..coeff_size]);
|
||||
for i in coeff_size..(1 << log_test_domain_size) {
|
||||
assert_eq!(Point::zero(), h_coeffs_domain[i]);
|
||||
}
|
||||
for i in 0..coeff_size {
|
||||
assert_ne!(h_coeffs_domain[i], Point::zero());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_point_batch_evaluation() {
|
||||
let batch_size = 4;
|
||||
let log_test_domain_size = 6;
|
||||
let domain_size = 1 << log_test_domain_size;
|
||||
let coeff_size = 1 << 5;
|
||||
let (h_coeffs, mut d_coeffs, mut d_domain) = set_up_points(coeff_size * batch_size, log_test_domain_size, false);
|
||||
let (_, _, mut d_domain_inv) = set_up_points(0, log_test_domain_size, true);
|
||||
|
||||
let mut d_evals = evaluate_points_batch(&mut d_coeffs, &mut d_domain, batch_size);
|
||||
let mut d_coeffs_domain = interpolate_points_batch(&mut d_evals, &mut d_domain_inv, batch_size);
|
||||
let mut h_coeffs_domain: Vec<Point> = (0..domain_size * batch_size).map(|_| Point::zero()).collect();
|
||||
d_coeffs_domain.copy_to(&mut h_coeffs_domain[..]).unwrap();
|
||||
|
||||
for j in 0..batch_size {
|
||||
assert_eq!(h_coeffs[j * coeff_size..(j + 1) * coeff_size], h_coeffs_domain[j * domain_size..(j * domain_size + coeff_size)]);
|
||||
for i in coeff_size..domain_size {
|
||||
assert_eq!(Point::zero(), h_coeffs_domain[j * domain_size + i]);
|
||||
}
|
||||
for i in j * domain_size..(j * domain_size + coeff_size) {
|
||||
assert_ne!(h_coeffs_domain[i], Point::zero());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_scalar_evaluation_on_trivial_coset() {
|
||||
// checks that the evaluations on the subgroup is the same as on the coset generated by 1
|
||||
let log_test_domain_size = 8;
|
||||
let coeff_size = 1 << 6;
|
||||
let (_, mut d_coeffs, mut d_domain) = set_up_scalars(coeff_size, log_test_domain_size, false);
|
||||
let (_, _, mut d_domain_inv) = set_up_scalars(coeff_size, log_test_domain_size, true);
|
||||
let mut d_trivial_coset_powers = build_domain(1 << log_test_domain_size, 0, false);
|
||||
|
||||
let mut d_evals = evaluate_scalars(&mut d_coeffs, &mut d_domain);
|
||||
let mut h_coeffs: Vec<Scalar> = (0..1 << log_test_domain_size).map(|_| Scalar::zero()).collect();
|
||||
d_evals.copy_to(&mut h_coeffs[..]).unwrap();
|
||||
let mut d_evals_coset = evaluate_scalars_on_coset(&mut d_coeffs, &mut d_domain, &mut d_trivial_coset_powers);
|
||||
let mut h_evals_coset: Vec<Scalar> = (0..1 << log_test_domain_size).map(|_| Scalar::zero()).collect();
|
||||
d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();
|
||||
|
||||
assert_eq!(h_coeffs, h_evals_coset);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_scalar_evaluation_on_coset() {
|
||||
// checks that evaluating a polynomial on a subgroup and its coset is the same as evaluating on a 2x larger subgroup
|
||||
let log_test_size = 8;
|
||||
let test_size = 1 << log_test_size;
|
||||
let (_, mut d_coeffs, mut d_domain) = set_up_scalars(test_size, log_test_size, false);
|
||||
let (_, _, mut d_large_domain) = set_up_scalars(0, log_test_size + 1, false);
|
||||
let mut d_coset_powers = build_domain(test_size, log_test_size + 1, false);
|
||||
|
||||
let mut d_evals_large = evaluate_scalars(&mut d_coeffs, &mut d_large_domain);
|
||||
let mut h_evals_large: Vec<Scalar> = (0..2 * test_size).map(|_| Scalar::zero()).collect();
|
||||
d_evals_large.copy_to(&mut h_evals_large[..]).unwrap();
|
||||
let mut d_evals = evaluate_scalars(&mut d_coeffs, &mut d_domain);
|
||||
let mut h_evals: Vec<Scalar> = (0..test_size).map(|_| Scalar::zero()).collect();
|
||||
d_evals.copy_to(&mut h_evals[..]).unwrap();
|
||||
let mut d_evals_coset = evaluate_scalars_on_coset(&mut d_coeffs, &mut d_domain, &mut d_coset_powers);
|
||||
let mut h_evals_coset: Vec<Scalar> = (0..test_size).map(|_| Scalar::zero()).collect();
|
||||
d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();
|
||||
|
||||
assert_eq!(h_evals[..], h_evals_large[..test_size]);
|
||||
assert_eq!(h_evals_coset[..], h_evals_large[test_size..2 * test_size]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_scalar_batch_evaluation_on_coset() {
|
||||
// checks that evaluating a polynomial on a subgroup and its coset is the same as evaluating on a 2x larger subgroup
|
||||
let batch_size = 4;
|
||||
let log_test_size = 6;
|
||||
let test_size = 1 << log_test_size;
|
||||
let (_, mut d_coeffs, mut d_domain) = set_up_scalars(test_size * batch_size, log_test_size, false);
|
||||
let (_, _, mut d_large_domain) = set_up_scalars(0, log_test_size + 1, false);
|
||||
let mut d_coset_powers = build_domain(test_size, log_test_size + 1, false);
|
||||
|
||||
let mut d_evals_large = evaluate_scalars_batch(&mut d_coeffs, &mut d_large_domain, batch_size);
|
||||
let mut h_evals_large: Vec<Scalar> = (0..2 * test_size * batch_size).map(|_| Scalar::zero()).collect();
|
||||
d_evals_large.copy_to(&mut h_evals_large[..]).unwrap();
|
||||
let mut d_evals = evaluate_scalars_batch(&mut d_coeffs, &mut d_domain, batch_size);
|
||||
let mut h_evals: Vec<Scalar> = (0..test_size * batch_size).map(|_| Scalar::zero()).collect();
|
||||
d_evals.copy_to(&mut h_evals[..]).unwrap();
|
||||
let mut d_evals_coset = evaluate_scalars_on_coset_batch(&mut d_coeffs, &mut d_domain, batch_size, &mut d_coset_powers);
|
||||
let mut h_evals_coset: Vec<Scalar> = (0..test_size * batch_size).map(|_| Scalar::zero()).collect();
|
||||
d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();
|
||||
|
||||
for i in 0..batch_size {
|
||||
assert_eq!(h_evals_large[2 * i * test_size..(2 * i + 1) * test_size], h_evals[i * test_size..(i + 1) * test_size]);
|
||||
assert_eq!(h_evals_large[(2 * i + 1) * test_size..(2 * i + 2) * test_size], h_evals_coset[i * test_size..(i + 1) * test_size]);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_point_evaluation_on_coset() {
|
||||
// checks that evaluating a polynomial on a subgroup and its coset is the same as evaluating on a 2x larger subgroup
|
||||
let log_test_size = 8;
|
||||
let test_size = 1 << log_test_size;
|
||||
let (_, mut d_coeffs, mut d_domain) = set_up_points(test_size, log_test_size, false);
|
||||
let (_, _, mut d_large_domain) = set_up_points(0, log_test_size + 1, false);
|
||||
let mut d_coset_powers = build_domain(test_size, log_test_size + 1, false);
|
||||
|
||||
let mut d_evals_large = evaluate_points(&mut d_coeffs, &mut d_large_domain);
|
||||
let mut h_evals_large: Vec<Point> = (0..2 * test_size).map(|_| Point::zero()).collect();
|
||||
d_evals_large.copy_to(&mut h_evals_large[..]).unwrap();
|
||||
let mut d_evals = evaluate_points(&mut d_coeffs, &mut d_domain);
|
||||
let mut h_evals: Vec<Point> = (0..test_size).map(|_| Point::zero()).collect();
|
||||
d_evals.copy_to(&mut h_evals[..]).unwrap();
|
||||
let mut d_evals_coset = evaluate_points_on_coset(&mut d_coeffs, &mut d_domain, &mut d_coset_powers);
|
||||
let mut h_evals_coset: Vec<Point> = (0..test_size).map(|_| Point::zero()).collect();
|
||||
d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();
|
||||
|
||||
assert_eq!(h_evals[..], h_evals_large[..test_size]);
|
||||
assert_eq!(h_evals_coset[..], h_evals_large[test_size..2 * test_size]);
|
||||
for i in 0..test_size {
|
||||
assert_ne!(h_evals[i], Point::zero());
|
||||
assert_ne!(h_evals_coset[i], Point::zero());
|
||||
assert_ne!(h_evals_large[2 * i], Point::zero());
|
||||
assert_ne!(h_evals_large[2 * i + 1], Point::zero());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_point_batch_evaluation_on_coset() {
|
||||
// checks that evaluating a polynomial on a subgroup and its coset is the same as evaluating on a 2x larger subgroup
|
||||
let batch_size = 2;
|
||||
let log_test_size = 6;
|
||||
let test_size = 1 << log_test_size;
|
||||
let (_, mut d_coeffs, mut d_domain) = set_up_points(test_size * batch_size, log_test_size, false);
|
||||
let (_, _, mut d_large_domain) = set_up_points(0, log_test_size + 1, false);
|
||||
let mut d_coset_powers = build_domain(test_size, log_test_size + 1, false);
|
||||
|
||||
let mut d_evals_large = evaluate_points_batch(&mut d_coeffs, &mut d_large_domain, batch_size);
|
||||
let mut h_evals_large: Vec<Point> = (0..2 * test_size * batch_size).map(|_| Point::zero()).collect();
|
||||
d_evals_large.copy_to(&mut h_evals_large[..]).unwrap();
|
||||
let mut d_evals = evaluate_points_batch(&mut d_coeffs, &mut d_domain, batch_size);
|
||||
let mut h_evals: Vec<Point> = (0..test_size * batch_size).map(|_| Point::zero()).collect();
|
||||
d_evals.copy_to(&mut h_evals[..]).unwrap();
|
||||
let mut d_evals_coset = evaluate_points_on_coset_batch(&mut d_coeffs, &mut d_domain, batch_size, &mut d_coset_powers);
|
||||
let mut h_evals_coset: Vec<Point> = (0..test_size * batch_size).map(|_| Point::zero()).collect();
|
||||
d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();
|
||||
|
||||
for i in 0..batch_size {
|
||||
assert_eq!(h_evals_large[2 * i * test_size..(2 * i + 1) * test_size], h_evals[i * test_size..(i + 1) * test_size]);
|
||||
assert_eq!(h_evals_large[(2 * i + 1) * test_size..(2 * i + 2) * test_size], h_evals_coset[i * test_size..(i + 1) * test_size]);
|
||||
}
|
||||
for i in 0..test_size * batch_size {
|
||||
assert_ne!(h_evals[i], Point::zero());
|
||||
assert_ne!(h_evals_coset[i], Point::zero());
|
||||
assert_ne!(h_evals_large[2 * i], Point::zero());
|
||||
assert_ne!(h_evals_large[2 * i + 1], Point::zero());
|
||||
}
|
||||
}
|
||||
|
||||
// testing matrix multiplication by comparing the result of FFT with the naive multiplication by the DFT matrix
|
||||
#[test]
|
||||
fn test_matrix_multiplication() {
|
||||
let seed = None; // some value to fix the rng
|
||||
let test_size = 1 << 5;
|
||||
let rou = Fr::get_root_of_unity(test_size).unwrap();
|
||||
let matrix_flattened: Vec<Scalar> = (0..test_size).map(
|
||||
|row_num| { (0..test_size).map(
|
||||
|col_num| {
|
||||
let pow: [u64; 1] = [(row_num * col_num).try_into().unwrap()];
|
||||
Scalar::from_ark(Fr::pow(&rou, &pow).into_repr())
|
||||
}).collect::<Vec<Scalar>>()
|
||||
}).flatten().collect::<Vec<_>>();
|
||||
let vector: Vec<Scalar> = generate_random_scalars(test_size, get_rng(seed));
|
||||
|
||||
let result = mult_matrix_by_vec(&matrix_flattened, &vector, 0);
|
||||
let mut ntt_result = vector.clone();
|
||||
ntt(&mut ntt_result, 0);
|
||||
|
||||
// we don't use the same roots of unity as arkworks, so the results are permutations
|
||||
// of one another and the only guaranteed fixed scalars are the following ones:
|
||||
assert_eq!(result[0], ntt_result[0]);
|
||||
assert_eq!(result[test_size >> 1], ntt_result[test_size >> 1]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[allow(non_snake_case)]
|
||||
fn test_vec_scalar_mul() {
|
||||
let mut intoo = [Scalar::one(), Scalar::one(), Scalar::zero()];
|
||||
let expected = [Scalar::one(), Scalar::zero(), Scalar::zero()];
|
||||
mult_sc_vec(&mut intoo, &expected, 0);
|
||||
assert_eq!(intoo, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[allow(non_snake_case)]
|
||||
fn test_vec_point_mul() {
|
||||
let dummy_one = Point {
|
||||
x: Base::one(),
|
||||
y: Base::one(),
|
||||
z: Base::one(),
|
||||
};
|
||||
|
||||
let mut inout = [dummy_one, dummy_one, Point::zero()];
|
||||
let scalars = [Scalar::one(), Scalar::zero(), Scalar::zero()];
|
||||
let expected = [dummy_one, Point::zero(), Point::zero()];
|
||||
multp_vec(&mut inout, &scalars, 0);
|
||||
assert_eq!(inout, expected);
|
||||
}
|
||||
}
|
||||
34
bn254/Cargo.toml
Normal file
34
bn254/Cargo.toml
Normal file
@@ -0,0 +1,34 @@
|
||||
[package]
|
||||
name = "bn254"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
authors = [ "Ingonyama" ]
|
||||
|
||||
[dependencies]
|
||||
icicle-core = { path = "../icicle-core" }
|
||||
|
||||
hex = "*"
|
||||
ark-std = "0.3.0"
|
||||
ark-ff = "0.3.0"
|
||||
ark-poly = "0.3.0"
|
||||
ark-ec = { version = "0.3.0", features = [ "parallel" ] }
|
||||
ark-bn254 = "0.3.0"
|
||||
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_derive = "1.0"
|
||||
serde_cbor = "0.11.2"
|
||||
|
||||
rustacuda = "0.1"
|
||||
rustacuda_core = "0.1"
|
||||
rustacuda_derive = "0.1"
|
||||
|
||||
rand = "*" #TODO: move rand and ark dependencies to dev once random scalar/point generation is done "natively"
|
||||
|
||||
[build-dependencies]
|
||||
cc = { version = "1.0", features = ["parallel"] }
|
||||
|
||||
[dev-dependencies]
|
||||
"criterion" = "0.4.0"
|
||||
|
||||
[features]
|
||||
g2 = []
|
||||
36
bn254/build.rs
Normal file
36
bn254/build.rs
Normal file
@@ -0,0 +1,36 @@
|
||||
use std::env;
|
||||
|
||||
fn main() {
|
||||
//TODO: check cargo features selected
|
||||
//TODO: can conflict/duplicate with make ?
|
||||
|
||||
println!("cargo:rerun-if-env-changed=CXXFLAGS");
|
||||
println!("cargo:rerun-if-changed=./icicle");
|
||||
|
||||
let arch_type = env::var("ARCH_TYPE").unwrap_or(String::from("native"));
|
||||
let stream_type = env::var("DEFAULT_STREAM").unwrap_or(String::from("legacy"));
|
||||
|
||||
let mut arch = String::from("-arch=");
|
||||
arch.push_str(&arch_type);
|
||||
let mut stream = String::from("-default-stream=");
|
||||
stream.push_str(&stream_type);
|
||||
|
||||
let mut nvcc = cc::Build::new();
|
||||
|
||||
println!("Compiling icicle library using arch: {}", &arch);
|
||||
|
||||
if cfg!(feature = "g2") {
|
||||
nvcc.define("G2_DEFINED", None);
|
||||
}
|
||||
nvcc.cuda(true);
|
||||
nvcc.define("FEATURE_BN254", None);
|
||||
nvcc.debug(false);
|
||||
nvcc.flag(&arch);
|
||||
nvcc.flag(&stream);
|
||||
nvcc.shared_flag(false);
|
||||
// nvcc.static_flag(true);
|
||||
nvcc.files([
|
||||
"../icicle-cuda/curves/index.cu",
|
||||
]);
|
||||
nvcc.compile("ingo_icicle"); //TODO: extension??
|
||||
}
|
||||
4
bn254/src/basic_structs/field.rs
Normal file
4
bn254/src/basic_structs/field.rs
Normal file
@@ -0,0 +1,4 @@
|
||||
pub trait Field<const NUM_LIMBS: usize> {
|
||||
const MODOLUS: [u32;NUM_LIMBS];
|
||||
const LIMBS: usize = NUM_LIMBS;
|
||||
}
|
||||
3
bn254/src/basic_structs/mod.rs
Normal file
3
bn254/src/basic_structs/mod.rs
Normal file
@@ -0,0 +1,3 @@
|
||||
pub mod field;
|
||||
pub mod scalar;
|
||||
pub mod point;
|
||||
108
bn254/src/basic_structs/point.rs
Normal file
108
bn254/src/basic_structs/point.rs
Normal file
@@ -0,0 +1,108 @@
|
||||
use std::ffi::c_uint;
|
||||
|
||||
use ark_bn254::{Fq as Fq_BN254, Fr as Fr_BN254, G1Affine as G1Affine_BN254, G1Projective as G1Projective_BN254};
|
||||
|
||||
use ark_ec::AffineCurve;
|
||||
use ark_ff::{BigInteger256, PrimeField};
|
||||
use std::mem::transmute;
|
||||
use ark_ff::Field;
|
||||
use icicle_core::utils::{u32_vec_to_u64_vec, u64_vec_to_u32_vec};
|
||||
|
||||
use rustacuda_core::DeviceCopy;
|
||||
use rustacuda_derive::DeviceCopy;
|
||||
|
||||
use super::scalar::{get_fixed_limbs, self};
|
||||
|
||||
|
||||
#[derive(Debug, Clone, Copy, DeviceCopy)]
|
||||
#[repr(C)]
|
||||
pub struct PointT<BF: scalar::ScalarTrait> {
|
||||
pub x: BF,
|
||||
pub y: BF,
|
||||
pub z: BF,
|
||||
}
|
||||
|
||||
impl<BF: DeviceCopy + scalar::ScalarTrait> Default for PointT<BF> {
|
||||
fn default() -> Self {
|
||||
PointT::zero()
|
||||
}
|
||||
}
|
||||
|
||||
impl<BF: DeviceCopy + scalar::ScalarTrait> PointT<BF> {
|
||||
pub fn zero() -> Self {
|
||||
PointT {
|
||||
x: BF::zero(),
|
||||
y: BF::one(),
|
||||
z: BF::zero(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn infinity() -> Self {
|
||||
Self::zero()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Clone, Copy, DeviceCopy)]
|
||||
#[repr(C)]
|
||||
pub struct PointAffineNoInfinityT<BF> {
|
||||
pub x: BF,
|
||||
pub y: BF,
|
||||
}
|
||||
|
||||
impl<BF: scalar::ScalarTrait> Default for PointAffineNoInfinityT<BF> {
|
||||
fn default() -> Self {
|
||||
PointAffineNoInfinityT {
|
||||
x: BF::zero(),
|
||||
y: BF::zero(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<BF: Copy + scalar::ScalarTrait> PointAffineNoInfinityT<BF> {
|
||||
///From u32 limbs x,y
|
||||
pub fn from_limbs(x: &[u32], y: &[u32]) -> Self {
|
||||
PointAffineNoInfinityT {
|
||||
x: BF::from_limbs(x),
|
||||
y: BF::from_limbs(y)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn limbs(&self) -> Vec<u32> {
|
||||
[self.x.limbs(), self.y.limbs()].concat()
|
||||
}
|
||||
|
||||
pub fn to_projective(&self) -> PointT<BF> {
|
||||
PointT {
|
||||
x: self.x,
|
||||
y: self.y,
|
||||
z: BF::one(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<BF: Copy + scalar::ScalarTrait> PointT<BF> {
|
||||
pub fn from_limbs(x: &[u32], y: &[u32], z: &[u32]) -> Self {
|
||||
PointT {
|
||||
x: BF::from_limbs(x),
|
||||
y: BF::from_limbs(y),
|
||||
z: BF::from_limbs(z)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_xy_limbs(value: &[u32]) -> PointT<BF> {
|
||||
let l = value.len();
|
||||
assert_eq!(l, 3 * BF::base_limbs(), "length must be 3 * {}", BF::base_limbs());
|
||||
PointT {
|
||||
x: BF::from_limbs(value[..BF::base_limbs()].try_into().unwrap()),
|
||||
y: BF::from_limbs(value[BF::base_limbs()..BF::base_limbs() * 2].try_into().unwrap()),
|
||||
z: BF::from_limbs(value[BF::base_limbs() * 2..].try_into().unwrap())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_xy_strip_z(&self) -> PointAffineNoInfinityT<BF> {
|
||||
PointAffineNoInfinityT {
|
||||
x: self.x,
|
||||
y: self.y,
|
||||
}
|
||||
}
|
||||
}
|
||||
102
bn254/src/basic_structs/scalar.rs
Normal file
102
bn254/src/basic_structs/scalar.rs
Normal file
@@ -0,0 +1,102 @@
|
||||
use std::ffi::{c_int, c_uint};
|
||||
use rand::{rngs::StdRng, RngCore, SeedableRng};
|
||||
use rustacuda_core::DeviceCopy;
|
||||
use rustacuda_derive::DeviceCopy;
|
||||
use std::mem::transmute;
|
||||
use rustacuda::prelude::*;
|
||||
use rustacuda_core::DevicePointer;
|
||||
use rustacuda::memory::{DeviceBox, CopyDestination};
|
||||
|
||||
use icicle_core::utils::{u32_vec_to_u64_vec, u64_vec_to_u32_vec};
|
||||
|
||||
use std::marker::PhantomData;
|
||||
use std::convert::TryInto;
|
||||
|
||||
use super::field::{Field, self};
|
||||
|
||||
pub fn get_fixed_limbs<const NUM_LIMBS: usize>(val: &[u32]) -> [u32; NUM_LIMBS] {
|
||||
match val.len() {
|
||||
n if n < NUM_LIMBS => {
|
||||
let mut padded: [u32; NUM_LIMBS] = [0; NUM_LIMBS];
|
||||
padded[..val.len()].copy_from_slice(&val);
|
||||
padded
|
||||
}
|
||||
n if n == NUM_LIMBS => val.try_into().unwrap(),
|
||||
_ => panic!("slice has too many elements"),
|
||||
}
|
||||
}
|
||||
|
||||
pub trait ScalarTrait{
|
||||
fn base_limbs() -> usize;
|
||||
fn zero() -> Self;
|
||||
fn from_limbs(value: &[u32]) -> Self;
|
||||
fn one() -> Self;
|
||||
fn to_bytes_le(&self) -> Vec<u8>;
|
||||
fn limbs(&self) -> &[u32];
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Clone, Copy)]
|
||||
#[repr(C)]
|
||||
pub struct ScalarT<M, const NUM_LIMBS: usize> {
|
||||
pub(crate) phantom: PhantomData<M>,
|
||||
pub(crate) value : [u32; NUM_LIMBS]
|
||||
}
|
||||
|
||||
impl<M, const NUM_LIMBS: usize> ScalarTrait for ScalarT<M, NUM_LIMBS>
|
||||
where
|
||||
M: Field<NUM_LIMBS>,
|
||||
{
|
||||
|
||||
fn base_limbs() -> usize {
|
||||
return NUM_LIMBS;
|
||||
}
|
||||
|
||||
fn zero() -> Self {
|
||||
ScalarT {
|
||||
value: [0u32; NUM_LIMBS],
|
||||
phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
fn from_limbs(value: &[u32]) -> Self {
|
||||
Self {
|
||||
value: get_fixed_limbs(value),
|
||||
phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
fn one() -> Self {
|
||||
let mut s = [0u32; NUM_LIMBS];
|
||||
s[0] = 1;
|
||||
ScalarT { value: s, phantom: PhantomData }
|
||||
}
|
||||
|
||||
fn to_bytes_le(&self) -> Vec<u8> {
|
||||
self.value
|
||||
.iter()
|
||||
.map(|s| s.to_le_bytes().to_vec())
|
||||
.flatten()
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
|
||||
fn limbs(&self) -> &[u32] {
|
||||
&self.value
|
||||
}
|
||||
}
|
||||
|
||||
impl<M, const NUM_LIMBS: usize> ScalarT<M, NUM_LIMBS> where M: field::Field<NUM_LIMBS>{
|
||||
pub fn from_limbs_le(value: &[u32]) -> ScalarT<M,NUM_LIMBS> {
|
||||
Self::from_limbs(value)
|
||||
}
|
||||
|
||||
pub fn from_limbs_be(value: &[u32]) -> ScalarT<M,NUM_LIMBS> {
|
||||
let mut value = value.to_vec();
|
||||
value.reverse();
|
||||
Self::from_limbs_le(&value)
|
||||
}
|
||||
|
||||
// Additional Functions
|
||||
pub fn add(&self, other:ScalarT<M, NUM_LIMBS>) -> ScalarT<M,NUM_LIMBS>{ // overload +
|
||||
return ScalarT{value: [self.value[0] + other.value[0];NUM_LIMBS], phantom: PhantomData };
|
||||
}
|
||||
}
|
||||
62
bn254/src/curve_structs.rs
Normal file
62
bn254/src/curve_structs.rs
Normal file
@@ -0,0 +1,62 @@
|
||||
use std::ffi::{c_int, c_uint};
|
||||
use rand::{rngs::StdRng, RngCore, SeedableRng};
|
||||
use rustacuda_derive::DeviceCopy;
|
||||
use std::mem::transmute;
|
||||
use rustacuda::prelude::*;
|
||||
use rustacuda_core::DevicePointer;
|
||||
use rustacuda::memory::{DeviceBox, CopyDestination, DeviceCopy};
|
||||
|
||||
use std::marker::PhantomData;
|
||||
use std::convert::TryInto;
|
||||
|
||||
use crate::basic_structs::point::{PointT, PointAffineNoInfinityT};
|
||||
use crate::basic_structs::scalar::ScalarT;
|
||||
use crate::basic_structs::field::Field;
|
||||
|
||||
|
||||
#[derive(Debug, PartialEq, Clone, Copy,DeviceCopy)]
|
||||
#[repr(C)]
|
||||
pub struct ScalarField;
|
||||
impl Field<8> for ScalarField {
|
||||
const MODOLUS: [u32; 8] = [0x0;8];
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Clone, Copy,DeviceCopy)]
|
||||
#[repr(C)]
|
||||
pub struct BaseField;
|
||||
impl Field<8> for BaseField {
|
||||
const MODOLUS: [u32; 8] = [0x0;8];
|
||||
}
|
||||
|
||||
|
||||
pub type Scalar = ScalarT<ScalarField,8>;
|
||||
impl Default for Scalar {
|
||||
fn default() -> Self {
|
||||
Self{value: [0x0;ScalarField::LIMBS], phantom: PhantomData }
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl DeviceCopy for Scalar{}
|
||||
|
||||
|
||||
pub type Base = ScalarT<BaseField,8>;
|
||||
impl Default for Base {
|
||||
fn default() -> Self {
|
||||
Self{value: [0x0;BaseField::LIMBS], phantom: PhantomData }
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl DeviceCopy for Base{}
|
||||
|
||||
pub type Point = PointT<Base>;
|
||||
pub type PointAffineNoInfinity = PointAffineNoInfinityT<Base>;
|
||||
|
||||
extern "C" {
|
||||
fn eq(point1: *const Point, point2: *const Point) -> c_uint;
|
||||
}
|
||||
|
||||
impl PartialEq for Point {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
unsafe { eq(self, other) != 0 }
|
||||
}
|
||||
}
|
||||
797
bn254/src/from_cuda.rs
Normal file
797
bn254/src/from_cuda.rs
Normal file
@@ -0,0 +1,797 @@
|
||||
use std::ffi::{c_int, c_uint};
|
||||
use ark_std::UniformRand;
|
||||
use rand::{rngs::StdRng, RngCore, SeedableRng};
|
||||
use rustacuda::CudaFlags;
|
||||
use rustacuda::memory::DeviceBox;
|
||||
use rustacuda::prelude::{DeviceBuffer, Device, ContextFlags, Context};
|
||||
use rustacuda_core::DevicePointer;
|
||||
use std::mem::transmute;
|
||||
use crate::basic_structs::scalar::ScalarTrait;
|
||||
use crate::curve_structs::*;
|
||||
use icicle_core::utils::{u32_vec_to_u64_vec, u64_vec_to_u32_vec};
|
||||
use std::marker::PhantomData;
|
||||
use std::convert::TryInto;
|
||||
use ark_bn254::{Fq as Fq_BN254, Fr as Fr_BN254, G1Affine as G1Affine_BN254, G1Projective as G1Projective_BN254};
|
||||
use ark_ec::AffineCurve;
|
||||
use ark_ff::{BigInteger384, BigInteger256, PrimeField};
|
||||
use rustacuda::memory::{CopyDestination, DeviceCopy};
|
||||
|
||||
extern "C" {
|
||||
fn msm_cuda(
|
||||
out: *mut Point,
|
||||
points: *const PointAffineNoInfinity,
|
||||
scalars: *const Scalar,
|
||||
count: usize,
|
||||
device_id: usize,
|
||||
) -> c_uint;
|
||||
|
||||
fn msm_batch_cuda(
|
||||
out: *mut Point,
|
||||
points: *const PointAffineNoInfinity,
|
||||
scalars: *const Scalar,
|
||||
batch_size: usize,
|
||||
msm_size: usize,
|
||||
device_id: usize,
|
||||
) -> c_uint;
|
||||
|
||||
fn commit_cuda(
|
||||
d_out: DevicePointer<Point>,
|
||||
d_scalars: DevicePointer<Scalar>,
|
||||
d_points: DevicePointer<PointAffineNoInfinity>,
|
||||
count: usize,
|
||||
device_id: usize,
|
||||
) -> c_uint;
|
||||
|
||||
fn commit_batch_cuda(
|
||||
d_out: DevicePointer<Point>,
|
||||
d_scalars: DevicePointer<Scalar>,
|
||||
d_points: DevicePointer<PointAffineNoInfinity>,
|
||||
count: usize,
|
||||
batch_size: usize,
|
||||
device_id: usize,
|
||||
) -> c_uint;
|
||||
|
||||
fn build_domain_cuda(domain_size: usize, logn: usize, inverse: bool, device_id: usize) -> DevicePointer<Scalar>;
|
||||
|
||||
fn ntt_cuda(inout: *mut Scalar, n: usize, inverse: bool, device_id: usize) -> c_int;
|
||||
|
||||
fn ecntt_cuda(inout: *mut Point, n: usize, inverse: bool, device_id: usize) -> c_int;
|
||||
|
||||
fn ntt_batch_cuda(
|
||||
inout: *mut Scalar,
|
||||
arr_size: usize,
|
||||
n: usize,
|
||||
inverse: bool,
|
||||
) -> c_int;
|
||||
|
||||
fn ecntt_batch_cuda(inout: *mut Point, arr_size: usize, n: usize, inverse: bool) -> c_int;
|
||||
|
||||
fn interpolate_scalars_cuda(
|
||||
d_out: DevicePointer<Scalar>,
|
||||
d_evaluations: DevicePointer<Scalar>,
|
||||
d_domain: DevicePointer<Scalar>,
|
||||
n: usize,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn interpolate_scalars_batch_cuda(
|
||||
d_out: DevicePointer<Scalar>,
|
||||
d_evaluations: DevicePointer<Scalar>,
|
||||
d_domain: DevicePointer<Scalar>,
|
||||
n: usize,
|
||||
batch_size: usize,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn interpolate_points_cuda(
|
||||
d_out: DevicePointer<Point>,
|
||||
d_evaluations: DevicePointer<Point>,
|
||||
d_domain: DevicePointer<Scalar>,
|
||||
n: usize,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn interpolate_points_batch_cuda(
|
||||
d_out: DevicePointer<Point>,
|
||||
d_evaluations: DevicePointer<Point>,
|
||||
d_domain: DevicePointer<Scalar>,
|
||||
n: usize,
|
||||
batch_size: usize,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn evaluate_scalars_cuda(
|
||||
d_out: DevicePointer<Scalar>,
|
||||
d_coefficients: DevicePointer<Scalar>,
|
||||
d_domain: DevicePointer<Scalar>,
|
||||
domain_size: usize,
|
||||
n: usize,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn evaluate_scalars_batch_cuda(
|
||||
d_out: DevicePointer<Scalar>,
|
||||
d_coefficients: DevicePointer<Scalar>,
|
||||
d_domain: DevicePointer<Scalar>,
|
||||
domain_size: usize,
|
||||
n: usize,
|
||||
batch_size: usize,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn evaluate_points_cuda(
|
||||
d_out: DevicePointer<Point>,
|
||||
d_coefficients: DevicePointer<Point>,
|
||||
d_domain: DevicePointer<Scalar>,
|
||||
domain_size: usize,
|
||||
n: usize,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn evaluate_points_batch_cuda(
|
||||
d_out: DevicePointer<Point>,
|
||||
d_coefficients: DevicePointer<Point>,
|
||||
d_domain: DevicePointer<Scalar>,
|
||||
domain_size: usize,
|
||||
n: usize,
|
||||
batch_size: usize,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn evaluate_scalars_on_coset_cuda(
|
||||
d_out: DevicePointer<Scalar>,
|
||||
d_coefficients: DevicePointer<Scalar>,
|
||||
d_domain: DevicePointer<Scalar>,
|
||||
domain_size: usize,
|
||||
n: usize,
|
||||
coset_powers: DevicePointer<Scalar>,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn evaluate_scalars_on_coset_batch_cuda(
|
||||
d_out: DevicePointer<Scalar>,
|
||||
d_coefficients: DevicePointer<Scalar>,
|
||||
d_domain: DevicePointer<Scalar>,
|
||||
domain_size: usize,
|
||||
n: usize,
|
||||
batch_size: usize,
|
||||
coset_powers: DevicePointer<Scalar>,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn evaluate_points_on_coset_cuda(
|
||||
d_out: DevicePointer<Point>,
|
||||
d_coefficients: DevicePointer<Point>,
|
||||
d_domain: DevicePointer<Scalar>,
|
||||
domain_size: usize,
|
||||
n: usize,
|
||||
coset_powers: DevicePointer<Scalar>,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn evaluate_points_on_coset_batch_cuda(
|
||||
d_out: DevicePointer<Point>,
|
||||
d_coefficients: DevicePointer<Point>,
|
||||
d_domain: DevicePointer<Scalar>,
|
||||
domain_size: usize,
|
||||
n: usize,
|
||||
batch_size: usize,
|
||||
coset_powers: DevicePointer<Scalar>,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn reverse_order_scalars_cuda(
|
||||
d_arr: DevicePointer<Scalar>,
|
||||
n: usize,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn reverse_order_scalars_batch_cuda(
|
||||
d_arr: DevicePointer<Scalar>,
|
||||
n: usize,
|
||||
batch_size: usize,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn reverse_order_points_cuda(
|
||||
d_arr: DevicePointer<Point>,
|
||||
n: usize,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn reverse_order_points_batch_cuda(
|
||||
d_arr: DevicePointer<Point>,
|
||||
n: usize,
|
||||
batch_size: usize,
|
||||
device_id: usize
|
||||
) -> c_int;
|
||||
|
||||
fn vec_mod_mult_point(
|
||||
inout: *mut Point,
|
||||
scalars: *const Scalar,
|
||||
n_elements: usize,
|
||||
device_id: usize,
|
||||
) -> c_int;
|
||||
|
||||
fn vec_mod_mult_scalar(
|
||||
inout: *mut Scalar,
|
||||
scalars: *const Scalar,
|
||||
n_elements: usize,
|
||||
device_id: usize,
|
||||
) -> c_int;
|
||||
|
||||
fn matrix_vec_mod_mult(
|
||||
matrix_flattened: *const Scalar,
|
||||
input: *const Scalar,
|
||||
output: *mut Scalar,
|
||||
n_elements: usize,
|
||||
device_id: usize,
|
||||
) -> c_int;
|
||||
}
|
||||
|
||||
pub fn msm(points: &[PointAffineNoInfinity], scalars: &[Scalar], device_id: usize) -> Point {
|
||||
let count = points.len();
|
||||
if count != scalars.len() {
|
||||
todo!("variable length")
|
||||
}
|
||||
let mut ret = Point::zero();
|
||||
unsafe {
|
||||
msm_cuda(
|
||||
&mut ret as *mut _ as *mut Point,
|
||||
points as *const _ as *const PointAffineNoInfinity,
|
||||
scalars as *const _ as *const Scalar,
|
||||
scalars.len(),
|
||||
device_id,
|
||||
)
|
||||
};
|
||||
|
||||
ret
|
||||
}
|
||||
|
||||
pub fn msm_batch(
|
||||
points: &[PointAffineNoInfinity],
|
||||
scalars: &[Scalar],
|
||||
batch_size: usize,
|
||||
device_id: usize,
|
||||
) -> Vec<Point> {
|
||||
let count = points.len();
|
||||
if count != scalars.len() {
|
||||
todo!("variable length")
|
||||
}
|
||||
|
||||
let mut ret = vec![Point::zero(); batch_size];
|
||||
|
||||
unsafe {
|
||||
msm_batch_cuda(
|
||||
&mut ret[0] as *mut _ as *mut Point,
|
||||
points as *const _ as *const PointAffineNoInfinity,
|
||||
scalars as *const _ as *const Scalar,
|
||||
batch_size,
|
||||
count / batch_size,
|
||||
device_id,
|
||||
)
|
||||
};
|
||||
|
||||
ret
|
||||
}
|
||||
|
||||
/// Computes an MSM ("commitment") entirely from device-resident data.
/// `points` and `scalars` are assumed to have equal length — only
/// `scalars.len()` is passed to the kernel; TODO confirm on the CUDA side.
/// Runs on device 0; the kernel's status code is discarded.
pub fn commit(
    points: &mut DeviceBuffer<PointAffineNoInfinity>,
    scalars: &mut DeviceBuffer<Scalar>,
) -> DeviceBox<Point> {
    let mut res = DeviceBox::new(&Point::zero()).unwrap();
    unsafe {
        commit_cuda(
            res.as_device_ptr(),
            scalars.as_device_ptr(),
            points.as_device_ptr(),
            scalars.len(),
            0, // device_id
        );
    }
    return res;
}
|
||||
|
||||
/// Computes `batch_size` device-side MSMs, one per contiguous chunk of
/// `scalars.len() / batch_size` elements. Returns one device-resident
/// result point per batch. Runs on device 0; status code is discarded.
pub fn commit_batch(
    points: &mut DeviceBuffer<PointAffineNoInfinity>,
    scalars: &mut DeviceBuffer<Scalar>,
    batch_size: usize,
) -> DeviceBuffer<Point> {
    // Output is fully written by the kernel, so it may start uninitialized.
    let mut res = unsafe { DeviceBuffer::uninitialized(batch_size).unwrap() };
    unsafe {
        commit_batch_cuda(
            res.as_device_ptr(),
            scalars.as_device_ptr(),
            points.as_device_ptr(),
            scalars.len() / batch_size, // per-batch MSM size
            batch_size,
            0, // device_id
        );
    }
    return res;
}
|
||||
|
||||
/// Compute an in-place NTT on the input data.
/// `inverse` selects forward vs. inverse transform. Returns the raw
/// status code from the CUDA kernel (meaning defined on the CUDA side —
/// TODO confirm 0 == success).
fn ntt_internal(values: &mut [Scalar], device_id: usize, inverse: bool) -> i32 {
    let ret_code = unsafe {
        ntt_cuda(
            values as *mut _ as *mut Scalar,
            values.len(),
            inverse,
            device_id,
        )
    };
    ret_code
}
|
||||
|
||||
/// In-place forward NTT over `values` on device `device_id`.
/// The kernel's status code is discarded.
pub fn ntt(values: &mut [Scalar], device_id: usize) {
    ntt_internal(values, device_id, false);
}
|
||||
|
||||
/// In-place inverse NTT over `values` on device `device_id`.
/// The kernel's status code is discarded.
pub fn intt(values: &mut [Scalar], device_id: usize) {
    ntt_internal(values, device_id, true);
}
|
||||
|
||||
/// Compute an in-place NTT on the input data.
/// Treats `values` as `batch_size` contiguous sub-vectors and transforms each.
/// NOTE(review): `device_id` is accepted but never forwarded — `ntt_batch_cuda`
/// is called without it; confirm whether the kernel should take a device id.
fn ntt_internal_batch(
    values: &mut [Scalar],
    device_id: usize,
    batch_size: usize,
    inverse: bool,
) -> i32 {
    unsafe {
        ntt_batch_cuda(
            values as *mut _ as *mut Scalar,
            values.len(),
            batch_size,
            inverse,
        )
    }
}
|
||||
|
||||
pub fn ntt_batch(values: &mut [Scalar], batch_size: usize, device_id: usize) {
|
||||
ntt_internal_batch(values, 0, batch_size, false);
|
||||
}
|
||||
|
||||
pub fn intt_batch(values: &mut [Scalar], batch_size: usize, device_id: usize) {
|
||||
ntt_internal_batch(values, 0, batch_size, true);
|
||||
}
|
||||
|
||||
/// Compute an in-place ECNTT on the input data.
/// Elliptic-curve NTT over projective points; `inverse` selects the
/// direction. Returns the raw kernel status code.
fn ecntt_internal(values: &mut [Point], inverse: bool, device_id: usize) -> i32 {
    unsafe {
        ecntt_cuda(
            values as *mut _ as *mut Point,
            values.len(),
            inverse,
            device_id,
        )
    }
}
|
||||
|
||||
/// In-place forward ECNTT over `values` on device `device_id`.
/// The kernel's status code is discarded.
pub fn ecntt(values: &mut [Point], device_id: usize) {
    ecntt_internal(values, false, device_id);
}
|
||||
|
||||
/// Compute an in-place iECNTT on the input data.
/// The kernel's status code is discarded.
pub fn iecntt(values: &mut [Point], device_id: usize) {
    ecntt_internal(values, true, device_id);
}
|
||||
|
||||
/// Compute an in-place ECNTT on the input data.
/// Treats `values` as `batch_size` contiguous sub-vectors and transforms each.
/// NOTE(review): `device_id` is accepted but never forwarded to
/// `ecntt_batch_cuda` — same issue as `ntt_internal_batch`; confirm intent.
fn ecntt_internal_batch(
    values: &mut [Point],
    device_id: usize,
    batch_size: usize,
    inverse: bool,
) -> i32 {
    unsafe {
        ecntt_batch_cuda(
            values as *mut _ as *mut Point,
            values.len(),
            batch_size,
            inverse,
        )
    }
}
|
||||
|
||||
pub fn ecntt_batch(values: &mut [Point], batch_size: usize, device_id: usize) {
|
||||
ecntt_internal_batch(values, 0, batch_size, false);
|
||||
}
|
||||
|
||||
/// Compute an in-place iECNTT on the input data.
|
||||
pub fn iecntt_batch(values: &mut [Point], batch_size: usize, device_id: usize) {
|
||||
ecntt_internal_batch(values, 0, batch_size, true);
|
||||
}
|
||||
|
||||
/// Builds an evaluation domain of `domain_size` scalars on the GPU
/// (`logn` is its log2 size; `inverse` selects the inverse domain).
/// Wraps the raw device pointer returned by CUDA in a `DeviceBuffer`,
/// which takes ownership and frees it on drop — TODO confirm the CUDA-side
/// allocator matches what `DeviceBuffer::from_raw_parts` expects to free.
pub fn build_domain(domain_size: usize, logn: usize, inverse: bool) -> DeviceBuffer<Scalar> {
    unsafe {
        DeviceBuffer::from_raw_parts(build_domain_cuda(
            domain_size,
            logn,
            inverse,
            0 // device_id
        ), domain_size)
    }
}
|
||||
|
||||
|
||||
/// Reverses the element order of `d_scalars` in place on device 0
/// (presumably the bit-reversal permutation used around NTTs — TODO confirm).
pub fn reverse_order_scalars(
    d_scalars: &mut DeviceBuffer<Scalar>,
) {
    unsafe { reverse_order_scalars_cuda(
        d_scalars.as_device_ptr(),
        d_scalars.len(),
        0 // device_id
    ); }
}
|
||||
|
||||
/// Reverses element order within each of `batch_size` contiguous sub-vectors
/// of `d_scalars` (each of length `d_scalars.len() / batch_size`), in place
/// on device 0.
pub fn reverse_order_scalars_batch(
    d_scalars: &mut DeviceBuffer<Scalar>,
    batch_size: usize,
) {
    unsafe { reverse_order_scalars_batch_cuda(
        d_scalars.as_device_ptr(),
        d_scalars.len() / batch_size, // per-batch length
        batch_size,
        0 // device_id
    ); }
}
|
||||
|
||||
/// Reverses the element order of `d_points` in place on device 0
/// (point analogue of `reverse_order_scalars`).
pub fn reverse_order_points(
    d_points: &mut DeviceBuffer<Point>,
) {
    unsafe { reverse_order_points_cuda(
        d_points.as_device_ptr(),
        d_points.len(),
        0 // device_id
    ); }
}
|
||||
|
||||
/// Reverses element order within each of `batch_size` contiguous sub-vectors
/// of `d_points`, in place on device 0.
pub fn reverse_order_points_batch(
    d_points: &mut DeviceBuffer<Point>,
    batch_size: usize,
) {
    unsafe { reverse_order_points_batch_cuda(
        d_points.as_device_ptr(),
        d_points.len() / batch_size, // per-batch length
        batch_size,
        0 // device_id
    ); }
}
|
||||
|
||||
/// Interpolates on the GPU: returns the coefficient form of the polynomial
/// whose evaluations over `d_domain` are `d_evaluations`.
/// `d_evaluations` is assumed to have `d_domain.len()` elements — TODO confirm.
pub fn interpolate_scalars(
    d_evaluations: &mut DeviceBuffer<Scalar>,
    d_domain: &mut DeviceBuffer<Scalar>
) -> DeviceBuffer<Scalar> {
    // Output is fully written by the kernel, so it may start uninitialized.
    let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
    unsafe { interpolate_scalars_cuda(
        res.as_device_ptr(),
        d_evaluations.as_device_ptr(),
        d_domain.as_device_ptr(),
        d_domain.len(),
        0 // device_id
    ) };
    return res;
}
|
||||
|
||||
/// Batched scalar interpolation: one interpolation of size `d_domain.len()`
/// per contiguous chunk of `d_evaluations`; returns all coefficient vectors
/// concatenated.
pub fn interpolate_scalars_batch(
    d_evaluations: &mut DeviceBuffer<Scalar>,
    d_domain: &mut DeviceBuffer<Scalar>,
    batch_size: usize,
) -> DeviceBuffer<Scalar> {
    // Output is fully written by the kernel, so it may start uninitialized.
    let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
    unsafe { interpolate_scalars_batch_cuda(
        res.as_device_ptr(),
        d_evaluations.as_device_ptr(),
        d_domain.as_device_ptr(),
        d_domain.len(),
        batch_size,
        0 // device_id
    ) };
    return res;
}
|
||||
|
||||
/// Point analogue of `interpolate_scalars`: interpolates EC points over
/// `d_domain` on the GPU and returns the coefficient form.
pub fn interpolate_points(
    d_evaluations: &mut DeviceBuffer<Point>,
    d_domain: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Point> {
    // Output is fully written by the kernel, so it may start uninitialized.
    let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
    unsafe { interpolate_points_cuda(
        res.as_device_ptr(),
        d_evaluations.as_device_ptr(),
        d_domain.as_device_ptr(),
        d_domain.len(),
        0 // device_id
    ) };
    return res;
}
|
||||
|
||||
/// Batched point interpolation: one interpolation of size `d_domain.len()`
/// per contiguous chunk of `d_evaluations`.
pub fn interpolate_points_batch(
    d_evaluations: &mut DeviceBuffer<Point>,
    d_domain: &mut DeviceBuffer<Scalar>,
    batch_size: usize,
) -> DeviceBuffer<Point> {
    // Output is fully written by the kernel, so it may start uninitialized.
    let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
    unsafe { interpolate_points_batch_cuda(
        res.as_device_ptr(),
        d_evaluations.as_device_ptr(),
        d_domain.as_device_ptr(),
        d_domain.len(),
        batch_size,
        0 // device_id
    ) };
    return res;
}
|
||||
|
||||
/// Evaluates the polynomial given by `d_coefficients` over every element of
/// `d_domain` on the GPU; returns `d_domain.len()` evaluations.
pub fn evaluate_scalars(
    d_coefficients: &mut DeviceBuffer<Scalar>,
    d_domain: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Scalar> {
    // Output is fully written by the kernel, so it may start uninitialized.
    let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
    unsafe {
        evaluate_scalars_cuda(
            res.as_device_ptr(),
            d_coefficients.as_device_ptr(),
            d_domain.as_device_ptr(),
            d_domain.len(),
            d_coefficients.len(), // polynomial size may differ from domain size
            0 // device_id
        );
    }
    return res;
}
|
||||
|
||||
/// Batched scalar evaluation: evaluates each of `batch_size` contiguous
/// coefficient chunks over `d_domain`; returns all evaluation vectors
/// concatenated.
pub fn evaluate_scalars_batch(
    d_coefficients: &mut DeviceBuffer<Scalar>,
    d_domain: &mut DeviceBuffer<Scalar>,
    batch_size: usize,
) -> DeviceBuffer<Scalar> {
    // Output is fully written by the kernel, so it may start uninitialized.
    let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
    unsafe {
        evaluate_scalars_batch_cuda(
            res.as_device_ptr(),
            d_coefficients.as_device_ptr(),
            d_domain.as_device_ptr(),
            d_domain.len(),
            d_coefficients.len() / batch_size, // per-batch polynomial size
            batch_size,
            0 // device_id
        );
    }
    return res;
}
|
||||
|
||||
/// Point analogue of `evaluate_scalars`: evaluates the EC-point polynomial
/// `d_coefficients` over `d_domain` on the GPU.
pub fn evaluate_points(
    d_coefficients: &mut DeviceBuffer<Point>,
    d_domain: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Point> {
    // Output is fully written by the kernel, so it may start uninitialized.
    let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
    unsafe {
        evaluate_points_cuda(
            res.as_device_ptr(),
            d_coefficients.as_device_ptr(),
            d_domain.as_device_ptr(),
            d_domain.len(),
            d_coefficients.len(),
            0 // device_id
        );
    }
    return res;
}
|
||||
|
||||
/// Batched point evaluation: evaluates each of `batch_size` contiguous
/// coefficient chunks over `d_domain`.
pub fn evaluate_points_batch(
    d_coefficients: &mut DeviceBuffer<Point>,
    d_domain: &mut DeviceBuffer<Scalar>,
    batch_size: usize,
) -> DeviceBuffer<Point> {
    // Output is fully written by the kernel, so it may start uninitialized.
    let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
    unsafe {
        evaluate_points_batch_cuda(
            res.as_device_ptr(),
            d_coefficients.as_device_ptr(),
            d_domain.as_device_ptr(),
            d_domain.len(),
            d_coefficients.len() / batch_size, // per-batch polynomial size
            batch_size,
            0 // device_id
        );
    }
    return res;
}
|
||||
|
||||
/// Evaluates `d_coefficients` over a coset of `d_domain` on the GPU.
/// `coset_powers` supplies the coset shift factors consumed by the kernel
/// (exact convention defined on the CUDA side — TODO confirm).
pub fn evaluate_scalars_on_coset(
    d_coefficients: &mut DeviceBuffer<Scalar>,
    d_domain: &mut DeviceBuffer<Scalar>,
    coset_powers: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Scalar> {
    // Output is fully written by the kernel, so it may start uninitialized.
    let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
    unsafe {
        evaluate_scalars_on_coset_cuda(
            res.as_device_ptr(),
            d_coefficients.as_device_ptr(),
            d_domain.as_device_ptr(),
            d_domain.len(),
            d_coefficients.len(),
            coset_powers.as_device_ptr(),
            0 // device_id
        );
    }
    return res;
}
|
||||
|
||||
/// Batched variant of `evaluate_scalars_on_coset`: evaluates each of
/// `batch_size` contiguous coefficient chunks over the coset.
pub fn evaluate_scalars_on_coset_batch(
    d_coefficients: &mut DeviceBuffer<Scalar>,
    d_domain: &mut DeviceBuffer<Scalar>,
    batch_size: usize,
    coset_powers: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Scalar> {
    // Output is fully written by the kernel, so it may start uninitialized.
    let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
    unsafe {
        evaluate_scalars_on_coset_batch_cuda(
            res.as_device_ptr(),
            d_coefficients.as_device_ptr(),
            d_domain.as_device_ptr(),
            d_domain.len(),
            d_coefficients.len() / batch_size, // per-batch polynomial size
            batch_size,
            coset_powers.as_device_ptr(),
            0 // device_id
        );
    }
    return res;
}
|
||||
|
||||
/// Point analogue of `evaluate_scalars_on_coset`: evaluates the EC-point
/// polynomial `d_coefficients` over a coset of `d_domain`.
pub fn evaluate_points_on_coset(
    d_coefficients: &mut DeviceBuffer<Point>,
    d_domain: &mut DeviceBuffer<Scalar>,
    coset_powers: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Point> {
    // Output is fully written by the kernel, so it may start uninitialized.
    let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
    unsafe {
        evaluate_points_on_coset_cuda(
            res.as_device_ptr(),
            d_coefficients.as_device_ptr(),
            d_domain.as_device_ptr(),
            d_domain.len(),
            d_coefficients.len(),
            coset_powers.as_device_ptr(),
            0 // device_id
        );
    }
    return res;
}
|
||||
|
||||
/// Batched variant of `evaluate_points_on_coset`: evaluates each of
/// `batch_size` contiguous coefficient chunks over the coset.
pub fn evaluate_points_on_coset_batch(
    d_coefficients: &mut DeviceBuffer<Point>,
    d_domain: &mut DeviceBuffer<Scalar>,
    batch_size: usize,
    coset_powers: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Point> {
    // Output is fully written by the kernel, so it may start uninitialized.
    let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
    unsafe {
        evaluate_points_on_coset_batch_cuda(
            res.as_device_ptr(),
            d_coefficients.as_device_ptr(),
            d_domain.as_device_ptr(),
            d_domain.len(),
            d_coefficients.len() / batch_size, // per-batch polynomial size
            batch_size,
            coset_powers.as_device_ptr(),
            0 // device_id
        );
    }
    return res;
}
|
||||
|
||||
pub fn multp_vec(a: &mut [Point], b: &[Scalar], device_id: usize) {
|
||||
assert_eq!(a.len(), b.len());
|
||||
unsafe {
|
||||
vec_mod_mult_point(
|
||||
a as *mut _ as *mut Point,
|
||||
b as *const _ as *const Scalar,
|
||||
a.len(),
|
||||
device_id,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn mult_sc_vec(a: &mut [Scalar], b: &[Scalar], device_id: usize) {
|
||||
assert_eq!(a.len(), b.len());
|
||||
unsafe {
|
||||
vec_mod_mult_scalar(
|
||||
a as *mut _ as *mut Scalar,
|
||||
b as *const _ as *const Scalar,
|
||||
a.len(),
|
||||
device_id,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Multiply a matrix by a scalar:
|
||||
// `a` - flattenned matrix;
|
||||
// `b` - vector to multiply `a` by;
|
||||
pub fn mult_matrix_by_vec(a: &[Scalar], b: &[Scalar], device_id: usize) -> Vec<Scalar> {
|
||||
let mut c = Vec::with_capacity(b.len());
|
||||
for i in 0..b.len() {
|
||||
c.push(Scalar::zero());
|
||||
}
|
||||
unsafe {
|
||||
matrix_vec_mod_mult(
|
||||
a as *const _ as *const Scalar,
|
||||
b as *const _ as *const Scalar,
|
||||
c.as_mut_slice() as *mut _ as *mut Scalar,
|
||||
b.len(),
|
||||
device_id,
|
||||
);
|
||||
}
|
||||
c
|
||||
}
|
||||
|
||||
pub fn clone_buffer<T: DeviceCopy>(buf: &mut DeviceBuffer<T>) -> DeviceBuffer<T> {
|
||||
let mut buf_cpy = unsafe { DeviceBuffer::uninitialized(buf.len()).unwrap() };
|
||||
unsafe { buf_cpy.copy_from(buf) };
|
||||
return buf_cpy;
|
||||
}
|
||||
|
||||
pub fn get_rng(seed: Option<u64>) -> Box<dyn RngCore> {
|
||||
let rng: Box<dyn RngCore> = match seed {
|
||||
Some(seed) => Box::new(StdRng::seed_from_u64(seed)),
|
||||
None => Box::new(rand::thread_rng()),
|
||||
};
|
||||
rng
|
||||
}
|
||||
|
||||
/// Initializes the CUDA driver and pushes a context for device 0.
/// NOTE(review): the context handle `_ctx` is dropped when this function
/// returns; confirm rustacuda's `Context` drop semantics do not pop/destroy
/// the context that later kernel calls rely on.
fn set_up_device() {
    // Set up the context, load the module, and create a stream to run kernels in.
    rustacuda::init(CudaFlags::empty()).unwrap();
    let device = Device::get_device(0).unwrap();
    let _ctx = Context::create_and_push(ContextFlags::MAP_HOST | ContextFlags::SCHED_AUTO, device).unwrap();
}
|
||||
|
||||
/// Samples `count` random BN254 points in affine no-infinity representation
/// using the provided RNG (projective sample, then z stripped).
pub fn generate_random_points(
    count: usize,
    mut rng: Box<dyn RngCore>,
) -> Vec<PointAffineNoInfinity> {
    (0..count)
        .map(|_| Point::from_ark(G1Projective_BN254::rand(&mut rng)).to_xy_strip_z())
        .collect()
}
|
||||
|
||||
/// Samples `count` random BN254 points in projective representation
/// using the provided RNG.
pub fn generate_random_points_proj(count: usize, mut rng: Box<dyn RngCore>) -> Vec<Point> {
    (0..count)
        .map(|_| Point::from_ark(G1Projective_BN254::rand(&mut rng)))
        .collect()
}
|
||||
|
||||
/// Samples `count` random BN254 scalar-field elements using the provided RNG
/// (arkworks sample converted through its big-integer representation).
pub fn generate_random_scalars(count: usize, mut rng: Box<dyn RngCore>) -> Vec<Scalar> {
    (0..count)
        .map(|_| Scalar::from_ark(Fr_BN254::rand(&mut rng).into_repr()))
        .collect()
}
|
||||
|
||||
pub fn set_up_points(test_size: usize, log_domain_size: usize, inverse: bool) -> (Vec<Point>, DeviceBuffer<Point>, DeviceBuffer<Scalar>) {
|
||||
set_up_device();
|
||||
|
||||
let d_domain = build_domain(1 << log_domain_size, log_domain_size, inverse);
|
||||
|
||||
let seed = Some(0); // fix the rng to get two equal scalar
|
||||
let vector = generate_random_points_proj(test_size, get_rng(seed));
|
||||
let mut vector_mut = vector.clone();
|
||||
|
||||
let mut d_vector = DeviceBuffer::from_slice(&vector[..]).unwrap();
|
||||
(vector_mut, d_vector, d_domain)
|
||||
}
|
||||
|
||||
pub fn set_up_scalars(test_size: usize, log_domain_size: usize, inverse: bool) -> (Vec<Scalar>, DeviceBuffer<Scalar>, DeviceBuffer<Scalar>) {
|
||||
set_up_device();
|
||||
|
||||
let d_domain = build_domain(1 << log_domain_size, log_domain_size, inverse);
|
||||
|
||||
let seed = Some(0); // fix the rng to get two equal scalars
|
||||
let mut vector_mut = generate_random_scalars(test_size, get_rng(seed));
|
||||
|
||||
let mut d_vector = DeviceBuffer::from_slice(&vector_mut[..]).unwrap();
|
||||
(vector_mut, d_vector, d_domain)
|
||||
}
|
||||
|
||||
4
bn254/src/lib.rs
Normal file
4
bn254/src/lib.rs
Normal file
@@ -0,0 +1,4 @@
|
||||
// Public modules of the bn254 crate.
pub mod test_bn254;    // arkworks interop impls + tests
pub mod basic_structs; // ScalarTrait and related basic types
pub mod from_cuda;     // Rust wrappers over the CUDA kernels
pub mod curve_structs; // Point / Scalar / Base curve types
|
||||
816
bn254/src/test_bn254.rs
Normal file
816
bn254/src/test_bn254.rs
Normal file
@@ -0,0 +1,816 @@
|
||||
use std::ffi::{c_int, c_uint};
|
||||
use ark_std::UniformRand;
|
||||
use rand::{rngs::StdRng, RngCore, SeedableRng};
|
||||
use rustacuda::CudaFlags;
|
||||
use rustacuda::memory::DeviceBox;
|
||||
use rustacuda::prelude::{DeviceBuffer, Device, ContextFlags, Context};
|
||||
use rustacuda_core::DevicePointer;
|
||||
use std::mem::transmute;
|
||||
pub use crate::basic_structs::scalar::ScalarTrait;
|
||||
pub use crate::curve_structs::*;
|
||||
use icicle_core::utils::{u32_vec_to_u64_vec, u64_vec_to_u32_vec};
|
||||
use std::marker::PhantomData;
|
||||
use std::convert::TryInto;
|
||||
use ark_bn254::{Fq as Fq_BN254, Fr as Fr_BN254, G1Affine as G1Affine_BN254, G1Projective as G1Projective_BN254};
|
||||
use ark_ec::AffineCurve;
|
||||
use ark_ff::{BigInteger384, BigInteger256, PrimeField};
|
||||
use rustacuda::memory::{CopyDestination, DeviceCopy};
|
||||
|
||||
|
||||
impl Scalar {
    /// Converts the u32 limbs to an arkworks 256-bit big integer.
    pub fn to_biginteger254(&self) -> BigInteger256 {
        BigInteger256::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap())
    }

    /// Same conversion as `to_biginteger254`; kept as the conventional name
    /// used by callers (e.g. the MSM tests).
    pub fn to_ark(&self) -> BigInteger256 {
        BigInteger256::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap())
    }

    /// Builds a `Scalar` from an arkworks 256-bit big integer.
    pub fn from_biginteger256(ark: BigInteger256) -> Self {
        Self{ value: u64_vec_to_u32_vec(&ark.0).try_into().unwrap(), phantom : PhantomData}
    }

    /// Reinterprets the raw bytes as a `BigInteger256`.
    /// NOTE(review): assumes identical layout between `Scalar` and
    /// `BigInteger256` — confirm both are 8 x u32 / 4 x u64 little-endian.
    pub fn to_biginteger256_transmute(&self) -> BigInteger256 {
        unsafe { transmute(*self) }
    }

    /// Inverse of `to_biginteger256_transmute` (same layout assumption).
    pub fn from_biginteger_transmute(v: BigInteger256) -> Scalar {
        Scalar{ value: unsafe{ transmute(v)}, phantom : PhantomData }
    }

    /// Reinterprets the raw bytes as an arkworks `Fr` element.
    /// NOTE(review): no Montgomery conversion is performed — confirm the
    /// CUDA side stores scalars in the same internal form as arkworks.
    pub fn to_ark_transmute(&self) -> Fr_BN254 {
        unsafe { std::mem::transmute(*self) }
    }

    /// Inverse of `to_ark_transmute` (same representation assumption).
    pub fn from_ark_transmute(v: &Fr_BN254) -> Scalar {
        unsafe { std::mem::transmute_copy(v) }
    }

    /// Interprets the limbs directly as an `Fr` value (no `from_repr` reduction).
    pub fn to_ark_mod_p(&self) -> Fr_BN254 {
        Fr_BN254::new(BigInteger256::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap()))
    }

    /// Interprets the limbs as a canonical representative via `from_repr`.
    /// Panics (`unwrap`) if the value is not a valid field representative.
    pub fn to_ark_repr(&self) -> Fr_BN254 {
        Fr_BN254::from_repr(BigInteger256::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap())).unwrap()
    }

    /// Builds a `Scalar` from an arkworks big integer (limb-width conversion only).
    pub fn from_ark(v: BigInteger256) -> Scalar {
        Self { value : u64_vec_to_u32_vec(&v.0).try_into().unwrap(), phantom: PhantomData}
    }

}
|
||||
|
||||
impl Base {
    /// Converts the u32 limbs to an arkworks 256-bit big integer.
    pub fn to_ark(&self) -> BigInteger256 {
        BigInteger256::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap())
    }

    /// Builds a `Base` from an arkworks 256-bit big integer.
    pub fn from_ark(ark: BigInteger256) -> Self {
        Self::from_limbs(&u64_vec_to_u32_vec(&ark.0))
    }
}
|
||||
|
||||
|
||||
impl Point {
    /// Converts to an arkworks projective point via the affine form.
    pub fn to_ark(&self) -> G1Projective_BN254 {
        self.to_ark_affine().into_projective()
    }

    /// Converts to an arkworks affine point by dividing x and y by z.
    /// Panics (`unwrap`) if z is zero, i.e. the point at infinity.
    pub fn to_ark_affine(&self) -> G1Affine_BN254 {
        //TODO: generic conversion
        use ark_ff::Field;
        use std::ops::Mul;
        let proj_x_field = Fq_BN254::from_le_bytes_mod_order(&self.x.to_bytes_le());
        let proj_y_field = Fq_BN254::from_le_bytes_mod_order(&self.y.to_bytes_le());
        let proj_z_field = Fq_BN254::from_le_bytes_mod_order(&self.z.to_bytes_le());
        let inverse_z = proj_z_field.inverse().unwrap();
        let aff_x = proj_x_field.mul(inverse_z);
        let aff_y = proj_y_field.mul(inverse_z);
        G1Affine_BN254::new(aff_x, aff_y, false)
    }

    /// Builds a `Point` (normalized to z = 1) from an arkworks projective point.
    /// NOTE(review): normalizes with z^-2 / z^-3 (Jacobian-coordinate factors)
    /// while `to_ark_affine` divides by z once; this is consistent only because
    /// the result here has z = 1 — confirm the coordinate conventions.
    /// Panics (`unwrap`) if z is zero.
    pub fn from_ark(ark: G1Projective_BN254) -> Point {
        use ark_ff::Field;
        let z_inv = ark.z.inverse().unwrap();
        let z_invsq = z_inv * z_inv;
        let z_invq3 = z_invsq * z_inv;
        Point {
            x: Base::from_ark((ark.x * z_invsq).into_repr()),
            y: Base::from_ark((ark.y * z_invq3).into_repr()),
            z: Base::one(),
        }
    }
}
|
||||
|
||||
impl PointAffineNoInfinity {

    /// Converts to an arkworks affine point, interpreting the limbs directly
    /// as field values (no `from_repr` reduction).
    pub fn to_ark(&self) -> G1Affine_BN254 {
        G1Affine_BN254::new(Fq_BN254::new(self.x.to_ark()), Fq_BN254::new(self.y.to_ark()), false)
    }

    /// Converts to an arkworks affine point via `from_repr`.
    /// Panics (`unwrap`) if a coordinate is not a valid field representative.
    pub fn to_ark_repr(&self) -> G1Affine_BN254 {
        G1Affine_BN254::new(
            Fq_BN254::from_repr(self.x.to_ark()).unwrap(),
            Fq_BN254::from_repr(self.y.to_ark()).unwrap(),
            false,
        )
    }

    /// Builds an affine no-infinity point from an arkworks affine point.
    pub fn from_ark(p: &G1Affine_BN254) -> Self {
        PointAffineNoInfinity {
            x: Base::from_ark(p.x.into_repr()),
            y: Base::from_ark(p.y.into_repr()),
        }
    }
}
|
||||
|
||||
impl Point {
    /// Converts this projective point to the affine no-infinity representation
    /// by normalizing through arkworks. Panics if z is zero (via `to_ark_affine`).
    pub fn to_affine(&self) -> PointAffineNoInfinity {
        let ark_affine = self.to_ark_affine();
        PointAffineNoInfinity {
            x: Base::from_ark(ark_affine.x.into_repr()),
            y: Base::from_ark(ark_affine.y.into_repr()),
        }
    }
}
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) mod tests_bn254 {
|
||||
use std::ops::Add;
|
||||
use ark_bn254::{Fr, G1Affine, G1Projective};
|
||||
use ark_ec::{msm::VariableBaseMSM, AffineCurve, ProjectiveCurve};
|
||||
use ark_ff::{FftField, Field, Zero, PrimeField};
|
||||
use ark_std::UniformRand;
|
||||
use rustacuda::prelude::{DeviceBuffer, CopyDestination};
|
||||
use crate::curve_structs::{Point, Scalar, Base};
|
||||
use crate::basic_structs::scalar::ScalarTrait;
|
||||
use crate::from_cuda::{generate_random_points, get_rng, generate_random_scalars, msm, msm_batch, set_up_scalars, commit, commit_batch, ntt, intt, generate_random_points_proj, ecntt, iecntt, ntt_batch, ecntt_batch, iecntt_batch, intt_batch, reverse_order_scalars_batch, interpolate_scalars_batch, set_up_points, reverse_order_points, interpolate_points, reverse_order_points_batch, interpolate_points_batch, evaluate_scalars, interpolate_scalars, reverse_order_scalars, evaluate_points, build_domain, evaluate_scalars_on_coset, evaluate_points_on_coset, mult_matrix_by_vec, mult_sc_vec, multp_vec,evaluate_scalars_batch, evaluate_points_batch, evaluate_scalars_on_coset_batch, evaluate_points_on_coset_batch};
|
||||
|
||||
fn random_points_ark_proj(nof_elements: usize) -> Vec<G1Projective> {
|
||||
let mut rng = ark_std::rand::thread_rng();
|
||||
let mut points_ga: Vec<G1Projective> = Vec::new();
|
||||
for _ in 0..nof_elements {
|
||||
let aff = G1Projective::rand(&mut rng);
|
||||
points_ga.push(aff);
|
||||
}
|
||||
points_ga
|
||||
}
|
||||
|
||||
fn ecntt_arc_naive(
|
||||
points: &Vec<G1Projective>,
|
||||
size: usize,
|
||||
inverse: bool,
|
||||
) -> Vec<G1Projective> {
|
||||
let mut result: Vec<G1Projective> = Vec::new();
|
||||
for _ in 0..size {
|
||||
result.push(G1Projective::zero());
|
||||
}
|
||||
let rou: Fr;
|
||||
if !inverse {
|
||||
rou = Fr::get_root_of_unity(size).unwrap();
|
||||
} else {
|
||||
rou = Fr::inverse(&Fr::get_root_of_unity(size).unwrap()).unwrap();
|
||||
}
|
||||
for k in 0..size {
|
||||
for l in 0..size {
|
||||
let pow: [u64; 1] = [(l * k).try_into().unwrap()];
|
||||
let mul_rou = Fr::pow(&rou, &pow);
|
||||
result[k] = result[k].add(points[l].into_affine().mul(mul_rou));
|
||||
}
|
||||
}
|
||||
if inverse {
|
||||
let size2 = size as u64;
|
||||
for k in 0..size {
|
||||
let multfactor = Fr::inverse(&Fr::from(size2)).unwrap();
|
||||
result[k] = result[k].into_affine().mul(multfactor);
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
fn check_eq(points: &Vec<G1Projective>, points2: &Vec<G1Projective>) -> bool {
|
||||
let mut eq = true;
|
||||
for i in 0..points.len() {
|
||||
if points2[i].ne(&points[i]) {
|
||||
eq = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
return eq;
|
||||
}
|
||||
|
||||
fn test_naive_ark_ecntt(size: usize) {
|
||||
let points = random_points_ark_proj(size);
|
||||
let result1: Vec<G1Projective> = ecntt_arc_naive(&points, size, false);
|
||||
let result2: Vec<G1Projective> = ecntt_arc_naive(&result1, size, true);
|
||||
assert!(!check_eq(&result2, &result1));
|
||||
assert!(check_eq(&result2, &points));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_msm() {
|
||||
let test_sizes = [6, 9];
|
||||
|
||||
for pow2 in test_sizes {
|
||||
let count = 1 << pow2;
|
||||
let seed = None; // set Some to provide seed
|
||||
let points = generate_random_points(count, get_rng(seed));
|
||||
let scalars = generate_random_scalars(count, get_rng(seed));
|
||||
|
||||
let msm_result = msm(&points, &scalars, 0);
|
||||
|
||||
let point_r_ark: Vec<_> = points.iter().map(|x| x.to_ark_repr()).collect();
|
||||
let scalars_r_ark: Vec<_> = scalars.iter().map(|x| x.to_ark()).collect();
|
||||
|
||||
let msm_result_ark = VariableBaseMSM::multi_scalar_mul(&point_r_ark, &scalars_r_ark);
|
||||
|
||||
assert_eq!(msm_result.to_ark_affine(), msm_result_ark);
|
||||
assert_eq!(msm_result.to_ark(), msm_result_ark);
|
||||
assert_eq!(
|
||||
msm_result.to_ark_affine(),
|
||||
Point::from_ark(msm_result_ark).to_ark_affine()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_batch_msm() {
|
||||
for batch_pow2 in [2, 4] {
|
||||
for pow2 in [4, 6] {
|
||||
let msm_size = 1 << pow2;
|
||||
let batch_size = 1 << batch_pow2;
|
||||
let seed = None; // set Some to provide seed
|
||||
let points_batch = generate_random_points(msm_size * batch_size, get_rng(seed));
|
||||
let scalars_batch = generate_random_scalars(msm_size * batch_size, get_rng(seed));
|
||||
|
||||
let point_r_ark: Vec<_> = points_batch.iter().map(|x| x.to_ark_repr()).collect();
|
||||
let scalars_r_ark: Vec<_> = scalars_batch.iter().map(|x| x.to_ark()).collect();
|
||||
|
||||
let expected: Vec<_> = point_r_ark
|
||||
.chunks(msm_size)
|
||||
.zip(scalars_r_ark.chunks(msm_size))
|
||||
.map(|p| Point::from_ark(VariableBaseMSM::multi_scalar_mul(p.0, p.1)))
|
||||
.collect();
|
||||
|
||||
let result = msm_batch(&points_batch, &scalars_batch, batch_size, 0);
|
||||
|
||||
assert_eq!(result, expected);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_commit() {
|
||||
let test_size = 1 << 8;
|
||||
let seed = Some(0);
|
||||
let (mut scalars, mut d_scalars, _) = set_up_scalars(test_size, 0, false);
|
||||
let mut points = generate_random_points(test_size, get_rng(seed));
|
||||
let mut d_points = DeviceBuffer::from_slice(&points[..]).unwrap();
|
||||
|
||||
let msm_result = msm(&points, &scalars, 0);
|
||||
let mut d_commit_result = commit(&mut d_points, &mut d_scalars);
|
||||
let mut h_commit_result = Point::zero();
|
||||
d_commit_result.copy_to(&mut h_commit_result).unwrap();
|
||||
|
||||
assert_eq!(msm_result, h_commit_result);
|
||||
assert_ne!(msm_result, Point::zero());
|
||||
assert_ne!(h_commit_result, Point::zero());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_batch_commit() {
|
||||
let batch_size = 4;
|
||||
let test_size = 1 << 12;
|
||||
let seed = Some(0);
|
||||
let (scalars, mut d_scalars, _) = set_up_scalars(test_size * batch_size, 0, false);
|
||||
let points = generate_random_points(test_size * batch_size, get_rng(seed));
|
||||
let mut d_points = DeviceBuffer::from_slice(&points[..]).unwrap();
|
||||
|
||||
let msm_result = msm_batch(&points, &scalars, batch_size, 0);
|
||||
let mut d_commit_result = commit_batch(&mut d_points, &mut d_scalars, batch_size);
|
||||
let mut h_commit_result: Vec<Point> = (0..batch_size).map(|_| Point::zero()).collect();
|
||||
d_commit_result.copy_to(&mut h_commit_result[..]).unwrap();
|
||||
|
||||
assert_eq!(msm_result, h_commit_result);
|
||||
for h in h_commit_result {
|
||||
assert_ne!(h, Point::zero());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_ntt() {
|
||||
//NTT
|
||||
let seed = None; //some value to fix the rng
|
||||
let test_size = 1 << 3;
|
||||
|
||||
let scalars = generate_random_scalars(test_size, get_rng(seed));
|
||||
|
||||
let mut ntt_result = scalars.clone();
|
||||
ntt(&mut ntt_result, 0);
|
||||
|
||||
assert_ne!(ntt_result, scalars);
|
||||
|
||||
let mut intt_result = ntt_result.clone();
|
||||
|
||||
intt(&mut intt_result, 0);
|
||||
|
||||
assert_eq!(intt_result, scalars);
|
||||
|
||||
//ECNTT
|
||||
let points_proj = generate_random_points_proj(test_size, get_rng(seed));
|
||||
|
||||
test_naive_ark_ecntt(test_size);
|
||||
|
||||
assert!(points_proj[0].to_ark().into_affine().is_on_curve());
|
||||
|
||||
//naive ark
|
||||
let points_proj_ark = points_proj
|
||||
.iter()
|
||||
.map(|p| p.to_ark())
|
||||
.collect::<Vec<G1Projective>>();
|
||||
|
||||
let ecntt_result_naive = ecntt_arc_naive(&points_proj_ark, points_proj_ark.len(), false);
|
||||
|
||||
let iecntt_result_naive = ecntt_arc_naive(&ecntt_result_naive, points_proj_ark.len(), true);
|
||||
|
||||
assert_eq!(points_proj_ark, iecntt_result_naive);
|
||||
|
||||
//ingo gpu
|
||||
let mut ecntt_result = points_proj.to_vec();
|
||||
ecntt(&mut ecntt_result, 0);
|
||||
|
||||
assert_ne!(ecntt_result, points_proj);
|
||||
|
||||
let mut iecntt_result = ecntt_result.clone();
|
||||
iecntt(&mut iecntt_result, 0);
|
||||
|
||||
assert_eq!(
|
||||
iecntt_result_naive,
|
||||
points_proj
|
||||
.iter()
|
||||
.map(|p| p.to_ark_affine())
|
||||
.collect::<Vec<G1Affine>>()
|
||||
);
|
||||
assert_eq!(
|
||||
iecntt_result
|
||||
.iter()
|
||||
.map(|p| p.to_ark_affine())
|
||||
.collect::<Vec<G1Affine>>(),
|
||||
points_proj
|
||||
.iter()
|
||||
.map(|p| p.to_ark_affine())
|
||||
.collect::<Vec<G1Affine>>()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_ntt_batch() {
|
||||
//NTT
|
||||
let seed = None; //some value to fix the rng
|
||||
let test_size = 1 << 5;
|
||||
let batches = 4;
|
||||
|
||||
let scalars_batch: Vec<Scalar> =
|
||||
generate_random_scalars(test_size * batches, get_rng(seed));
|
||||
|
||||
let mut scalar_vec_of_vec: Vec<Vec<Scalar>> = Vec::new();
|
||||
|
||||
for i in 0..batches {
|
||||
scalar_vec_of_vec.push(scalars_batch[i * test_size..(i + 1) * test_size].to_vec());
|
||||
}
|
||||
|
||||
let mut ntt_result = scalars_batch.clone();
|
||||
|
||||
// do batch ntt
|
||||
ntt_batch(&mut ntt_result, test_size, 0);
|
||||
|
||||
let mut ntt_result_vec_of_vec = Vec::new();
|
||||
|
||||
// do ntt for every chunk
|
||||
for i in 0..batches {
|
||||
ntt_result_vec_of_vec.push(scalar_vec_of_vec[i].clone());
|
||||
ntt(&mut ntt_result_vec_of_vec[i], 0);
|
||||
}
|
||||
|
||||
// check that the ntt of each vec of scalars is equal to the intt of the specific batch
|
||||
for i in 0..batches {
|
||||
assert_eq!(
|
||||
ntt_result_vec_of_vec[i],
|
||||
ntt_result[i * test_size..(i + 1) * test_size]
|
||||
);
|
||||
}
|
||||
|
||||
// check that ntt output is different from input
|
||||
assert_ne!(ntt_result, scalars_batch);
|
||||
|
||||
let mut intt_result = ntt_result.clone();
|
||||
|
||||
// do batch intt
|
||||
intt_batch(&mut intt_result, test_size, 0);
|
||||
|
||||
let mut intt_result_vec_of_vec = Vec::new();
|
||||
|
||||
// do intt for every chunk
|
||||
for i in 0..batches {
|
||||
intt_result_vec_of_vec.push(ntt_result_vec_of_vec[i].clone());
|
||||
intt(&mut intt_result_vec_of_vec[i], 0);
|
||||
}
|
||||
|
||||
// check that the intt of each vec of scalars is equal to the intt of the specific batch
|
||||
for i in 0..batches {
|
||||
assert_eq!(
|
||||
intt_result_vec_of_vec[i],
|
||||
intt_result[i * test_size..(i + 1) * test_size]
|
||||
);
|
||||
}
|
||||
|
||||
assert_eq!(intt_result, scalars_batch);
|
||||
|
||||
// //ECNTT
|
||||
let points_proj = generate_random_points_proj(test_size * batches, get_rng(seed));
|
||||
|
||||
let mut points_vec_of_vec: Vec<Vec<Point>> = Vec::new();
|
||||
|
||||
for i in 0..batches {
|
||||
points_vec_of_vec.push(points_proj[i * test_size..(i + 1) * test_size].to_vec());
|
||||
}
|
||||
|
||||
let mut ntt_result_points = points_proj.clone();
|
||||
|
||||
// do batch ecintt
|
||||
ecntt_batch(&mut ntt_result_points, test_size, 0);
|
||||
|
||||
let mut ntt_result_points_vec_of_vec = Vec::new();
|
||||
|
||||
for i in 0..batches {
|
||||
ntt_result_points_vec_of_vec.push(points_vec_of_vec[i].clone());
|
||||
ecntt(&mut ntt_result_points_vec_of_vec[i], 0);
|
||||
}
|
||||
|
||||
for i in 0..batches {
|
||||
assert_eq!(
|
||||
ntt_result_points_vec_of_vec[i],
|
||||
ntt_result_points[i * test_size..(i + 1) * test_size]
|
||||
);
|
||||
}
|
||||
|
||||
assert_ne!(ntt_result_points, points_proj);
|
||||
|
||||
let mut intt_result_points = ntt_result_points.clone();
|
||||
|
||||
// do batch ecintt
|
||||
iecntt_batch(&mut intt_result_points, test_size, 0);
|
||||
|
||||
let mut intt_result_points_vec_of_vec = Vec::new();
|
||||
|
||||
// do ecintt for every chunk
|
||||
for i in 0..batches {
|
||||
intt_result_points_vec_of_vec.push(ntt_result_points_vec_of_vec[i].clone());
|
||||
iecntt(&mut intt_result_points_vec_of_vec[i], 0);
|
||||
}
|
||||
|
||||
// check that the ecintt of each vec of scalars is equal to the intt of the specific batch
|
||||
for i in 0..batches {
|
||||
assert_eq!(
|
||||
intt_result_points_vec_of_vec[i],
|
||||
intt_result_points[i * test_size..(i + 1) * test_size]
|
||||
);
|
||||
}
|
||||
|
||||
assert_eq!(intt_result_points, points_proj);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_scalar_interpolation() {
|
||||
let log_test_size = 7;
|
||||
let test_size = 1 << log_test_size;
|
||||
let (mut evals_mut, mut d_evals, mut d_domain) = set_up_scalars(test_size, log_test_size, true);
|
||||
|
||||
reverse_order_scalars(&mut d_evals);
|
||||
let mut d_coeffs = interpolate_scalars(&mut d_evals, &mut d_domain);
|
||||
intt(&mut evals_mut, 0);
|
||||
let mut h_coeffs: Vec<Scalar> = (0..test_size).map(|_| Scalar::zero()).collect();
|
||||
d_coeffs.copy_to(&mut h_coeffs[..]).unwrap();
|
||||
|
||||
assert_eq!(h_coeffs, evals_mut);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_scalar_batch_interpolation() {
|
||||
let batch_size = 4;
|
||||
let log_test_size = 10;
|
||||
let test_size = 1 << log_test_size;
|
||||
let (mut evals_mut, mut d_evals, mut d_domain) = set_up_scalars(test_size * batch_size, log_test_size, true);
|
||||
|
||||
reverse_order_scalars_batch(&mut d_evals, batch_size);
|
||||
let mut d_coeffs = interpolate_scalars_batch(&mut d_evals, &mut d_domain, batch_size);
|
||||
intt_batch(&mut evals_mut, test_size, 0);
|
||||
let mut h_coeffs: Vec<Scalar> = (0..test_size * batch_size).map(|_| Scalar::zero()).collect();
|
||||
d_coeffs.copy_to(&mut h_coeffs[..]).unwrap();
|
||||
|
||||
assert_eq!(h_coeffs, evals_mut);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_point_interpolation() {
|
||||
let log_test_size = 6;
|
||||
let test_size = 1 << log_test_size;
|
||||
let (mut evals_mut, mut d_evals, mut d_domain) = set_up_points(test_size, log_test_size, true);
|
||||
|
||||
reverse_order_points(&mut d_evals);
|
||||
let mut d_coeffs = interpolate_points(&mut d_evals, &mut d_domain);
|
||||
iecntt(&mut evals_mut[..], 0);
|
||||
let mut h_coeffs: Vec<Point> = (0..test_size).map(|_| Point::zero()).collect();
|
||||
d_coeffs.copy_to(&mut h_coeffs[..]).unwrap();
|
||||
|
||||
assert_eq!(h_coeffs, *evals_mut);
|
||||
for h in h_coeffs.iter() {
|
||||
assert_ne!(*h, Point::zero());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_point_batch_interpolation() {
|
||||
let batch_size = 4;
|
||||
let log_test_size = 6;
|
||||
let test_size = 1 << log_test_size;
|
||||
let (mut evals_mut, mut d_evals, mut d_domain) = set_up_points(test_size * batch_size, log_test_size, true);
|
||||
|
||||
reverse_order_points_batch(&mut d_evals, batch_size);
|
||||
let mut d_coeffs = interpolate_points_batch(&mut d_evals, &mut d_domain, batch_size);
|
||||
iecntt_batch(&mut evals_mut[..], test_size, 0);
|
||||
let mut h_coeffs: Vec<Point> = (0..test_size * batch_size).map(|_| Point::zero()).collect();
|
||||
d_coeffs.copy_to(&mut h_coeffs[..]).unwrap();
|
||||
|
||||
assert_eq!(h_coeffs, *evals_mut);
|
||||
for h in h_coeffs.iter() {
|
||||
assert_ne!(*h, Point::zero());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_scalar_evaluation() {
|
||||
let log_test_domain_size = 8;
|
||||
let coeff_size = 1 << 6;
|
||||
let (h_coeffs, mut d_coeffs, mut d_domain) = set_up_scalars(coeff_size, log_test_domain_size, false);
|
||||
let (_, _, mut d_domain_inv) = set_up_scalars(0, log_test_domain_size, true);
|
||||
|
||||
let mut d_evals = evaluate_scalars(&mut d_coeffs, &mut d_domain);
|
||||
let mut d_coeffs_domain = interpolate_scalars(&mut d_evals, &mut d_domain_inv);
|
||||
let mut h_coeffs_domain: Vec<Scalar> = (0..1 << log_test_domain_size).map(|_| Scalar::zero()).collect();
|
||||
d_coeffs_domain.copy_to(&mut h_coeffs_domain[..]).unwrap();
|
||||
|
||||
assert_eq!(h_coeffs, h_coeffs_domain[..coeff_size]);
|
||||
for i in coeff_size.. (1 << log_test_domain_size) {
|
||||
assert_eq!(Scalar::zero(), h_coeffs_domain[i]);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_scalar_batch_evaluation() {
|
||||
let batch_size = 6;
|
||||
let log_test_domain_size = 8;
|
||||
let domain_size = 1 << log_test_domain_size;
|
||||
let coeff_size = 1 << 6;
|
||||
let (h_coeffs, mut d_coeffs, mut d_domain) = set_up_scalars(coeff_size * batch_size, log_test_domain_size, false);
|
||||
let (_, _, mut d_domain_inv) = set_up_scalars(0, log_test_domain_size, true);
|
||||
|
||||
let mut d_evals = evaluate_scalars_batch(&mut d_coeffs, &mut d_domain, batch_size);
|
||||
let mut d_coeffs_domain = interpolate_scalars_batch(&mut d_evals, &mut d_domain_inv, batch_size);
|
||||
let mut h_coeffs_domain: Vec<Scalar> = (0..domain_size * batch_size).map(|_| Scalar::zero()).collect();
|
||||
d_coeffs_domain.copy_to(&mut h_coeffs_domain[..]).unwrap();
|
||||
|
||||
for j in 0..batch_size {
|
||||
assert_eq!(h_coeffs[j * coeff_size..(j + 1) * coeff_size], h_coeffs_domain[j * domain_size..j * domain_size + coeff_size]);
|
||||
for i in coeff_size..domain_size {
|
||||
assert_eq!(Scalar::zero(), h_coeffs_domain[j * domain_size + i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_point_evaluation() {
|
||||
let log_test_domain_size = 7;
|
||||
let coeff_size = 1 << 7;
|
||||
let (h_coeffs, mut d_coeffs, mut d_domain) = set_up_points(coeff_size, log_test_domain_size, false);
|
||||
let (_, _, mut d_domain_inv) = set_up_points(0, log_test_domain_size, true);
|
||||
|
||||
let mut d_evals = evaluate_points(&mut d_coeffs, &mut d_domain);
|
||||
let mut d_coeffs_domain = interpolate_points(&mut d_evals, &mut d_domain_inv);
|
||||
let mut h_coeffs_domain: Vec<Point> = (0..1 << log_test_domain_size).map(|_| Point::zero()).collect();
|
||||
d_coeffs_domain.copy_to(&mut h_coeffs_domain[..]).unwrap();
|
||||
|
||||
assert_eq!(h_coeffs[..], h_coeffs_domain[..coeff_size]);
|
||||
for i in coeff_size..(1 << log_test_domain_size) {
|
||||
assert_eq!(Point::zero(), h_coeffs_domain[i]);
|
||||
}
|
||||
for i in 0..coeff_size {
|
||||
assert_ne!(h_coeffs_domain[i], Point::zero());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_point_batch_evaluation() {
|
||||
let batch_size = 4;
|
||||
let log_test_domain_size = 6;
|
||||
let domain_size = 1 << log_test_domain_size;
|
||||
let coeff_size = 1 << 5;
|
||||
let (h_coeffs, mut d_coeffs, mut d_domain) = set_up_points(coeff_size * batch_size, log_test_domain_size, false);
|
||||
let (_, _, mut d_domain_inv) = set_up_points(0, log_test_domain_size, true);
|
||||
|
||||
let mut d_evals = evaluate_points_batch(&mut d_coeffs, &mut d_domain, batch_size);
|
||||
let mut d_coeffs_domain = interpolate_points_batch(&mut d_evals, &mut d_domain_inv, batch_size);
|
||||
let mut h_coeffs_domain: Vec<Point> = (0..domain_size * batch_size).map(|_| Point::zero()).collect();
|
||||
d_coeffs_domain.copy_to(&mut h_coeffs_domain[..]).unwrap();
|
||||
|
||||
for j in 0..batch_size {
|
||||
assert_eq!(h_coeffs[j * coeff_size..(j + 1) * coeff_size], h_coeffs_domain[j * domain_size..(j * domain_size + coeff_size)]);
|
||||
for i in coeff_size..domain_size {
|
||||
assert_eq!(Point::zero(), h_coeffs_domain[j * domain_size + i]);
|
||||
}
|
||||
for i in j * domain_size..(j * domain_size + coeff_size) {
|
||||
assert_ne!(h_coeffs_domain[i], Point::zero());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_scalar_evaluation_on_trivial_coset() {
|
||||
// checks that the evaluations on the subgroup is the same as on the coset generated by 1
|
||||
let log_test_domain_size = 8;
|
||||
let coeff_size = 1 << 6;
|
||||
let (_, mut d_coeffs, mut d_domain) = set_up_scalars(coeff_size, log_test_domain_size, false);
|
||||
let (_, _, mut d_domain_inv) = set_up_scalars(coeff_size, log_test_domain_size, true);
|
||||
let mut d_trivial_coset_powers = build_domain(1 << log_test_domain_size, 0, false);
|
||||
|
||||
let mut d_evals = evaluate_scalars(&mut d_coeffs, &mut d_domain);
|
||||
let mut h_coeffs: Vec<Scalar> = (0..1 << log_test_domain_size).map(|_| Scalar::zero()).collect();
|
||||
d_evals.copy_to(&mut h_coeffs[..]).unwrap();
|
||||
let mut d_evals_coset = evaluate_scalars_on_coset(&mut d_coeffs, &mut d_domain, &mut d_trivial_coset_powers);
|
||||
let mut h_evals_coset: Vec<Scalar> = (0..1 << log_test_domain_size).map(|_| Scalar::zero()).collect();
|
||||
d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();
|
||||
|
||||
assert_eq!(h_coeffs, h_evals_coset);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_scalar_evaluation_on_coset() {
|
||||
// checks that evaluating a polynomial on a subgroup and its coset is the same as evaluating on a 2x larger subgroup
|
||||
let log_test_size = 8;
|
||||
let test_size = 1 << log_test_size;
|
||||
let (_, mut d_coeffs, mut d_domain) = set_up_scalars(test_size, log_test_size, false);
|
||||
let (_, _, mut d_large_domain) = set_up_scalars(0, log_test_size + 1, false);
|
||||
let mut d_coset_powers = build_domain(test_size, log_test_size + 1, false);
|
||||
|
||||
let mut d_evals_large = evaluate_scalars(&mut d_coeffs, &mut d_large_domain);
|
||||
let mut h_evals_large: Vec<Scalar> = (0..2 * test_size).map(|_| Scalar::zero()).collect();
|
||||
d_evals_large.copy_to(&mut h_evals_large[..]).unwrap();
|
||||
let mut d_evals = evaluate_scalars(&mut d_coeffs, &mut d_domain);
|
||||
let mut h_evals: Vec<Scalar> = (0..test_size).map(|_| Scalar::zero()).collect();
|
||||
d_evals.copy_to(&mut h_evals[..]).unwrap();
|
||||
let mut d_evals_coset = evaluate_scalars_on_coset(&mut d_coeffs, &mut d_domain, &mut d_coset_powers);
|
||||
let mut h_evals_coset: Vec<Scalar> = (0..test_size).map(|_| Scalar::zero()).collect();
|
||||
d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();
|
||||
|
||||
assert_eq!(h_evals[..], h_evals_large[..test_size]);
|
||||
assert_eq!(h_evals_coset[..], h_evals_large[test_size..2 * test_size]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_scalar_batch_evaluation_on_coset() {
|
||||
// checks that evaluating a polynomial on a subgroup and its coset is the same as evaluating on a 2x larger subgroup
|
||||
let batch_size = 4;
|
||||
let log_test_size = 6;
|
||||
let test_size = 1 << log_test_size;
|
||||
let (_, mut d_coeffs, mut d_domain) = set_up_scalars(test_size * batch_size, log_test_size, false);
|
||||
let (_, _, mut d_large_domain) = set_up_scalars(0, log_test_size + 1, false);
|
||||
let mut d_coset_powers = build_domain(test_size, log_test_size + 1, false);
|
||||
|
||||
let mut d_evals_large = evaluate_scalars_batch(&mut d_coeffs, &mut d_large_domain, batch_size);
|
||||
let mut h_evals_large: Vec<Scalar> = (0..2 * test_size * batch_size).map(|_| Scalar::zero()).collect();
|
||||
d_evals_large.copy_to(&mut h_evals_large[..]).unwrap();
|
||||
let mut d_evals = evaluate_scalars_batch(&mut d_coeffs, &mut d_domain, batch_size);
|
||||
let mut h_evals: Vec<Scalar> = (0..test_size * batch_size).map(|_| Scalar::zero()).collect();
|
||||
d_evals.copy_to(&mut h_evals[..]).unwrap();
|
||||
let mut d_evals_coset = evaluate_scalars_on_coset_batch(&mut d_coeffs, &mut d_domain, batch_size, &mut d_coset_powers);
|
||||
let mut h_evals_coset: Vec<Scalar> = (0..test_size * batch_size).map(|_| Scalar::zero()).collect();
|
||||
d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();
|
||||
|
||||
for i in 0..batch_size {
|
||||
assert_eq!(h_evals_large[2 * i * test_size..(2 * i + 1) * test_size], h_evals[i * test_size..(i + 1) * test_size]);
|
||||
assert_eq!(h_evals_large[(2 * i + 1) * test_size..(2 * i + 2) * test_size], h_evals_coset[i * test_size..(i + 1) * test_size]);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_point_evaluation_on_coset() {
|
||||
// checks that evaluating a polynomial on a subgroup and its coset is the same as evaluating on a 2x larger subgroup
|
||||
let log_test_size = 8;
|
||||
let test_size = 1 << log_test_size;
|
||||
let (_, mut d_coeffs, mut d_domain) = set_up_points(test_size, log_test_size, false);
|
||||
let (_, _, mut d_large_domain) = set_up_points(0, log_test_size + 1, false);
|
||||
let mut d_coset_powers = build_domain(test_size, log_test_size + 1, false);
|
||||
|
||||
let mut d_evals_large = evaluate_points(&mut d_coeffs, &mut d_large_domain);
|
||||
let mut h_evals_large: Vec<Point> = (0..2 * test_size).map(|_| Point::zero()).collect();
|
||||
d_evals_large.copy_to(&mut h_evals_large[..]).unwrap();
|
||||
let mut d_evals = evaluate_points(&mut d_coeffs, &mut d_domain);
|
||||
let mut h_evals: Vec<Point> = (0..test_size).map(|_| Point::zero()).collect();
|
||||
d_evals.copy_to(&mut h_evals[..]).unwrap();
|
||||
let mut d_evals_coset = evaluate_points_on_coset(&mut d_coeffs, &mut d_domain, &mut d_coset_powers);
|
||||
let mut h_evals_coset: Vec<Point> = (0..test_size).map(|_| Point::zero()).collect();
|
||||
d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();
|
||||
|
||||
assert_eq!(h_evals[..], h_evals_large[..test_size]);
|
||||
assert_eq!(h_evals_coset[..], h_evals_large[test_size..2 * test_size]);
|
||||
for i in 0..test_size {
|
||||
assert_ne!(h_evals[i], Point::zero());
|
||||
assert_ne!(h_evals_coset[i], Point::zero());
|
||||
assert_ne!(h_evals_large[2 * i], Point::zero());
|
||||
assert_ne!(h_evals_large[2 * i + 1], Point::zero());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_point_batch_evaluation_on_coset() {
|
||||
// checks that evaluating a polynomial on a subgroup and its coset is the same as evaluating on a 2x larger subgroup
|
||||
let batch_size = 2;
|
||||
let log_test_size = 6;
|
||||
let test_size = 1 << log_test_size;
|
||||
let (_, mut d_coeffs, mut d_domain) = set_up_points(test_size * batch_size, log_test_size, false);
|
||||
let (_, _, mut d_large_domain) = set_up_points(0, log_test_size + 1, false);
|
||||
let mut d_coset_powers = build_domain(test_size, log_test_size + 1, false);
|
||||
|
||||
let mut d_evals_large = evaluate_points_batch(&mut d_coeffs, &mut d_large_domain, batch_size);
|
||||
let mut h_evals_large: Vec<Point> = (0..2 * test_size * batch_size).map(|_| Point::zero()).collect();
|
||||
d_evals_large.copy_to(&mut h_evals_large[..]).unwrap();
|
||||
let mut d_evals = evaluate_points_batch(&mut d_coeffs, &mut d_domain, batch_size);
|
||||
let mut h_evals: Vec<Point> = (0..test_size * batch_size).map(|_| Point::zero()).collect();
|
||||
d_evals.copy_to(&mut h_evals[..]).unwrap();
|
||||
let mut d_evals_coset = evaluate_points_on_coset_batch(&mut d_coeffs, &mut d_domain, batch_size, &mut d_coset_powers);
|
||||
let mut h_evals_coset: Vec<Point> = (0..test_size * batch_size).map(|_| Point::zero()).collect();
|
||||
d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();
|
||||
|
||||
for i in 0..batch_size {
|
||||
assert_eq!(h_evals_large[2 * i * test_size..(2 * i + 1) * test_size], h_evals[i * test_size..(i + 1) * test_size]);
|
||||
assert_eq!(h_evals_large[(2 * i + 1) * test_size..(2 * i + 2) * test_size], h_evals_coset[i * test_size..(i + 1) * test_size]);
|
||||
}
|
||||
for i in 0..test_size * batch_size {
|
||||
assert_ne!(h_evals[i], Point::zero());
|
||||
assert_ne!(h_evals_coset[i], Point::zero());
|
||||
assert_ne!(h_evals_large[2 * i], Point::zero());
|
||||
assert_ne!(h_evals_large[2 * i + 1], Point::zero());
|
||||
}
|
||||
}
|
||||
|
||||
// testing matrix multiplication by comparing the result of FFT with the naive multiplication by the DFT matrix
|
||||
#[test]
|
||||
fn test_matrix_multiplication() {
|
||||
let seed = None; // some value to fix the rng
|
||||
let test_size = 1 << 5;
|
||||
let rou = Fr::get_root_of_unity(test_size).unwrap();
|
||||
let matrix_flattened: Vec<Scalar> = (0..test_size).map(
|
||||
|row_num| { (0..test_size).map(
|
||||
|col_num| {
|
||||
let pow: [u64; 1] = [(row_num * col_num).try_into().unwrap()];
|
||||
Scalar::from_ark(Fr::pow(&rou, &pow).into_repr())
|
||||
}).collect::<Vec<Scalar>>()
|
||||
}).flatten().collect::<Vec<_>>();
|
||||
let vector: Vec<Scalar> = generate_random_scalars(test_size, get_rng(seed));
|
||||
|
||||
let result = mult_matrix_by_vec(&matrix_flattened, &vector, 0);
|
||||
let mut ntt_result = vector.clone();
|
||||
ntt(&mut ntt_result, 0);
|
||||
|
||||
// we don't use the same roots of unity as arkworks, so the results are permutations
|
||||
// of one another and the only guaranteed fixed scalars are the following ones:
|
||||
assert_eq!(result[0], ntt_result[0]);
|
||||
assert_eq!(result[test_size >> 1], ntt_result[test_size >> 1]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[allow(non_snake_case)]
|
||||
fn test_vec_scalar_mul() {
|
||||
let mut intoo = [Scalar::one(), Scalar::one(), Scalar::zero()];
|
||||
let expected = [Scalar::one(), Scalar::zero(), Scalar::zero()];
|
||||
mult_sc_vec(&mut intoo, &expected, 0);
|
||||
assert_eq!(intoo, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[allow(non_snake_case)]
|
||||
fn test_vec_point_mul() {
|
||||
let dummy_one = Point {
|
||||
x: Base::one(),
|
||||
y: Base::one(),
|
||||
z: Base::one(),
|
||||
};
|
||||
|
||||
let mut inout = [dummy_one, dummy_one, Point::zero()];
|
||||
let scalars = [Scalar::one(), Scalar::zero(), Scalar::zero()];
|
||||
let expected = [dummy_one, Point::zero(), Point::zero()];
|
||||
multp_vec(&mut inout, &scalars, 0);
|
||||
assert_eq!(inout, expected);
|
||||
}
|
||||
}
|
||||
@@ -90,36 +90,36 @@ def get_config_file_content(modolus_p, bit_count_p, limb_p, ntt_size, modolus_q,
|
||||
|
||||
# Create Cuda interface
|
||||
|
||||
newpath = "./icicle/curves/"+curve_name
|
||||
newpath = "./icicle-cuda/curves/"+curve_name
|
||||
if not os.path.exists(newpath):
|
||||
os.makedirs(newpath)
|
||||
|
||||
fc = get_config_file_content(modolus_p, bit_count_p, limb_p, ntt_size, modolus_q, bit_count_q, limb_q, weierstrass_b)
|
||||
text_file = open("./icicle/curves/"+curve_name+"/params.cuh", "w")
|
||||
text_file = open("./icicle-cuda/curves/"+curve_name+"/params.cuh", "w")
|
||||
n = text_file.write(fc)
|
||||
text_file.close()
|
||||
|
||||
with open("./icicle/curves/curve_template/lde.cu", "r") as lde_file:
|
||||
with open("./icicle-cuda/curves/curve_template/lde.cu", "r") as lde_file:
|
||||
content = lde_file.read()
|
||||
content = content.replace("CURVE_NAME_U",curve_name.upper())
|
||||
content = content.replace("CURVE_NAME_L",curve_name.lower())
|
||||
text_file = open("./icicle/curves/"+curve_name+"/lde.cu", "w")
|
||||
text_file = open("./icicle-cuda/curves/"+curve_name+"/lde.cu", "w")
|
||||
n = text_file.write(content)
|
||||
text_file.close()
|
||||
|
||||
with open("./icicle/curves/curve_template/msm.cu", "r") as msm_file:
|
||||
with open("./icicle-cuda/curves/curve_template/msm.cu", "r") as msm_file:
|
||||
content = msm_file.read()
|
||||
content = content.replace("CURVE_NAME_U",curve_name.upper())
|
||||
content = content.replace("CURVE_NAME_L",curve_name.lower())
|
||||
text_file = open("./icicle/curves/"+curve_name+"/msm.cu", "w")
|
||||
text_file = open("./icicle-cuda/curves/"+curve_name+"/msm.cu", "w")
|
||||
n = text_file.write(content)
|
||||
text_file.close()
|
||||
|
||||
with open("./icicle/curves/curve_template/ve_mod_mult.cu", "r") as ve_mod_mult_file:
|
||||
with open("./icicle-cuda/curves/curve_template/ve_mod_mult.cu", "r") as ve_mod_mult_file:
|
||||
content = ve_mod_mult_file.read()
|
||||
content = content.replace("CURVE_NAME_U",curve_name.upper())
|
||||
content = content.replace("CURVE_NAME_L",curve_name.lower())
|
||||
text_file = open("./icicle/curves/"+curve_name+"/ve_mod_mult.cu", "w")
|
||||
text_file = open("./icicle-cuda/curves/"+curve_name+"/ve_mod_mult.cu", "w")
|
||||
n = text_file.write(content)
|
||||
text_file.close()
|
||||
|
||||
@@ -132,7 +132,7 @@ namespace = '#include "params.cuh"\n'+'''namespace CURVE_NAME_U {
|
||||
typedef Affine<point_field_t> affine_t;
|
||||
}'''
|
||||
|
||||
with open('./icicle/curves/'+curve_name+'/curve_config.cuh', 'w') as f:
|
||||
with open('./icicle-cuda/curves/'+curve_name+'/curve_config.cuh', 'w') as f:
|
||||
f.write(namespace.replace("CURVE_NAME_U",curve_name.upper()))
|
||||
|
||||
|
||||
@@ -145,7 +145,7 @@ extern "C" bool eq_CURVE_NAME_L(CURVE_NAME_U::projective_t *point1, CURVE_NAME_U
|
||||
return (*point1 == *point2);
|
||||
}'''
|
||||
|
||||
with open('./icicle/curves/'+curve_name+'/projective.cu', 'w') as f:
|
||||
with open('./icicle-cuda/curves/'+curve_name+'/projective.cu', 'w') as f:
|
||||
f.write(eq.replace("CURVE_NAME_U",curve_name.upper()).replace("CURVE_NAME_L",curve_name.lower()))
|
||||
|
||||
supported_operations = '''
|
||||
@@ -155,10 +155,10 @@ supported_operations = '''
|
||||
#include "ve_mod_mult.cu"
|
||||
'''
|
||||
|
||||
with open('./icicle/curves/'+curve_name+'/supported_operations.cu', 'w') as f:
|
||||
with open('./icicle-cuda/curves/'+curve_name+'/supported_operations.cu', 'w') as f:
|
||||
f.write(supported_operations.replace("CURVE_NAME_U",curve_name.upper()).replace("CURVE_NAME_L",curve_name.lower()))
|
||||
|
||||
with open('./icicle/curves/index.cu', 'a') as f:
|
||||
with open('./icicle-cuda/curves/index.cu', 'a') as f:
|
||||
f.write('\n#include "'+curve_name.lower()+'/supported_operations.cu"')
|
||||
|
||||
|
||||
|
||||
49
icicle-core/Cargo.toml
Normal file
49
icicle-core/Cargo.toml
Normal file
@@ -0,0 +1,49 @@
|
||||
[package]
|
||||
name = "icicle-core"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
authors = [ "Ingonyama" ]
|
||||
description = "An implementation of the Ingonyama CUDA Library"
|
||||
homepage = "https://www.ingonyama.com"
|
||||
repository = "https://github.com/ingonyama-zk/icicle"
|
||||
|
||||
[[bench]]
|
||||
name = "ntt"
|
||||
path = "benches/ntt.rs"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "msm"
|
||||
path = "benches/msm.rs"
|
||||
harness = false
|
||||
|
||||
[dependencies]
|
||||
hex = "*"
|
||||
ark-std = "0.3.0"
|
||||
ark-ff = "0.3.0"
|
||||
ark-poly = "0.3.0"
|
||||
ark-ec = { version = "0.3.0", features = [ "parallel" ] }
|
||||
ark-bls12-381 = "0.3.0"
|
||||
ark-bls12-377 = "0.3.0"
|
||||
ark-bn254 = "0.3.0"
|
||||
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_derive = "1.0"
|
||||
serde_cbor = "0.11.2"
|
||||
|
||||
rustacuda = "0.1"
|
||||
rustacuda_core = "0.1"
|
||||
rustacuda_derive = "0.1"
|
||||
|
||||
rand = "*" #TODO: move rand and ark dependencies to dev once random scalar/point generation is done "natively"
|
||||
|
||||
[build-dependencies]
|
||||
cc = { version = "1.0", features = ["parallel"] }
|
||||
|
||||
[dev-dependencies]
|
||||
"criterion" = "0.4.0"
|
||||
|
||||
[features]
|
||||
default = ["bls12-381"]
|
||||
bls12-381 = ["ark-bls12-381/curve"]
|
||||
g2 = []
|
||||
4
icicle-core/src/basic_structs/field.rs
Normal file
4
icicle-core/src/basic_structs/field.rs
Normal file
@@ -0,0 +1,4 @@
|
||||
pub trait Field<const NUM_LIMBS: usize> {
|
||||
const MODOLUS: [u32;NUM_LIMBS];
|
||||
const LIMBS: usize = NUM_LIMBS;
|
||||
}
|
||||
3
icicle-core/src/basic_structs/mod.rs
Normal file
3
icicle-core/src/basic_structs/mod.rs
Normal file
@@ -0,0 +1,3 @@
|
||||
pub mod field;
|
||||
pub mod scalar;
|
||||
pub mod point;
|
||||
108
icicle-core/src/basic_structs/point.rs
Normal file
108
icicle-core/src/basic_structs/point.rs
Normal file
@@ -0,0 +1,108 @@
|
||||
use std::ffi::c_uint;
|
||||
|
||||
use ark_bn254::{Fq as Fq_BN254, Fr as Fr_BN254, G1Affine as G1Affine_BN254, G1Projective as G1Projective_BN254};
|
||||
|
||||
use ark_ec::AffineCurve;
|
||||
use ark_ff::{BigInteger256, PrimeField};
|
||||
use std::mem::transmute;
|
||||
use ark_ff::Field;
|
||||
use crate::utils::{u32_vec_to_u64_vec, u64_vec_to_u32_vec};
|
||||
|
||||
use rustacuda_core::DeviceCopy;
|
||||
use rustacuda_derive::DeviceCopy;
|
||||
|
||||
use super::scalar::{get_fixed_limbs, self};
|
||||
|
||||
|
||||
#[derive(Debug, Clone, Copy, DeviceCopy)]
|
||||
#[repr(C)]
|
||||
pub struct PointT<BF: scalar::ScalarTrait> {
|
||||
pub x: BF,
|
||||
pub y: BF,
|
||||
pub z: BF,
|
||||
}
|
||||
|
||||
impl<BF: DeviceCopy + scalar::ScalarTrait> Default for PointT<BF> {
|
||||
fn default() -> Self {
|
||||
PointT::zero()
|
||||
}
|
||||
}
|
||||
|
||||
impl<BF: DeviceCopy + scalar::ScalarTrait> PointT<BF> {
|
||||
pub fn zero() -> Self {
|
||||
PointT {
|
||||
x: BF::zero(),
|
||||
y: BF::one(),
|
||||
z: BF::zero(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn infinity() -> Self {
|
||||
Self::zero()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Clone, Copy, DeviceCopy)]
|
||||
#[repr(C)]
|
||||
pub struct PointAffineNoInfinityT<BF> {
|
||||
pub x: BF,
|
||||
pub y: BF,
|
||||
}
|
||||
|
||||
impl<BF: scalar::ScalarTrait> Default for PointAffineNoInfinityT<BF> {
|
||||
fn default() -> Self {
|
||||
PointAffineNoInfinityT {
|
||||
x: BF::zero(),
|
||||
y: BF::zero(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<BF: Copy + scalar::ScalarTrait> PointAffineNoInfinityT<BF> {
|
||||
///From u32 limbs x,y
|
||||
pub fn from_limbs(x: &[u32], y: &[u32]) -> Self {
|
||||
PointAffineNoInfinityT {
|
||||
x: BF::from_limbs(x),
|
||||
y: BF::from_limbs(y)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn limbs(&self) -> Vec<u32> {
|
||||
[self.x.limbs(), self.y.limbs()].concat()
|
||||
}
|
||||
|
||||
pub fn to_projective(&self) -> PointT<BF> {
|
||||
PointT {
|
||||
x: self.x,
|
||||
y: self.y,
|
||||
z: BF::one(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<BF: Copy + scalar::ScalarTrait> PointT<BF> {
|
||||
pub fn from_limbs(x: &[u32], y: &[u32], z: &[u32]) -> Self {
|
||||
PointT {
|
||||
x: BF::from_limbs(x),
|
||||
y: BF::from_limbs(y),
|
||||
z: BF::from_limbs(z)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_xy_limbs(value: &[u32]) -> PointT<BF> {
|
||||
let l = value.len();
|
||||
assert_eq!(l, 3 * BF::base_limbs(), "length must be 3 * {}", BF::base_limbs());
|
||||
PointT {
|
||||
x: BF::from_limbs(value[..BF::base_limbs()].try_into().unwrap()),
|
||||
y: BF::from_limbs(value[BF::base_limbs()..BF::base_limbs() * 2].try_into().unwrap()),
|
||||
z: BF::from_limbs(value[BF::base_limbs() * 2..].try_into().unwrap())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_xy_strip_z(&self) -> PointAffineNoInfinityT<BF> {
|
||||
PointAffineNoInfinityT {
|
||||
x: self.x,
|
||||
y: self.y,
|
||||
}
|
||||
}
|
||||
}
|
||||
102
icicle-core/src/basic_structs/scalar.rs
Normal file
102
icicle-core/src/basic_structs/scalar.rs
Normal file
@@ -0,0 +1,102 @@
|
||||
use std::ffi::{c_int, c_uint};
|
||||
use rand::{rngs::StdRng, RngCore, SeedableRng};
|
||||
use rustacuda_core::DeviceCopy;
|
||||
use rustacuda_derive::DeviceCopy;
|
||||
use std::mem::transmute;
|
||||
use rustacuda::prelude::*;
|
||||
use rustacuda_core::DevicePointer;
|
||||
use rustacuda::memory::{DeviceBox, CopyDestination};
|
||||
|
||||
use crate::utils::{u32_vec_to_u64_vec, u64_vec_to_u32_vec};
|
||||
|
||||
use std::marker::PhantomData;
|
||||
use std::convert::TryInto;
|
||||
|
||||
use super::field::{Field, self};
|
||||
|
||||
pub fn get_fixed_limbs<const NUM_LIMBS: usize>(val: &[u32]) -> [u32; NUM_LIMBS] {
|
||||
match val.len() {
|
||||
n if n < NUM_LIMBS => {
|
||||
let mut padded: [u32; NUM_LIMBS] = [0; NUM_LIMBS];
|
||||
padded[..val.len()].copy_from_slice(&val);
|
||||
padded
|
||||
}
|
||||
n if n == NUM_LIMBS => val.try_into().unwrap(),
|
||||
_ => panic!("slice has too many elements"),
|
||||
}
|
||||
}
|
||||
|
||||
pub trait ScalarTrait{
|
||||
fn base_limbs() -> usize;
|
||||
fn zero() -> Self;
|
||||
fn from_limbs(value: &[u32]) -> Self;
|
||||
fn one() -> Self;
|
||||
fn to_bytes_le(&self) -> Vec<u8>;
|
||||
fn limbs(&self) -> &[u32];
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Clone, Copy)]
|
||||
#[repr(C)]
|
||||
pub struct ScalarT<M, const NUM_LIMBS: usize> {
|
||||
pub(crate) phantom: PhantomData<M>,
|
||||
pub(crate) value : [u32; NUM_LIMBS]
|
||||
}
|
||||
|
||||
impl<M, const NUM_LIMBS: usize> ScalarTrait for ScalarT<M, NUM_LIMBS>
|
||||
where
|
||||
M: Field<NUM_LIMBS>,
|
||||
{
|
||||
|
||||
fn base_limbs() -> usize {
|
||||
return NUM_LIMBS;
|
||||
}
|
||||
|
||||
fn zero() -> Self {
|
||||
ScalarT {
|
||||
value: [0u32; NUM_LIMBS],
|
||||
phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
fn from_limbs(value: &[u32]) -> Self {
|
||||
Self {
|
||||
value: get_fixed_limbs(value),
|
||||
phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
fn one() -> Self {
|
||||
let mut s = [0u32; NUM_LIMBS];
|
||||
s[0] = 1;
|
||||
ScalarT { value: s, phantom: PhantomData }
|
||||
}
|
||||
|
||||
fn to_bytes_le(&self) -> Vec<u8> {
|
||||
self.value
|
||||
.iter()
|
||||
.map(|s| s.to_le_bytes().to_vec())
|
||||
.flatten()
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
|
||||
fn limbs(&self) -> &[u32] {
|
||||
&self.value
|
||||
}
|
||||
}
|
||||
|
||||
impl<M, const NUM_LIMBS: usize> ScalarT<M, NUM_LIMBS> where M: field::Field<NUM_LIMBS>{
|
||||
pub fn from_limbs_le(value: &[u32]) -> ScalarT<M,NUM_LIMBS> {
|
||||
Self::from_limbs(value)
|
||||
}
|
||||
|
||||
pub fn from_limbs_be(value: &[u32]) -> ScalarT<M,NUM_LIMBS> {
|
||||
let mut value = value.to_vec();
|
||||
value.reverse();
|
||||
Self::from_limbs_le(&value)
|
||||
}
|
||||
|
||||
// Additional Functions
|
||||
pub fn add(&self, other:ScalarT<M, NUM_LIMBS>) -> ScalarT<M,NUM_LIMBS>{ // overload +
|
||||
return ScalarT{value: [self.value[0] + other.value[0];NUM_LIMBS], phantom: PhantomData };
|
||||
}
|
||||
}
|
||||
2
icicle-core/src/lib.rs
Normal file
2
icicle-core/src/lib.rs
Normal file
@@ -0,0 +1,2 @@
|
||||
pub mod utils;
|
||||
pub mod basic_structs;
|
||||
42
icicle-core/src/utils.rs
Normal file
42
icicle-core/src/utils.rs
Normal file
@@ -0,0 +1,42 @@
|
||||
use rand::RngCore;
|
||||
use rand::rngs::StdRng;
|
||||
use rand::SeedableRng;
|
||||
|
||||
pub fn from_limbs<T>(limbs: Vec<u32>, chunk_size: usize, f: fn(&[u32]) -> T) -> Vec<T> {
|
||||
let points = limbs
|
||||
.chunks(chunk_size)
|
||||
.map(|lmbs| f(lmbs))
|
||||
.collect::<Vec<T>>();
|
||||
points
|
||||
}
|
||||
|
||||
pub fn u32_vec_to_u64_vec(arr_u32: &[u32]) -> Vec<u64> {
|
||||
let len = (arr_u32.len() / 2) as usize;
|
||||
let mut arr_u64 = vec![0u64; len];
|
||||
|
||||
for i in 0..len {
|
||||
arr_u64[i] = u64::from(arr_u32[i * 2]) | (u64::from(arr_u32[i * 2 + 1]) << 32);
|
||||
}
|
||||
|
||||
arr_u64
|
||||
}
|
||||
|
||||
pub fn u64_vec_to_u32_vec(arr_u64: &[u64]) -> Vec<u32> {
|
||||
let len = arr_u64.len() * 2;
|
||||
let mut arr_u32 = vec![0u32; len];
|
||||
|
||||
for i in 0..arr_u64.len() {
|
||||
arr_u32[i * 2] = arr_u64[i] as u32;
|
||||
arr_u32[i * 2 + 1] = (arr_u64[i] >> 32) as u32;
|
||||
}
|
||||
|
||||
arr_u32
|
||||
}
|
||||
|
||||
pub fn get_rng(seed: Option<u64>) -> Box<dyn RngCore> { //TOOD: this func is universal
|
||||
let rng: Box<dyn RngCore> = match seed {
|
||||
Some(seed) => Box::new(StdRng::seed_from_u64(seed)),
|
||||
None => Box::new(rand::thread_rng()),
|
||||
};
|
||||
rng
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
#pragma once
|
||||
#include "../../utils/storage.cuh"
|
||||
namespace PARAMS_BLS12_377{
|
||||
namespace PARAMS{
|
||||
struct fp_config{
|
||||
static constexpr unsigned limbs_count = 8;
|
||||
static constexpr storage<limbs_count> modulus = {0x00000001, 0x0a118000, 0xd0000001, 0x59aa76fe, 0x5c37b001, 0x60b44d1e, 0x9a2ca556, 0x12ab655e};
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
#include "../../utils/storage.cuh"
|
||||
|
||||
namespace PARAMS_BLS12_381{
|
||||
namespace PARAMS{
|
||||
struct fp_config {
|
||||
// field structure size = 8 * 32 bit
|
||||
static constexpr unsigned limbs_count = 8;
|
||||
@@ -1,6 +1,6 @@
|
||||
#pragma once
|
||||
#include "../../utils/storage.cuh"
|
||||
namespace PARAMS_BN254{
|
||||
namespace PARAMS{
|
||||
struct fp_config{
|
||||
static constexpr unsigned limbs_count = 8;
|
||||
|
||||
28
icicle-cuda/curves/curve_config.cuh
Normal file
28
icicle-cuda/curves/curve_config.cuh
Normal file
@@ -0,0 +1,28 @@
|
||||
#pragma once
|
||||
|
||||
#include "../primitives/field.cuh"
|
||||
#include "../primitives/projective.cuh"
|
||||
|
||||
#if defined(FEATURE_BLS12_381)
|
||||
#include "bls12_381/params.cuh"
|
||||
#elif defined(FEATURE_BLS12_377)
|
||||
#include "bls12_377/params.cuh"
|
||||
#elif defined(FEATURE_BN254)
|
||||
#include "bn254/params.cuh"
|
||||
#else
|
||||
# error "no FEATURE"
|
||||
#endif
|
||||
|
||||
typedef Field<PARAMS::fp_config> scalar_field_t;
|
||||
typedef scalar_field_t scalar_t;
|
||||
typedef Field<PARAMS::fq_config> point_field_t;
|
||||
static constexpr point_field_t b = point_field_t{ PARAMS::weierstrass_b };
|
||||
typedef Projective<point_field_t, scalar_field_t, b> projective_t;
|
||||
typedef Affine<point_field_t> affine_t;
|
||||
#if defined(G2_DEFINED)
|
||||
typedef ExtensionField<PARAMS::fq_config> g2_point_field_t;
|
||||
static constexpr g2_point_field_t b_g2 = g2_point_field_t{ point_field_t{ PARAMS::weierstrass_b_g2_re },
|
||||
point_field_t{ PARAMS::weierstrass_b_g2_im }};
|
||||
typedef Projective<g2_point_field_t, scalar_field_t, b_g2> g2_projective_t;
|
||||
typedef Affine<g2_point_field_t> g2_affine_t;
|
||||
#endif
|
||||
@@ -1,20 +1,20 @@
|
||||
#ifndef _BN254_LDE
|
||||
#define _BN254_LDE
|
||||
#ifndef _LDE
|
||||
#define _LDE
|
||||
#include <cuda.h>
|
||||
#include "../../appUtils/ntt/lde.cu"
|
||||
#include "../../appUtils/ntt/ntt.cuh"
|
||||
#include "../../appUtils/vector_manipulation/ve_mod_mult.cuh"
|
||||
#include "../appUtils/ntt/lde.cu"
|
||||
#include "../appUtils/ntt/ntt.cuh"
|
||||
#include "../appUtils/vector_manipulation/ve_mod_mult.cuh"
|
||||
#include "curve_config.cuh"
|
||||
|
||||
extern "C" BN254::scalar_t* build_domain_cuda_bn254(uint32_t domain_size, uint32_t logn, bool inverse, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
extern "C" scalar_t* build_domain_cuda(uint32_t domain_size, uint32_t logn, bool inverse, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
if (inverse) {
|
||||
return fill_twiddle_factors_array(domain_size, BN254::scalar_t::omega_inv(logn), stream);
|
||||
return fill_twiddle_factors_array(domain_size, scalar_t::omega_inv(logn), stream);
|
||||
} else {
|
||||
return fill_twiddle_factors_array(domain_size, BN254::scalar_t::omega(logn), stream);
|
||||
return fill_twiddle_factors_array(domain_size, scalar_t::omega(logn), stream);
|
||||
}
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
@@ -24,12 +24,12 @@ extern "C" BN254::scalar_t* build_domain_cuda_bn254(uint32_t domain_size, uint32
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int ntt_cuda_bn254(BN254::scalar_t *arr, uint32_t n, bool inverse, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
extern "C" int ntt_cuda(scalar_t *arr, uint32_t n, bool inverse, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
return ntt_end2end_template<BN254::scalar_t,BN254::scalar_t>(arr, n, inverse, stream); // TODO: pass device_id
|
||||
return ntt_end2end_template<scalar_t,scalar_t>(arr, n, inverse, stream); // TODO: pass device_id
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
@@ -39,12 +39,12 @@ extern "C" int ntt_cuda_bn254(BN254::scalar_t *arr, uint32_t n, bool inverse, si
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int ecntt_cuda_bn254(BN254::projective_t *arr, uint32_t n, bool inverse, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
extern "C" int ecntt_cuda(projective_t *arr, uint32_t n, bool inverse, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
return ntt_end2end_template<BN254::projective_t,BN254::scalar_t>(arr, n, inverse, stream); // TODO: pass device_id
|
||||
return ntt_end2end_template<projective_t,scalar_t>(arr, n, inverse, stream); // TODO: pass device_id
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
@@ -53,12 +53,12 @@ extern "C" int ecntt_cuda_bn254(BN254::projective_t *arr, uint32_t n, bool inver
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int ntt_batch_cuda_bn254(BN254::scalar_t *arr, uint32_t arr_size, uint32_t batch_size, bool inverse, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
extern "C" int ntt_batch_cuda(scalar_t *arr, uint32_t arr_size, uint32_t batch_size, bool inverse, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
return ntt_end2end_batch_template<BN254::scalar_t,BN254::scalar_t>(arr, arr_size, batch_size, inverse, stream); // TODO: pass device_id
|
||||
return ntt_end2end_batch_template<scalar_t,scalar_t>(arr, arr_size, batch_size, inverse, stream); // TODO: pass device_id
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
@@ -67,12 +67,12 @@ extern "C" int ntt_batch_cuda_bn254(BN254::scalar_t *arr, uint32_t arr_size, uin
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int ecntt_batch_cuda_bn254(BN254::projective_t *arr, uint32_t arr_size, uint32_t batch_size, bool inverse, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
extern "C" int ecntt_batch_cuda(projective_t *arr, uint32_t arr_size, uint32_t batch_size, bool inverse, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
return ntt_end2end_batch_template<BN254::projective_t,BN254::scalar_t>(arr, arr_size, batch_size, inverse, stream); // TODO: pass device_id
|
||||
return ntt_end2end_batch_template<projective_t,scalar_t>(arr, arr_size, batch_size, inverse, stream); // TODO: pass device_id
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
@@ -81,7 +81,7 @@ extern "C" int ecntt_batch_cuda_bn254(BN254::projective_t *arr, uint32_t arr_siz
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int interpolate_scalars_cuda_bn254(BN254::scalar_t* d_out, BN254::scalar_t *d_evaluations, BN254::scalar_t *d_domain, unsigned n, unsigned device_id = 0, cudaStream_t stream = 0)
|
||||
extern "C" int interpolate_scalars_cuda(scalar_t* d_out, scalar_t *d_evaluations, scalar_t *d_domain, unsigned n, unsigned device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
@@ -94,7 +94,7 @@ extern "C" int interpolate_scalars_cuda_bn254(BN254::scalar_t* d_out, BN254::sca
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int interpolate_scalars_batch_cuda_bn254(BN254::scalar_t* d_out, BN254::scalar_t* d_evaluations, BN254::scalar_t* d_domain, unsigned n,
|
||||
extern "C" int interpolate_scalars_batch_cuda(scalar_t* d_out, scalar_t* d_evaluations, scalar_t* d_domain, unsigned n,
|
||||
unsigned batch_size, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
@@ -109,7 +109,7 @@ extern "C" int interpolate_scalars_batch_cuda_bn254(BN254::scalar_t* d_out, BN25
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int interpolate_points_cuda_bn254(BN254::projective_t* d_out, BN254::projective_t *d_evaluations, BN254::scalar_t *d_domain, unsigned n, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
extern "C" int interpolate_points_cuda(projective_t* d_out, projective_t *d_evaluations, scalar_t *d_domain, unsigned n, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
@@ -122,7 +122,7 @@ extern "C" int interpolate_points_cuda_bn254(BN254::projective_t* d_out, BN254::
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int interpolate_points_batch_cuda_bn254(BN254::projective_t* d_out, BN254::projective_t* d_evaluations, BN254::scalar_t* d_domain,
|
||||
extern "C" int interpolate_points_batch_cuda(projective_t* d_out, projective_t* d_evaluations, scalar_t* d_domain,
|
||||
unsigned n, unsigned batch_size, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
@@ -137,12 +137,12 @@ extern "C" int interpolate_points_batch_cuda_bn254(BN254::projective_t* d_out, B
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int evaluate_scalars_cuda_bn254(BN254::scalar_t* d_out, BN254::scalar_t *d_coefficients, BN254::scalar_t *d_domain,
|
||||
extern "C" int evaluate_scalars_cuda(scalar_t* d_out, scalar_t *d_coefficients, scalar_t *d_domain,
|
||||
unsigned domain_size, unsigned n, unsigned device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
BN254::scalar_t* _null = nullptr;
|
||||
scalar_t* _null = nullptr;
|
||||
cudaStreamCreate(&stream);
|
||||
return evaluate(d_out, d_coefficients, d_domain, domain_size, n, false, _null, stream);
|
||||
}
|
||||
@@ -153,12 +153,12 @@ extern "C" int evaluate_scalars_cuda_bn254(BN254::scalar_t* d_out, BN254::scalar
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int evaluate_scalars_batch_cuda_bn254(BN254::scalar_t* d_out, BN254::scalar_t* d_coefficients, BN254::scalar_t* d_domain, unsigned domain_size,
|
||||
extern "C" int evaluate_scalars_batch_cuda(scalar_t* d_out, scalar_t* d_coefficients, scalar_t* d_domain, unsigned domain_size,
|
||||
unsigned n, unsigned batch_size, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
BN254::scalar_t* _null = nullptr;
|
||||
scalar_t* _null = nullptr;
|
||||
cudaStreamCreate(&stream);
|
||||
return evaluate_batch(d_out, d_coefficients, d_domain, domain_size, n, batch_size, false, _null, stream);
|
||||
}
|
||||
@@ -169,12 +169,12 @@ extern "C" int evaluate_scalars_batch_cuda_bn254(BN254::scalar_t* d_out, BN254::
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int evaluate_points_cuda_bn254(BN254::projective_t* d_out, BN254::projective_t *d_coefficients, BN254::scalar_t *d_domain,
|
||||
extern "C" int evaluate_points_cuda(projective_t* d_out, projective_t *d_coefficients, scalar_t *d_domain,
|
||||
unsigned domain_size, unsigned n, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
BN254::scalar_t* _null = nullptr;
|
||||
scalar_t* _null = nullptr;
|
||||
cudaStreamCreate(&stream);
|
||||
return evaluate(d_out, d_coefficients, d_domain, domain_size, n, false, _null, stream);
|
||||
}
|
||||
@@ -185,12 +185,12 @@ extern "C" int evaluate_points_cuda_bn254(BN254::projective_t* d_out, BN254::pro
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int evaluate_points_batch_cuda_bn254(BN254::projective_t* d_out, BN254::projective_t* d_coefficients, BN254::scalar_t* d_domain, unsigned domain_size,
|
||||
extern "C" int evaluate_points_batch_cuda(projective_t* d_out, projective_t* d_coefficients, scalar_t* d_domain, unsigned domain_size,
|
||||
unsigned n, unsigned batch_size, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
BN254::scalar_t* _null = nullptr;
|
||||
scalar_t* _null = nullptr;
|
||||
cudaStreamCreate(&stream);
|
||||
return evaluate_batch(d_out, d_coefficients, d_domain, domain_size, n, batch_size, false, _null, stream);
|
||||
}
|
||||
@@ -201,8 +201,8 @@ extern "C" int evaluate_points_batch_cuda_bn254(BN254::projective_t* d_out, BN25
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int evaluate_scalars_on_coset_cuda_bn254(BN254::scalar_t* d_out, BN254::scalar_t *d_coefficients, BN254::scalar_t *d_domain, unsigned domain_size,
|
||||
unsigned n, BN254::scalar_t *coset_powers, unsigned device_id = 0, cudaStream_t stream = 0)
|
||||
extern "C" int evaluate_scalars_on_coset_cuda(scalar_t* d_out, scalar_t *d_coefficients, scalar_t *d_domain, unsigned domain_size,
|
||||
unsigned n, scalar_t *coset_powers, unsigned device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
@@ -216,8 +216,8 @@ extern "C" int evaluate_scalars_on_coset_cuda_bn254(BN254::scalar_t* d_out, BN25
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int evaluate_scalars_on_coset_batch_cuda_bn254(BN254::scalar_t* d_out, BN254::scalar_t* d_coefficients, BN254::scalar_t* d_domain, unsigned domain_size,
|
||||
unsigned n, unsigned batch_size, BN254::scalar_t *coset_powers, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
extern "C" int evaluate_scalars_on_coset_batch_cuda(scalar_t* d_out, scalar_t* d_coefficients, scalar_t* d_domain, unsigned domain_size,
|
||||
unsigned n, unsigned batch_size, scalar_t *coset_powers, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
@@ -231,8 +231,8 @@ extern "C" int evaluate_scalars_on_coset_batch_cuda_bn254(BN254::scalar_t* d_out
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int evaluate_points_on_coset_cuda_bn254(BN254::projective_t* d_out, BN254::projective_t *d_coefficients, BN254::scalar_t *d_domain, unsigned domain_size,
|
||||
unsigned n, BN254::scalar_t *coset_powers, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
extern "C" int evaluate_points_on_coset_cuda(projective_t* d_out, projective_t *d_coefficients, scalar_t *d_domain, unsigned domain_size,
|
||||
unsigned n, scalar_t *coset_powers, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
@@ -246,8 +246,8 @@ extern "C" int evaluate_points_on_coset_cuda_bn254(BN254::projective_t* d_out, B
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int evaluate_points_on_coset_batch_cuda_bn254(BN254::projective_t* d_out, BN254::projective_t* d_coefficients, BN254::scalar_t* d_domain, unsigned domain_size,
|
||||
unsigned n, unsigned batch_size, BN254::scalar_t *coset_powers, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
extern "C" int evaluate_points_on_coset_batch_cuda(projective_t* d_out, projective_t* d_coefficients, scalar_t* d_domain, unsigned domain_size,
|
||||
unsigned n, unsigned batch_size, scalar_t *coset_powers, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
@@ -261,7 +261,7 @@ extern "C" int evaluate_points_on_coset_batch_cuda_bn254(BN254::projective_t* d_
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int reverse_order_scalars_cuda_bn254(BN254::scalar_t* arr, int n, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
extern "C" int reverse_order_scalars_cuda(scalar_t* arr, int n, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
@@ -277,7 +277,7 @@ extern "C" int reverse_order_scalars_cuda_bn254(BN254::scalar_t* arr, int n, siz
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int reverse_order_scalars_batch_cuda_bn254(BN254::scalar_t* arr, int n, int batch_size, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
extern "C" int reverse_order_scalars_batch_cuda(scalar_t* arr, int n, int batch_size, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
@@ -293,7 +293,7 @@ extern "C" int reverse_order_scalars_batch_cuda_bn254(BN254::scalar_t* arr, int
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int reverse_order_points_cuda_bn254(BN254::projective_t* arr, int n, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
extern "C" int reverse_order_points_cuda(projective_t* arr, int n, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
@@ -309,7 +309,7 @@ extern "C" int reverse_order_points_cuda_bn254(BN254::projective_t* arr, int n,
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int reverse_order_points_batch_cuda_bn254(BN254::projective_t* arr, int n, int batch_size, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
extern "C" int reverse_order_points_batch_cuda(projective_t* arr, int n, int batch_size, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
@@ -1,18 +1,18 @@
|
||||
#ifndef _BN254_MSM
|
||||
#define _BN254_MSM
|
||||
#include "../../appUtils/msm/msm.cu"
|
||||
#ifndef _MSM
|
||||
#define _MSM
|
||||
#include "../appUtils/msm/msm.cu"
|
||||
#include <stdexcept>
|
||||
#include <cuda.h>
|
||||
#include "curve_config.cuh"
|
||||
|
||||
|
||||
extern "C"
|
||||
int msm_cuda_bn254(BN254::projective_t *out, BN254::affine_t points[],
|
||||
BN254::scalar_t scalars[], size_t count, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
int msm_cuda(projective_t *out, affine_t points[],
|
||||
scalar_t scalars[], size_t count, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
large_msm<BN254::scalar_t, BN254::projective_t, BN254::affine_t>(scalars, points, count, out, false, stream);
|
||||
large_msm<scalar_t, projective_t, affine_t>(scalars, points, count, out, false, stream);
|
||||
return CUDA_SUCCESS;
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
@@ -22,13 +22,13 @@ int msm_cuda_bn254(BN254::projective_t *out, BN254::affine_t points[],
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int msm_batch_cuda_bn254(BN254::projective_t* out, BN254::affine_t points[],
|
||||
BN254::scalar_t scalars[], size_t batch_size, size_t msm_size, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
extern "C" int msm_batch_cuda(projective_t* out, affine_t points[],
|
||||
scalar_t scalars[], size_t batch_size, size_t msm_size, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
batched_large_msm<BN254::scalar_t, BN254::projective_t, BN254::affine_t>(scalars, points, batch_size, msm_size, out, false, stream);
|
||||
batched_large_msm<scalar_t, projective_t, affine_t>(scalars, points, batch_size, msm_size, out, false, stream);
|
||||
cudaStreamSynchronize(stream);
|
||||
return CUDA_SUCCESS;
|
||||
}
|
||||
@@ -48,7 +48,7 @@ extern "C" int msm_batch_cuda_bn254(BN254::projective_t* out, BN254::affine_t po
|
||||
* @param count Length of `d_scalars` and `d_points` arrays (they should have equal length).
|
||||
*/
|
||||
extern "C"
|
||||
int commit_cuda_bn254(BN254::projective_t* d_out, BN254::scalar_t* d_scalars, BN254::affine_t* d_points, size_t count, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
int commit_cuda(projective_t* d_out, scalar_t* d_scalars, affine_t* d_points, size_t count, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
@@ -73,7 +73,7 @@ extern "C" int msm_batch_cuda_bn254(BN254::projective_t* out, BN254::affine_t po
|
||||
* @param batch_size Size of the batch.
|
||||
*/
|
||||
extern "C"
|
||||
int commit_batch_cuda_bn254(BN254::projective_t* d_out, BN254::scalar_t* d_scalars, BN254::affine_t* d_points, size_t count, size_t batch_size, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
int commit_batch_cuda(projective_t* d_out, scalar_t* d_scalars, affine_t* d_points, size_t count, size_t batch_size, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
@@ -1,19 +1,19 @@
|
||||
#ifndef _BLS12_381_POSEIDON
|
||||
#define _BLS12_381_POSEIDON
|
||||
#ifndef _POSEIDON
|
||||
#define _POSEIDON
|
||||
#include <cuda.h>
|
||||
#include <stdexcept>
|
||||
#include "../../appUtils/poseidon/poseidon.cu"
|
||||
#include "../appUtils/poseidon/poseidon.cu"
|
||||
#include "curve_config.cuh"
|
||||
|
||||
template class Poseidon<BLS12_381::scalar_t>;
|
||||
template class Poseidon<scalar_t>;
|
||||
|
||||
extern "C" int poseidon_multi_cuda_bls12_381(BLS12_381::scalar_t input[], BLS12_381::scalar_t* out,
|
||||
extern "C" int poseidon_multi_cuda(scalar_t input[], scalar_t* out,
|
||||
size_t number_of_blocks, int arity, size_t device_id = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
Poseidon<BLS12_381::scalar_t> poseidon(arity);
|
||||
poseidon.hash_blocks(input, number_of_blocks, out, Poseidon<BLS12_381::scalar_t>::HashType::MerkleTree);
|
||||
Poseidon<scalar_t> poseidon(arity);
|
||||
poseidon.hash_blocks(input, number_of_blocks, out, Poseidon<scalar_t>::HashType::MerkleTree);
|
||||
|
||||
return CUDA_SUCCESS;
|
||||
}
|
||||
19
icicle-cuda/curves/projective.cu
Normal file
19
icicle-cuda/curves/projective.cu
Normal file
@@ -0,0 +1,19 @@
|
||||
#include <cuda.h>
|
||||
#include "curve_config.cuh"
|
||||
#include "../primitives/projective.cuh"
|
||||
|
||||
extern "C" bool eq(projective_t *point1, projective_t *point2)
|
||||
{
|
||||
return (*point1 == *point2) &&
|
||||
!((point1->x == point_field_t::zero()) && (point1->y == point_field_t::zero()) && (point1->z == point_field_t::zero())) &&
|
||||
!((point2->x == point_field_t::zero()) && (point2->y == point_field_t::zero()) && (point2->z == point_field_t::zero()));
|
||||
}
|
||||
|
||||
#if defined(G2_DEFINED)
|
||||
extern "C" bool eq_g2(g2_projective_t *point1, g2_projective_t *point2)
|
||||
{
|
||||
return (*point1 == *point2) &&
|
||||
!((point1->x == g2_point_field_t::zero()) && (point1->y == g2_point_field_t::zero()) && (point1->z == g2_point_field_t::zero())) &&
|
||||
!((point2->x == g2_point_field_t::zero()) && (point2->y == g2_point_field_t::zero()) && (point2->z == g2_point_field_t::zero()));
|
||||
}
|
||||
#endif
|
||||
@@ -1,16 +1,16 @@
|
||||
#ifndef _BN254_VEC_MULT
|
||||
#define _BN254_VEC_MULT
|
||||
#ifndef _VEC_MULT
|
||||
#define _VEC_MULT
|
||||
#include <stdio.h>
|
||||
#include <iostream>
|
||||
#include "../../primitives/field.cuh"
|
||||
#include "../../utils/storage.cuh"
|
||||
#include "../../primitives/projective.cuh"
|
||||
#include "../primitives/field.cuh"
|
||||
#include "../utils/storage.cuh"
|
||||
#include "../primitives/projective.cuh"
|
||||
#include "curve_config.cuh"
|
||||
#include "../../appUtils/vector_manipulation/ve_mod_mult.cuh"
|
||||
#include "../appUtils/vector_manipulation/ve_mod_mult.cuh"
|
||||
|
||||
|
||||
extern "C" int32_t vec_mod_mult_point_bn254(BN254::projective_t *inout,
|
||||
BN254::scalar_t *scalar_vec,
|
||||
extern "C" int32_t vec_mod_mult_point(projective_t *inout,
|
||||
scalar_t *scalar_vec,
|
||||
size_t n_elments,
|
||||
size_t device_id,
|
||||
cudaStream_t stream = 0)
|
||||
@@ -20,7 +20,7 @@ extern "C" int32_t vec_mod_mult_point_bn254(BN254::projective_t *inout,
|
||||
try
|
||||
{
|
||||
// TODO: device_id
|
||||
vector_mod_mult<BN254::projective_t, BN254::scalar_t>(scalar_vec, inout, inout, n_elments, stream);
|
||||
vector_mod_mult<projective_t, scalar_t>(scalar_vec, inout, inout, n_elments, stream);
|
||||
return CUDA_SUCCESS;
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
@@ -30,8 +30,8 @@ extern "C" int32_t vec_mod_mult_point_bn254(BN254::projective_t *inout,
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int32_t vec_mod_mult_scalar_bn254(BN254::scalar_t *inout,
|
||||
BN254::scalar_t *scalar_vec,
|
||||
extern "C" int32_t vec_mod_mult_scalar(scalar_t *inout,
|
||||
scalar_t *scalar_vec,
|
||||
size_t n_elments,
|
||||
size_t device_id,
|
||||
cudaStream_t stream = 0)
|
||||
@@ -41,7 +41,7 @@ extern "C" int32_t vec_mod_mult_scalar_bn254(BN254::scalar_t *inout,
|
||||
try
|
||||
{
|
||||
// TODO: device_id
|
||||
vector_mod_mult<BN254::scalar_t, BN254::scalar_t>(scalar_vec, inout, inout, n_elments, stream);
|
||||
vector_mod_mult<scalar_t, scalar_t>(scalar_vec, inout, inout, n_elments, stream);
|
||||
return CUDA_SUCCESS;
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
@@ -51,9 +51,9 @@ extern "C" int32_t vec_mod_mult_scalar_bn254(BN254::scalar_t *inout,
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int32_t matrix_vec_mod_mult_bn254(BN254::scalar_t *matrix_flattened,
|
||||
BN254::scalar_t *input,
|
||||
BN254::scalar_t *output,
|
||||
extern "C" int32_t matrix_vec_mod_mult(scalar_t *matrix_flattened,
|
||||
scalar_t *input,
|
||||
scalar_t *output,
|
||||
size_t n_elments,
|
||||
size_t device_id,
|
||||
cudaStream_t stream = 0)
|
||||
@@ -63,7 +63,7 @@ extern "C" int32_t matrix_vec_mod_mult_bn254(BN254::scalar_t *matrix_flattened,
|
||||
try
|
||||
{
|
||||
// TODO: device_id
|
||||
matrix_mod_mult<BN254::scalar_t>(matrix_flattened, input, output, n_elments, stream);
|
||||
matrix_mod_mult<scalar_t>(matrix_flattened, input, output, n_elments, stream);
|
||||
return CUDA_SUCCESS;
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
@@ -1,22 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include "../../primitives/field.cuh"
|
||||
#include "../../primitives/projective.cuh"
|
||||
|
||||
#include "params.cuh"
|
||||
|
||||
namespace BLS12_377 {
|
||||
typedef Field<PARAMS_BLS12_377::fp_config> scalar_field_t;
|
||||
typedef scalar_field_t scalar_t;
|
||||
typedef Field<PARAMS_BLS12_377::fq_config> point_field_t;
|
||||
static constexpr point_field_t b = point_field_t{ PARAMS_BLS12_377::weierstrass_b };
|
||||
typedef Projective<point_field_t, scalar_field_t, b> projective_t;
|
||||
typedef Affine<point_field_t> affine_t;
|
||||
#if defined(G2_DEFINED)
|
||||
typedef ExtensionField<PARAMS_BLS12_377::fq_config> g2_point_field_t;
|
||||
static constexpr g2_point_field_t b_g2 = g2_point_field_t{ point_field_t{ PARAMS_BLS12_377::weierstrass_b_g2_re },
|
||||
point_field_t{ PARAMS_BLS12_377::weierstrass_b_g2_im }};
|
||||
typedef Projective<g2_point_field_t, scalar_field_t, b_g2> g2_projective_t;
|
||||
typedef Affine<g2_point_field_t> g2_affine_t;
|
||||
#endif
|
||||
}
|
||||
@@ -1,327 +0,0 @@
|
||||
#ifndef _BLS12_377_LDE
|
||||
#define _BLS12_377_LDE
|
||||
#include <cuda.h>
|
||||
#include "../../appUtils/ntt/lde.cu"
|
||||
#include "../../appUtils/ntt/ntt.cuh"
|
||||
#include "../../appUtils/vector_manipulation/ve_mod_mult.cuh"
|
||||
#include "curve_config.cuh"
|
||||
|
||||
extern "C" BLS12_377::scalar_t* build_domain_cuda_bls12_377(uint32_t domain_size, uint32_t logn, bool inverse, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
if (inverse) {
|
||||
return fill_twiddle_factors_array(domain_size, BLS12_377::scalar_t::omega_inv(logn), stream);
|
||||
} else {
|
||||
return fill_twiddle_factors_array(domain_size, BLS12_377::scalar_t::omega(logn), stream);
|
||||
}
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int ntt_cuda_bls12_377(BLS12_377::scalar_t *arr, uint32_t n, bool inverse, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
return ntt_end2end_template<BLS12_377::scalar_t,BLS12_377::scalar_t>(arr, n, inverse, stream); // TODO: pass device_id
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int ecntt_cuda_bls12_377(BLS12_377::projective_t *arr, uint32_t n, bool inverse, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
return ntt_end2end_template<BLS12_377::projective_t,BLS12_377::scalar_t>(arr, n, inverse, stream); // TODO: pass device_id
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int ntt_batch_cuda_bls12_377(BLS12_377::scalar_t *arr, uint32_t arr_size, uint32_t batch_size, bool inverse, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
return ntt_end2end_batch_template<BLS12_377::scalar_t,BLS12_377::scalar_t>(arr, arr_size, batch_size, inverse, stream); // TODO: pass device_id
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int ecntt_batch_cuda_bls12_377(BLS12_377::projective_t *arr, uint32_t arr_size, uint32_t batch_size, bool inverse, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
return ntt_end2end_batch_template<BLS12_377::projective_t,BLS12_377::scalar_t>(arr, arr_size, batch_size, inverse, stream); // TODO: pass device_id
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int interpolate_scalars_cuda_bls12_377(BLS12_377::scalar_t* d_out, BLS12_377::scalar_t *d_evaluations, BLS12_377::scalar_t *d_domain, unsigned n, unsigned device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
return interpolate(d_out, d_evaluations, d_domain, n, stream);
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int interpolate_scalars_batch_cuda_bls12_377(BLS12_377::scalar_t* d_out, BLS12_377::scalar_t* d_evaluations, BLS12_377::scalar_t* d_domain, unsigned n,
|
||||
unsigned batch_size, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
return interpolate_batch(d_out, d_evaluations, d_domain, n, batch_size, stream);
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int interpolate_points_cuda_bls12_377(BLS12_377::projective_t* d_out, BLS12_377::projective_t *d_evaluations, BLS12_377::scalar_t *d_domain, unsigned n, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
return interpolate(d_out, d_evaluations, d_domain, n, stream);
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int interpolate_points_batch_cuda_bls12_377(BLS12_377::projective_t* d_out, BLS12_377::projective_t* d_evaluations, BLS12_377::scalar_t* d_domain,
|
||||
unsigned n, unsigned batch_size, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
return interpolate_batch(d_out, d_evaluations, d_domain, n, batch_size, stream);
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int evaluate_scalars_cuda_bls12_377(BLS12_377::scalar_t* d_out, BLS12_377::scalar_t *d_coefficients, BLS12_377::scalar_t *d_domain,
|
||||
unsigned domain_size, unsigned n, unsigned device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
BLS12_377::scalar_t* _null = nullptr;
|
||||
cudaStreamCreate(&stream);
|
||||
return evaluate(d_out, d_coefficients, d_domain, domain_size, n, false, _null, stream);
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int evaluate_scalars_batch_cuda_bls12_377(BLS12_377::scalar_t* d_out, BLS12_377::scalar_t* d_coefficients, BLS12_377::scalar_t* d_domain, unsigned domain_size,
|
||||
unsigned n, unsigned batch_size, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
BLS12_377::scalar_t* _null = nullptr;
|
||||
cudaStreamCreate(&stream);
|
||||
return evaluate_batch(d_out, d_coefficients, d_domain, domain_size, n, batch_size, false, _null, stream);
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int evaluate_points_cuda_bls12_377(BLS12_377::projective_t* d_out, BLS12_377::projective_t *d_coefficients, BLS12_377::scalar_t *d_domain,
|
||||
unsigned domain_size, unsigned n, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
BLS12_377::scalar_t* _null = nullptr;
|
||||
cudaStreamCreate(&stream);
|
||||
return evaluate(d_out, d_coefficients, d_domain, domain_size, n, false, _null, stream);
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int evaluate_points_batch_cuda_bls12_377(BLS12_377::projective_t* d_out, BLS12_377::projective_t* d_coefficients, BLS12_377::scalar_t* d_domain, unsigned domain_size,
|
||||
unsigned n, unsigned batch_size, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
BLS12_377::scalar_t* _null = nullptr;
|
||||
cudaStreamCreate(&stream);
|
||||
return evaluate_batch(d_out, d_coefficients, d_domain, domain_size, n, batch_size, false, _null, stream);
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int evaluate_scalars_on_coset_cuda_bls12_377(BLS12_377::scalar_t* d_out, BLS12_377::scalar_t *d_coefficients, BLS12_377::scalar_t *d_domain, unsigned domain_size,
|
||||
unsigned n, BLS12_377::scalar_t *coset_powers, unsigned device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
return evaluate(d_out, d_coefficients, d_domain, domain_size, n, true, coset_powers, stream);
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int evaluate_scalars_on_coset_batch_cuda_bls12_377(BLS12_377::scalar_t* d_out, BLS12_377::scalar_t* d_coefficients, BLS12_377::scalar_t* d_domain, unsigned domain_size,
|
||||
unsigned n, unsigned batch_size, BLS12_377::scalar_t *coset_powers, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
return evaluate_batch(d_out, d_coefficients, d_domain, domain_size, n, batch_size, true, coset_powers, stream);
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int evaluate_points_on_coset_cuda_bls12_377(BLS12_377::projective_t* d_out, BLS12_377::projective_t *d_coefficients, BLS12_377::scalar_t *d_domain, unsigned domain_size,
|
||||
unsigned n, BLS12_377::scalar_t *coset_powers, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
return evaluate(d_out, d_coefficients, d_domain, domain_size, n, true, coset_powers, stream);
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int evaluate_points_on_coset_batch_cuda_bls12_377(BLS12_377::projective_t* d_out, BLS12_377::projective_t* d_coefficients, BLS12_377::scalar_t* d_domain, unsigned domain_size,
|
||||
unsigned n, unsigned batch_size, BLS12_377::scalar_t *coset_powers, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
return evaluate_batch(d_out, d_coefficients, d_domain, domain_size, n, batch_size, true, coset_powers, stream);
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int reverse_order_scalars_cuda_bls12_377(BLS12_377::scalar_t* arr, int n, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
uint32_t logn = uint32_t(log(n) / log(2));
|
||||
cudaStreamCreate(&stream);
|
||||
reverse_order(arr, n, logn, stream);
|
||||
return 0;
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int reverse_order_scalars_batch_cuda_bls12_377(BLS12_377::scalar_t* arr, int n, int batch_size, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
uint32_t logn = uint32_t(log(n) / log(2));
|
||||
cudaStreamCreate(&stream);
|
||||
reverse_order_batch(arr, n, logn, batch_size, stream);
|
||||
return 0;
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int reverse_order_points_cuda_bls12_377(BLS12_377::projective_t* arr, int n, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
uint32_t logn = uint32_t(log(n) / log(2));
|
||||
cudaStreamCreate(&stream);
|
||||
reverse_order(arr, n, logn, stream);
|
||||
return 0;
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int reverse_order_points_batch_cuda_bls12_377(BLS12_377::projective_t* arr, int n, int batch_size, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
uint32_t logn = uint32_t(log(n) / log(2));
|
||||
cudaStreamCreate(&stream);
|
||||
reverse_order_batch(arr, n, logn, batch_size, stream);
|
||||
return 0;
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
@@ -1,93 +0,0 @@
|
||||
#ifndef _BLS12_377_MSM
#define _BLS12_377_MSM
#include "../../appUtils/msm/msm.cu"
#include <stdexcept>
#include <cuda.h>
#include "curve_config.cuh"

// C bindings for BLS12-377 multi-scalar multiplication (MSM) and MSM-based
// polynomial commitment. Each wrapper converts std::runtime_error into -1;
// success is CUDA_SUCCESS (0).
// TODO: device_id is accepted but currently ignored by every wrapper.
// NOTE(review): the *_batch wrappers call cudaStreamCreate(&stream), which
// discards the caller-supplied stream and never destroys the created one
// (stream leak) — confirm intended behaviour.

// Single MSM: out = sum_i scalars[i] * points[i]. Inputs are host arrays
// (on_device = false in the call below).
extern "C"
int msm_cuda_bls12_377(BLS12_377::projective_t *out, BLS12_377::affine_t points[],
                       BLS12_377::scalar_t scalars[], size_t count, size_t device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    large_msm<BLS12_377::scalar_t, BLS12_377::projective_t, BLS12_377::affine_t>(scalars, points, count, out, false, stream);
    return CUDA_SUCCESS;
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}

// Batch of batch_size MSMs, each of msm_size pairs; synchronizes before returning.
extern "C" int msm_batch_cuda_bls12_377(BLS12_377::projective_t* out, BLS12_377::affine_t points[],
                                        BLS12_377::scalar_t scalars[], size_t batch_size, size_t msm_size, size_t device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    cudaStreamCreate(&stream); // NOTE(review): leaks; see file header.
    batched_large_msm<BLS12_377::scalar_t, BLS12_377::projective_t, BLS12_377::affine_t>(scalars, points, batch_size, msm_size, out, false, stream);
    cudaStreamSynchronize(stream);
    return CUDA_SUCCESS;
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}

/**
 * Commit to a polynomial using the MSM.
 * Note: this function just calls the MSM, it doesn't convert between evaluation and coefficient form of scalars or points.
 * @param d_out Output point to write the result to.
 * @param d_scalars Scalars for the MSM. Must be on device.
 * @param d_points Points for the MSM. Must be on device.
 * @param count Length of `d_scalars` and `d_points` arrays (they should have equal length).
 */
extern "C"
int commit_cuda_bls12_377(BLS12_377::projective_t* d_out, BLS12_377::scalar_t* d_scalars, BLS12_377::affine_t* d_points, size_t count, size_t device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    large_msm<BLS12_377::scalar_t, BLS12_377::projective_t, BLS12_377::affine_t>(d_scalars, d_points, count, d_out, true, stream);
    cudaStreamSynchronize(stream);
    return CUDA_SUCCESS; // was literal 0; same value, aligned with the other wrappers
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}

/**
 * Commit to a batch of polynomials using the MSM.
 * Note: this function just calls the MSM, it doesn't convert between evaluation and coefficient form of scalars or points.
 * @param d_out Output point to write the results to.
 * @param d_scalars Scalars for the MSMs of all polynomials. Must be on device.
 * @param d_points Points for the MSMs. Must be on device. It is assumed that this set of bases is used for each MSM.
 * @param count Length of `d_points` array, `d_scalars` has length `count` * `batch_size`.
 * @param batch_size Size of the batch.
 */
extern "C"
int commit_batch_cuda_bls12_377(BLS12_377::projective_t* d_out, BLS12_377::scalar_t* d_scalars, BLS12_377::affine_t* d_points, size_t count, size_t batch_size, size_t device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    cudaStreamCreate(&stream); // NOTE(review): leaks; see file header.
    batched_large_msm(d_scalars, d_points, batch_size, count, d_out, true, stream);
    cudaStreamSynchronize(stream);
    return CUDA_SUCCESS; // was literal 0; same value, aligned with the other wrappers
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}

#endif
|
||||
@@ -1,22 +0,0 @@
|
||||
|
||||
#include <cuda.h>

#include "curve_config.cuh"

#include "../../primitives/projective.cuh"

// True when the two projective points compare equal AND neither is the
// degenerate all-zero triple (0, 0, 0), which is not a valid projective point.
extern "C" bool eq_bls12_377(BLS12_377::projective_t *point1, BLS12_377::projective_t *point2)
{
  using F = BLS12_377::point_field_t;
  const bool p1_is_zero_triple =
      (point1->x == F::zero()) && (point1->y == F::zero()) && (point1->z == F::zero());
  const bool p2_is_zero_triple =
      (point2->x == F::zero()) && (point2->y == F::zero()) && (point2->z == F::zero());
  return (*point1 == *point2) && !p1_is_zero_triple && !p2_is_zero_triple;
}

#if defined(G2_DEFINED)
// G2 counterpart of eq_bls12_377, over the extension field.
extern "C" bool eq_g2_bls12_377(BLS12_377::g2_projective_t *point1, BLS12_377::g2_projective_t *point2)
{
  using F2 = BLS12_377::g2_point_field_t;
  const bool p1_is_zero_triple =
      (point1->x == F2::zero()) && (point1->y == F2::zero()) && (point1->z == F2::zero());
  const bool p2_is_zero_triple =
      (point2->x == F2::zero()) && (point2->y == F2::zero()) && (point2->z == F2::zero());
  return (*point1 == *point2) && !p1_is_zero_triple && !p2_is_zero_triple;
}
#endif
|
||||
@@ -1,5 +0,0 @@
|
||||
|
||||
#include "projective.cu"
|
||||
#include "lde.cu"
|
||||
#include "msm.cu"
|
||||
#include "ve_mod_mult.cu"
|
||||
@@ -1,69 +0,0 @@
|
||||
#ifndef _BLS12_377_VEC_MULT
#define _BLS12_377_VEC_MULT
#include <stdio.h>
#include <iostream>
#include "../../primitives/field.cuh"
#include "../../utils/storage.cuh"
#include "../../primitives/projective.cuh"
#include "curve_config.cuh"
#include "../../appUtils/vector_manipulation/ve_mod_mult.cuh"

// C bindings for BLS12-377 element-wise modular multiplication.
// Each wrapper returns CUDA_SUCCESS on success and -1 when the callee
// throws std::runtime_error.

// In-place point-by-scalar product: inout[i] = scalar_vec[i] * inout[i].
extern "C" int32_t vec_mod_mult_point_bls12_377(BLS12_377::projective_t *inout,
                                                BLS12_377::scalar_t *scalar_vec,
                                                size_t n_elments,
                                                size_t device_id,
                                                cudaStream_t stream = 0)
{
  try {
    // TODO: device_id
    vector_mod_mult<BLS12_377::projective_t, BLS12_377::scalar_t>(scalar_vec, inout, inout, n_elments, stream);
    return CUDA_SUCCESS;
  } catch (const std::runtime_error &ex) {
    printf("error %s", ex.what()); // TODO: error code and message
    return -1;
  }
}

// In-place scalar-by-scalar product: inout[i] = scalar_vec[i] * inout[i].
extern "C" int32_t vec_mod_mult_scalar_bls12_377(BLS12_377::scalar_t *inout,
                                                 BLS12_377::scalar_t *scalar_vec,
                                                 size_t n_elments,
                                                 size_t device_id,
                                                 cudaStream_t stream = 0)
{
  try {
    // TODO: device_id
    vector_mod_mult<BLS12_377::scalar_t, BLS12_377::scalar_t>(scalar_vec, inout, inout, n_elments, stream);
    return CUDA_SUCCESS;
  } catch (const std::runtime_error &ex) {
    printf("error %s", ex.what()); // TODO: error code and message
    return -1;
  }
}

// Matrix-vector modular product over the scalar field; matrix is row-major flattened.
extern "C" int32_t matrix_vec_mod_mult_bls12_377(BLS12_377::scalar_t *matrix_flattened,
                                                 BLS12_377::scalar_t *input,
                                                 BLS12_377::scalar_t *output,
                                                 size_t n_elments,
                                                 size_t device_id,
                                                 cudaStream_t stream = 0)
{
  try {
    // TODO: device_id
    matrix_mod_mult<BLS12_377::scalar_t>(matrix_flattened, input, output, n_elments, stream);
    return CUDA_SUCCESS;
  } catch (const std::runtime_error &ex) {
    printf("error %s", ex.what()); // TODO: error code and message
    return -1;
  }
}
#endif
|
||||
@@ -1,22 +0,0 @@
|
||||
#pragma once

#include "../../primitives/field.cuh"
#include "../../primitives/projective.cuh"

#include "params.cuh"

// Curve type aliases for BLS12-381, instantiated from the parameter set
// declared in params.cuh. These names are what the extern "C" bindings use.
namespace BLS12_381 {
  // Scalar (Fr) field; scalar_t is the short alias used throughout the bindings.
  typedef Field<PARAMS_BLS12_381::fp_config> scalar_field_t;
  typedef scalar_field_t scalar_t;
  // Base (Fq) field in which G1 point coordinates live.
  typedef Field<PARAMS_BLS12_381::fq_config> point_field_t;
  // Short-Weierstrass constant b of the curve equation y^2 = x^3 + b.
  static constexpr point_field_t b = point_field_t{ PARAMS_BLS12_381::weierstrass_b };
  typedef Projective<point_field_t, scalar_field_t, b> projective_t;
  typedef Affine<point_field_t> affine_t;
#if defined(G2_DEFINED)
  // G2 coordinates live over the quadratic extension of Fq; b_g2 is its curve constant.
  typedef ExtensionField<PARAMS_BLS12_381::fq_config> g2_point_field_t;
  static constexpr g2_point_field_t b_g2 = g2_point_field_t{ point_field_t{ PARAMS_BLS12_381::weierstrass_b_g2_re },
                                                             point_field_t{ PARAMS_BLS12_381::weierstrass_b_g2_im }};
  typedef Projective<g2_point_field_t, scalar_field_t, b_g2> g2_projective_t;
  typedef Affine<g2_point_field_t> g2_affine_t;
#endif
}
|
||||
@@ -1,327 +0,0 @@
|
||||
#ifndef _BLS12_381_LDE
#define _BLS12_381_LDE
#include <cuda.h>
#include "../../appUtils/ntt/lde.cu"
#include "../../appUtils/ntt/ntt.cuh"
#include "../../appUtils/vector_manipulation/ve_mod_mult.cuh"
#include "curve_config.cuh"

// ---------------------------------------------------------------------------
// BLS12-381 LDE / NTT C bindings: thin extern "C" wrappers around the
// templated kernels in appUtils/ntt. All `d_`-prefixed pointers are device
// memory. Each wrapper translates a std::runtime_error into -1 (or nullptr).
// TODO: device_id is accepted but currently ignored by every wrapper.
// NOTE(review): wrappers that call cudaStreamCreate(&stream) discard the
// caller-supplied stream and never destroy the created one (stream leak) —
// confirm intended behaviour before relying on the `stream` parameter.
// ---------------------------------------------------------------------------

// Build the (inverse) twiddle-factor array for a domain of domain_size = 2^logn.
// Returns a device pointer, or nullptr on error.
extern "C" BLS12_381::scalar_t* build_domain_cuda_bls12_381(uint32_t domain_size, uint32_t logn, bool inverse, size_t device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    cudaStreamCreate(&stream); // NOTE(review): leaks; see file header.
    if (inverse) {
      return fill_twiddle_factors_array(domain_size, BLS12_381::scalar_t::omega_inv(logn), stream);
    } else {
      return fill_twiddle_factors_array(domain_size, BLS12_381::scalar_t::omega(logn), stream);
    }
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return nullptr;
  }
}

// In-place (inverse) NTT over n scalars.
extern "C" int ntt_cuda_bls12_381(BLS12_381::scalar_t *arr, uint32_t n, bool inverse, size_t device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    cudaStreamCreate(&stream); // NOTE(review): leaks; see file header.
    return ntt_end2end_template<BLS12_381::scalar_t,BLS12_381::scalar_t>(arr, n, inverse, stream); // TODO: pass device_id
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}

// In-place (inverse) EC-NTT over n projective points.
extern "C" int ecntt_cuda_bls12_381(BLS12_381::projective_t *arr, uint32_t n, bool inverse, size_t device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    cudaStreamCreate(&stream); // NOTE(review): leaks; see file header.
    return ntt_end2end_template<BLS12_381::projective_t,BLS12_381::scalar_t>(arr, n, inverse, stream); // TODO: pass device_id
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}

// Batched NTT: batch_size transforms, arr_size elements each.
extern "C" int ntt_batch_cuda_bls12_381(BLS12_381::scalar_t *arr, uint32_t arr_size, uint32_t batch_size, bool inverse, size_t device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    cudaStreamCreate(&stream); // NOTE(review): leaks; see file header.
    return ntt_end2end_batch_template<BLS12_381::scalar_t,BLS12_381::scalar_t>(arr, arr_size, batch_size, inverse, stream); // TODO: pass device_id
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}

// Batched EC-NTT.
extern "C" int ecntt_batch_cuda_bls12_381(BLS12_381::projective_t *arr, uint32_t arr_size, uint32_t batch_size, bool inverse, size_t device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    cudaStreamCreate(&stream); // NOTE(review): leaks; see file header.
    return ntt_end2end_batch_template<BLS12_381::projective_t,BLS12_381::scalar_t>(arr, arr_size, batch_size, inverse, stream); // TODO: pass device_id
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}

// Interpolate scalars from evaluations over `d_domain` (inverse NTT of size n).
extern "C" int interpolate_scalars_cuda_bls12_381(BLS12_381::scalar_t* d_out, BLS12_381::scalar_t *d_evaluations, BLS12_381::scalar_t *d_domain, unsigned n, unsigned device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    return interpolate(d_out, d_evaluations, d_domain, n, stream);
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}

// Batched scalar interpolation.
extern "C" int interpolate_scalars_batch_cuda_bls12_381(BLS12_381::scalar_t* d_out, BLS12_381::scalar_t* d_evaluations, BLS12_381::scalar_t* d_domain, unsigned n,
                                                        unsigned batch_size, size_t device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    cudaStreamCreate(&stream); // NOTE(review): leaks; see file header.
    return interpolate_batch(d_out, d_evaluations, d_domain, n, batch_size, stream);
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}

// Interpolate EC points from evaluations over `d_domain`.
extern "C" int interpolate_points_cuda_bls12_381(BLS12_381::projective_t* d_out, BLS12_381::projective_t *d_evaluations, BLS12_381::scalar_t *d_domain, unsigned n, size_t device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    return interpolate(d_out, d_evaluations, d_domain, n, stream);
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}

// Batched EC-point interpolation.
extern "C" int interpolate_points_batch_cuda_bls12_381(BLS12_381::projective_t* d_out, BLS12_381::projective_t* d_evaluations, BLS12_381::scalar_t* d_domain,
                                                       unsigned n, unsigned batch_size, size_t device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    cudaStreamCreate(&stream); // NOTE(review): leaks; see file header.
    return interpolate_batch(d_out, d_evaluations, d_domain, n, batch_size, stream);
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}

// Evaluate a scalar polynomial (n coefficients) over a domain of domain_size points.
extern "C" int evaluate_scalars_cuda_bls12_381(BLS12_381::scalar_t* d_out, BLS12_381::scalar_t *d_coefficients, BLS12_381::scalar_t *d_domain,
                                               unsigned domain_size, unsigned n, unsigned device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    BLS12_381::scalar_t* _null = nullptr; // no coset powers for a plain evaluation
    cudaStreamCreate(&stream); // NOTE(review): leaks; see file header.
    return evaluate(d_out, d_coefficients, d_domain, domain_size, n, false, _null, stream);
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}

// Batched scalar evaluation.
extern "C" int evaluate_scalars_batch_cuda_bls12_381(BLS12_381::scalar_t* d_out, BLS12_381::scalar_t* d_coefficients, BLS12_381::scalar_t* d_domain, unsigned domain_size,
                                                     unsigned n, unsigned batch_size, size_t device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    BLS12_381::scalar_t* _null = nullptr; // no coset powers for a plain evaluation
    cudaStreamCreate(&stream); // NOTE(review): leaks; see file header.
    return evaluate_batch(d_out, d_coefficients, d_domain, domain_size, n, batch_size, false, _null, stream);
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}

// Evaluate an EC-point polynomial over a domain of domain_size points.
extern "C" int evaluate_points_cuda_bls12_381(BLS12_381::projective_t* d_out, BLS12_381::projective_t *d_coefficients, BLS12_381::scalar_t *d_domain,
                                              unsigned domain_size, unsigned n, size_t device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    BLS12_381::scalar_t* _null = nullptr; // no coset powers for a plain evaluation
    cudaStreamCreate(&stream); // NOTE(review): leaks; see file header.
    return evaluate(d_out, d_coefficients, d_domain, domain_size, n, false, _null, stream);
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}

// Batched EC-point evaluation.
extern "C" int evaluate_points_batch_cuda_bls12_381(BLS12_381::projective_t* d_out, BLS12_381::projective_t* d_coefficients, BLS12_381::scalar_t* d_domain, unsigned domain_size,
                                                    unsigned n, unsigned batch_size, size_t device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    BLS12_381::scalar_t* _null = nullptr; // no coset powers for a plain evaluation
    cudaStreamCreate(&stream); // NOTE(review): leaks; see file header.
    return evaluate_batch(d_out, d_coefficients, d_domain, domain_size, n, batch_size, false, _null, stream);
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}

// Evaluate scalars on a coset of the domain; coset_powers supplies the shift.
extern "C" int evaluate_scalars_on_coset_cuda_bls12_381(BLS12_381::scalar_t* d_out, BLS12_381::scalar_t *d_coefficients, BLS12_381::scalar_t *d_domain, unsigned domain_size,
                                                        unsigned n, BLS12_381::scalar_t *coset_powers, unsigned device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    cudaStreamCreate(&stream); // NOTE(review): leaks; see file header.
    return evaluate(d_out, d_coefficients, d_domain, domain_size, n, true, coset_powers, stream);
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}

// Batched coset evaluation of scalars.
extern "C" int evaluate_scalars_on_coset_batch_cuda_bls12_381(BLS12_381::scalar_t* d_out, BLS12_381::scalar_t* d_coefficients, BLS12_381::scalar_t* d_domain, unsigned domain_size,
                                                              unsigned n, unsigned batch_size, BLS12_381::scalar_t *coset_powers, size_t device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    cudaStreamCreate(&stream); // NOTE(review): leaks; see file header.
    return evaluate_batch(d_out, d_coefficients, d_domain, domain_size, n, batch_size, true, coset_powers, stream);
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}

// Evaluate EC points on a coset of the domain.
extern "C" int evaluate_points_on_coset_cuda_bls12_381(BLS12_381::projective_t* d_out, BLS12_381::projective_t *d_coefficients, BLS12_381::scalar_t *d_domain, unsigned domain_size,
                                                       unsigned n, BLS12_381::scalar_t *coset_powers, size_t device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    cudaStreamCreate(&stream); // NOTE(review): leaks; see file header.
    return evaluate(d_out, d_coefficients, d_domain, domain_size, n, true, coset_powers, stream);
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}

// Batched coset evaluation of EC points.
extern "C" int evaluate_points_on_coset_batch_cuda_bls12_381(BLS12_381::projective_t* d_out, BLS12_381::projective_t* d_coefficients, BLS12_381::scalar_t* d_domain, unsigned domain_size,
                                                             unsigned n, unsigned batch_size, BLS12_381::scalar_t *coset_powers, size_t device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    cudaStreamCreate(&stream); // NOTE(review): leaks; see file header.
    return evaluate_batch(d_out, d_coefficients, d_domain, domain_size, n, batch_size, true, coset_powers, stream);
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}

// Bit-reverse the order of n scalars in place (n is expected to be a power of two).
extern "C" int reverse_order_scalars_cuda_bls12_381(BLS12_381::scalar_t* arr, int n, size_t device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    // floor(log2(n)) via integer arithmetic. The previous double-precision
    // log(n)/log(2) could round just below an integer for large powers of
    // two and truncate to logn - 1, yielding a wrong permutation.
    uint32_t logn = 0;
    for (int m = n; m > 1; m >>= 1) ++logn;
    cudaStreamCreate(&stream); // NOTE(review): leaks; see file header.
    reverse_order(arr, n, logn, stream);
    return 0;
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}

// Batched bit-reversal of scalars.
extern "C" int reverse_order_scalars_batch_cuda_bls12_381(BLS12_381::scalar_t* arr, int n, int batch_size, size_t device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    // Exact floor(log2(n)); avoids FP rounding (see reverse_order_scalars).
    uint32_t logn = 0;
    for (int m = n; m > 1; m >>= 1) ++logn;
    cudaStreamCreate(&stream); // NOTE(review): leaks; see file header.
    reverse_order_batch(arr, n, logn, batch_size, stream);
    return 0;
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}

// Bit-reverse the order of n EC points in place.
extern "C" int reverse_order_points_cuda_bls12_381(BLS12_381::projective_t* arr, int n, size_t device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    // Exact floor(log2(n)); avoids FP rounding (see reverse_order_scalars).
    uint32_t logn = 0;
    for (int m = n; m > 1; m >>= 1) ++logn;
    cudaStreamCreate(&stream); // NOTE(review): leaks; see file header.
    reverse_order(arr, n, logn, stream);
    return 0;
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}

// Batched bit-reversal of EC points.
extern "C" int reverse_order_points_batch_cuda_bls12_381(BLS12_381::projective_t* arr, int n, int batch_size, size_t device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    // Exact floor(log2(n)); avoids FP rounding (see reverse_order_scalars).
    uint32_t logn = 0;
    for (int m = n; m > 1; m >>= 1) ++logn;
    cudaStreamCreate(&stream); // NOTE(review): leaks; see file header.
    reverse_order_batch(arr, n, logn, batch_size, stream);
    return 0;
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}
#endif
|
||||
@@ -1,92 +0,0 @@
|
||||
#ifndef _BLS12_381_MSM
#define _BLS12_381_MSM
#include "../../appUtils/msm/msm.cu"
#include <stdexcept>
#include <cuda.h>
#include "curve_config.cuh"

// C bindings for BLS12-381 multi-scalar multiplication (MSM) and MSM-based
// polynomial commitment. Each wrapper converts std::runtime_error into -1;
// success is CUDA_SUCCESS (0).
// TODO: device_id is accepted but currently ignored by every wrapper.
// NOTE(review): the *_batch wrappers call cudaStreamCreate(&stream), which
// discards the caller-supplied stream and never destroys the created one
// (stream leak) — confirm intended behaviour.

// Single MSM: out = sum_i scalars[i] * points[i]. Inputs are host arrays
// (on_device = false in the call below).
extern "C"
int msm_cuda_bls12_381(BLS12_381::projective_t *out, BLS12_381::affine_t points[],
                       BLS12_381::scalar_t scalars[], size_t count, size_t device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    large_msm<BLS12_381::scalar_t, BLS12_381::projective_t, BLS12_381::affine_t>(scalars, points, count, out, false, stream);
    return CUDA_SUCCESS;
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}

// Batch of batch_size MSMs, each of msm_size pairs; synchronizes before returning.
extern "C" int msm_batch_cuda_bls12_381(BLS12_381::projective_t* out, BLS12_381::affine_t points[],
                                        BLS12_381::scalar_t scalars[], size_t batch_size, size_t msm_size, size_t device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    cudaStreamCreate(&stream); // NOTE(review): leaks; see file header.
    batched_large_msm<BLS12_381::scalar_t, BLS12_381::projective_t, BLS12_381::affine_t>(scalars, points, batch_size, msm_size, out, false, stream);
    cudaStreamSynchronize(stream);
    return CUDA_SUCCESS;
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}

/**
 * Commit to a polynomial using the MSM.
 * Note: this function just calls the MSM, it doesn't convert between evaluation and coefficient form of scalars or points.
 * @param d_out Output point to write the result to.
 * @param d_scalars Scalars for the MSM. Must be on device.
 * @param d_points Points for the MSM. Must be on device.
 * @param count Length of `d_scalars` and `d_points` arrays (they should have equal length).
 */
extern "C"
int commit_cuda_bls12_381(BLS12_381::projective_t* d_out, BLS12_381::scalar_t* d_scalars, BLS12_381::affine_t* d_points, size_t count, size_t device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    large_msm(d_scalars, d_points, count, d_out, true, stream);
    cudaStreamSynchronize(stream);
    return CUDA_SUCCESS; // was literal 0; same value, aligned with the other wrappers
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}

/**
 * Commit to a batch of polynomials using the MSM.
 * Note: this function just calls the MSM, it doesn't convert between evaluation and coefficient form of scalars or points.
 * @param d_out Output point to write the results to.
 * @param d_scalars Scalars for the MSMs of all polynomials. Must be on device.
 * @param d_points Points for the MSMs. Must be on device. It is assumed that this set of bases is used for each MSM.
 * @param count Length of `d_points` array, `d_scalars` has length `count` * `batch_size`.
 * @param batch_size Size of the batch.
 */
extern "C"
int commit_batch_cuda_bls12_381(BLS12_381::projective_t* d_out, BLS12_381::scalar_t* d_scalars, BLS12_381::affine_t* d_points, size_t count, size_t batch_size, size_t device_id = 0, cudaStream_t stream = 0)
{
  try
  {
    cudaStreamCreate(&stream); // NOTE(review): leaks; see file header.
    batched_large_msm(d_scalars, d_points, batch_size, count, d_out, true, stream);
    cudaStreamSynchronize(stream);
    return CUDA_SUCCESS; // was literal 0; same value, aligned with the other wrappers
  }
  catch (const std::runtime_error &ex)
  {
    printf("error %s", ex.what());
    return -1;
  }
}

#endif
|
||||
@@ -1,19 +0,0 @@
|
||||
#include <cuda.h>
#include "curve_config.cuh"
#include "../../primitives/projective.cuh"

// True when the two projective points compare equal AND neither is the
// degenerate all-zero triple (0, 0, 0), which is not a valid projective point.
extern "C" bool eq_bls12_381(BLS12_381::projective_t *point1, BLS12_381::projective_t *point2)
{
  using F = BLS12_381::point_field_t;
  const bool p1_is_zero_triple =
      (point1->x == F::zero()) && (point1->y == F::zero()) && (point1->z == F::zero());
  const bool p2_is_zero_triple =
      (point2->x == F::zero()) && (point2->y == F::zero()) && (point2->z == F::zero());
  return (*point1 == *point2) && !p1_is_zero_triple && !p2_is_zero_triple;
}

#if defined(G2_DEFINED)
// G2 counterpart of eq_bls12_381, over the extension field.
extern "C" bool eq_g2_bls12_381(BLS12_381::g2_projective_t *point1, BLS12_381::g2_projective_t *point2)
{
  using F2 = BLS12_381::g2_point_field_t;
  const bool p1_is_zero_triple =
      (point1->x == F2::zero()) && (point1->y == F2::zero()) && (point1->z == F2::zero());
  const bool p2_is_zero_triple =
      (point2->x == F2::zero()) && (point2->y == F2::zero()) && (point2->z == F2::zero());
  return (*point1 == *point2) && !p1_is_zero_triple && !p2_is_zero_triple;
}
#endif
|
||||
@@ -1,68 +0,0 @@
|
||||
#ifndef _BLS12_381_VEC_MULT
#define _BLS12_381_VEC_MULT
#include <stdio.h>
#include <iostream>
#include "../../primitives/field.cuh"
#include "../../utils/storage.cuh"
#include "../../primitives/projective.cuh"
#include "curve_config.cuh"
#include "../../appUtils/vector_manipulation/ve_mod_mult.cuh"

// C bindings for BLS12-381 element-wise modular multiplication.
// Each wrapper returns CUDA_SUCCESS on success and -1 when the callee
// throws std::runtime_error.

// In-place point-by-scalar product: inout[i] = scalar_vec[i] * inout[i].
extern "C" int32_t vec_mod_mult_point_bls12_381(BLS12_381::projective_t *inout,
                                                BLS12_381::scalar_t *scalar_vec,
                                                size_t n_elments,
                                                size_t device_id,
                                                cudaStream_t stream = 0)
{
  try {
    // TODO: device_id
    vector_mod_mult<BLS12_381::projective_t, BLS12_381::scalar_t>(scalar_vec, inout, inout, n_elments, stream);
    return CUDA_SUCCESS;
  } catch (const std::runtime_error &ex) {
    printf("error %s", ex.what()); // TODO: error code and message
    return -1;
  }
}

// In-place scalar-by-scalar product: inout[i] = scalar_vec[i] * inout[i].
extern "C" int32_t vec_mod_mult_scalar_bls12_381(BLS12_381::scalar_t *inout,
                                                 BLS12_381::scalar_t *scalar_vec,
                                                 size_t n_elments,
                                                 size_t device_id,
                                                 cudaStream_t stream = 0)
{
  try {
    // TODO: device_id
    vector_mod_mult<BLS12_381::scalar_t, BLS12_381::scalar_t>(scalar_vec, inout, inout, n_elments, stream);
    return CUDA_SUCCESS;
  } catch (const std::runtime_error &ex) {
    printf("error %s", ex.what()); // TODO: error code and message
    return -1;
  }
}

// Matrix-vector modular product over the scalar field; matrix is row-major flattened.
extern "C" int32_t matrix_vec_mod_mult_bls12_381(BLS12_381::scalar_t *matrix_flattened,
                                                 BLS12_381::scalar_t *input,
                                                 BLS12_381::scalar_t *output,
                                                 size_t n_elments,
                                                 size_t device_id,
                                                 cudaStream_t stream = 0)
{
  try {
    // TODO: device_id
    matrix_mod_mult<BLS12_381::scalar_t>(matrix_flattened, input, output, n_elments, stream);
    return CUDA_SUCCESS;
  } catch (const std::runtime_error &ex) {
    printf("error %s", ex.what()); // TODO: error code and message
    return -1;
  }
}
#endif
|
||||
@@ -1,22 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include "../../primitives/field.cuh"
|
||||
#include "../../primitives/projective.cuh"
|
||||
|
||||
#include "params.cuh"
|
||||
|
||||
namespace BN254 {
|
||||
typedef Field<PARAMS_BN254::fp_config> scalar_field_t;
|
||||
typedef scalar_field_t scalar_t;
|
||||
typedef Field<PARAMS_BN254::fq_config> point_field_t;
|
||||
static constexpr point_field_t b = point_field_t{ PARAMS_BN254::weierstrass_b };
|
||||
typedef Projective<point_field_t, scalar_field_t, b> projective_t;
|
||||
typedef Affine<point_field_t> affine_t;
|
||||
#if defined(G2_DEFINED)
|
||||
typedef ExtensionField<PARAMS_BN254::fq_config> g2_point_field_t;
|
||||
static constexpr g2_point_field_t b_g2 = g2_point_field_t{ point_field_t{ PARAMS_BN254::weierstrass_b_g2_re },
|
||||
point_field_t{ PARAMS_BN254::weierstrass_b_g2_im }};
|
||||
typedef Projective<g2_point_field_t, scalar_field_t, b_g2> g2_projective_t;
|
||||
typedef Affine<g2_point_field_t> g2_affine_t;
|
||||
#endif
|
||||
}
|
||||
@@ -1,19 +0,0 @@
|
||||
#include <cuda.h>
|
||||
#include "curve_config.cuh"
|
||||
#include "../../primitives/projective.cuh"
|
||||
|
||||
extern "C" bool eq_bn254(BN254::projective_t *point1, BN254::projective_t *point2)
|
||||
{
|
||||
return (*point1 == *point2) &&
|
||||
!((point1->x == BN254::point_field_t::zero()) && (point1->y == BN254::point_field_t::zero()) && (point1->z == BN254::point_field_t::zero())) &&
|
||||
!((point2->x == BN254::point_field_t::zero()) && (point2->y == BN254::point_field_t::zero()) && (point2->z == BN254::point_field_t::zero()));
|
||||
}
|
||||
|
||||
#if defined(G2_DEFINED)
|
||||
extern "C" bool eq_g2_bn254(BN254::g2_projective_t *point1, BN254::g2_projective_t *point2)
|
||||
{
|
||||
return (*point1 == *point2) &&
|
||||
!((point1->x == BN254::g2_point_field_t::zero()) && (point1->y == BN254::g2_point_field_t::zero()) && (point1->z == BN254::g2_point_field_t::zero())) &&
|
||||
!((point2->x == BN254::g2_point_field_t::zero()) && (point2->y == BN254::g2_point_field_t::zero()) && (point2->z == BN254::g2_point_field_t::zero()));
|
||||
}
|
||||
#endif
|
||||
@@ -1,4 +0,0 @@
|
||||
#include "projective.cu"
|
||||
#include "lde.cu"
|
||||
#include "msm.cu"
|
||||
#include "ve_mod_mult.cu"
|
||||
@@ -1,14 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include "../../primitives/field.cuh"
|
||||
#include "../../primitives/projective.cuh"
|
||||
|
||||
#include "params.cuh"
|
||||
|
||||
namespace BN254 {
|
||||
typedef Field<CURVE_NAME_U::fp_config> scalar_field_t;
|
||||
typedef scalar_field_t scalar_t;
|
||||
typedef Field<CURVE_NAME_U::fq_config> point_field_t;
|
||||
typedef Projective<point_field_t, scalar_field_t, CURVE_NAME_U::group_generator, CURVE_NAME_U::weierstrass_b> projective_t;
|
||||
typedef Affine<point_field_t> affine_t;
|
||||
}
|
||||
@@ -1,327 +0,0 @@
|
||||
#ifndef _CURVE_NAME_U_LDE
|
||||
#define _CURVE_NAME_U_LDE
|
||||
#include <cuda.h>
|
||||
#include "../../appUtils/ntt/lde.cu"
|
||||
#include "../../appUtils/ntt/ntt.cuh"
|
||||
#include "../../appUtils/vector_manipulation/ve_mod_mult.cuh"
|
||||
#include "curve_config.cuh"
|
||||
|
||||
extern "C" CURVE_NAME_U::scalar_t* build_domain_cuda_CURVE_NAME_L(uint32_t domain_size, uint32_t logn, bool inverse, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
if (inverse) {
|
||||
return fill_twiddle_factors_array(domain_size, CURVE_NAME_U::scalar_t::omega_inv(logn), stream);
|
||||
} else {
|
||||
return fill_twiddle_factors_array(domain_size, CURVE_NAME_U::scalar_t::omega(logn), stream);
|
||||
}
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int ntt_cuda_CURVE_NAME_L(CURVE_NAME_U::scalar_t *arr, uint32_t n, bool inverse, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
return ntt_end2end_template<CURVE_NAME_U::scalar_t,CURVE_NAME_U::scalar_t>(arr, n, inverse, stream); // TODO: pass device_id
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int ecntt_cuda_CURVE_NAME_L(CURVE_NAME_U::projective_t *arr, uint32_t n, bool inverse, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
return ntt_end2end_template<CURVE_NAME_U::projective_t,CURVE_NAME_U::scalar_t>(arr, n, inverse, stream); // TODO: pass device_id
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int ntt_batch_cuda_CURVE_NAME_L(CURVE_NAME_U::scalar_t *arr, uint32_t arr_size, uint32_t batch_size, bool inverse, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
return ntt_end2end_batch_template<CURVE_NAME_U::scalar_t,CURVE_NAME_U::scalar_t>(arr, arr_size, batch_size, inverse, stream); // TODO: pass device_id
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int ecntt_batch_cuda_CURVE_NAME_L(CURVE_NAME_U::projective_t *arr, uint32_t arr_size, uint32_t batch_size, bool inverse, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
return ntt_end2end_batch_template<CURVE_NAME_U::projective_t,CURVE_NAME_U::scalar_t>(arr, arr_size, batch_size, inverse, stream); // TODO: pass device_id
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int interpolate_scalars_cuda_CURVE_NAME_L(CURVE_NAME_U::scalar_t* d_out, CURVE_NAME_U::scalar_t *d_evaluations, CURVE_NAME_U::scalar_t *d_domain, unsigned n, unsigned device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
return interpolate(d_out, d_evaluations, d_domain, n, stream);
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int interpolate_scalars_batch_cuda_CURVE_NAME_L(CURVE_NAME_U::scalar_t* d_out, CURVE_NAME_U::scalar_t* d_evaluations, CURVE_NAME_U::scalar_t* d_domain, unsigned n,
|
||||
unsigned batch_size, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
return interpolate_batch(d_out, d_evaluations, d_domain, n, batch_size, stream);
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int interpolate_points_cuda_CURVE_NAME_L(CURVE_NAME_U::projective_t* d_out, CURVE_NAME_U::projective_t *d_evaluations, CURVE_NAME_U::scalar_t *d_domain, unsigned n, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
return interpolate(d_out, d_evaluations, d_domain, n, stream);
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int interpolate_points_batch_cuda_CURVE_NAME_L(CURVE_NAME_U::projective_t* d_out, CURVE_NAME_U::projective_t* d_evaluations, CURVE_NAME_U::scalar_t* d_domain,
|
||||
unsigned n, unsigned batch_size, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
return interpolate_batch(d_out, d_evaluations, d_domain, n, batch_size, stream);
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int evaluate_scalars_cuda_CURVE_NAME_L(CURVE_NAME_U::scalar_t* d_out, CURVE_NAME_U::scalar_t *d_coefficients, CURVE_NAME_U::scalar_t *d_domain,
|
||||
unsigned domain_size, unsigned n, unsigned device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
CURVE_NAME_U::scalar_t* _null = nullptr;
|
||||
cudaStreamCreate(&stream);
|
||||
return evaluate(d_out, d_coefficients, d_domain, domain_size, n, false, _null, stream);
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int evaluate_scalars_batch_cuda_CURVE_NAME_L(CURVE_NAME_U::scalar_t* d_out, CURVE_NAME_U::scalar_t* d_coefficients, CURVE_NAME_U::scalar_t* d_domain, unsigned domain_size,
|
||||
unsigned n, unsigned batch_size, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
CURVE_NAME_U::scalar_t* _null = nullptr;
|
||||
cudaStreamCreate(&stream);
|
||||
return evaluate_batch(d_out, d_coefficients, d_domain, domain_size, n, batch_size, false, _null, stream);
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int evaluate_points_cuda_CURVE_NAME_L(CURVE_NAME_U::projective_t* d_out, CURVE_NAME_U::projective_t *d_coefficients, CURVE_NAME_U::scalar_t *d_domain,
|
||||
unsigned domain_size, unsigned n, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
CURVE_NAME_U::scalar_t* _null = nullptr;
|
||||
cudaStreamCreate(&stream);
|
||||
return evaluate(d_out, d_coefficients, d_domain, domain_size, n, false, _null, stream);
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int evaluate_points_batch_cuda_CURVE_NAME_L(CURVE_NAME_U::projective_t* d_out, CURVE_NAME_U::projective_t* d_coefficients, CURVE_NAME_U::scalar_t* d_domain, unsigned domain_size,
|
||||
unsigned n, unsigned batch_size, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
CURVE_NAME_U::scalar_t* _null = nullptr;
|
||||
cudaStreamCreate(&stream);
|
||||
return evaluate_batch(d_out, d_coefficients, d_domain, domain_size, n, batch_size, false, _null, stream);
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int evaluate_scalars_on_coset_cuda_CURVE_NAME_L(CURVE_NAME_U::scalar_t* d_out, CURVE_NAME_U::scalar_t *d_coefficients, CURVE_NAME_U::scalar_t *d_domain, unsigned domain_size,
|
||||
unsigned n, CURVE_NAME_U::scalar_t *coset_powers, unsigned device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
return evaluate(d_out, d_coefficients, d_domain, domain_size, n, true, coset_powers, stream);
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int evaluate_scalars_on_coset_batch_cuda_CURVE_NAME_L(CURVE_NAME_U::scalar_t* d_out, CURVE_NAME_U::scalar_t* d_coefficients, CURVE_NAME_U::scalar_t* d_domain, unsigned domain_size,
|
||||
unsigned n, unsigned batch_size, CURVE_NAME_U::scalar_t *coset_powers, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
return evaluate_batch(d_out, d_coefficients, d_domain, domain_size, n, batch_size, true, coset_powers, stream);
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int evaluate_points_on_coset_cuda_CURVE_NAME_L(CURVE_NAME_U::projective_t* d_out, CURVE_NAME_U::projective_t *d_coefficients, CURVE_NAME_U::scalar_t *d_domain, unsigned domain_size,
|
||||
unsigned n, CURVE_NAME_U::scalar_t *coset_powers, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
return evaluate(d_out, d_coefficients, d_domain, domain_size, n, true, coset_powers, stream);
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int evaluate_points_on_coset_batch_cuda_CURVE_NAME_L(CURVE_NAME_U::projective_t* d_out, CURVE_NAME_U::projective_t* d_coefficients, CURVE_NAME_U::scalar_t* d_domain, unsigned domain_size,
|
||||
unsigned n, unsigned batch_size, CURVE_NAME_U::scalar_t *coset_powers, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
return evaluate_batch(d_out, d_coefficients, d_domain, domain_size, n, batch_size, true, coset_powers, stream);
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int reverse_order_scalars_cuda_CURVE_NAME_L(CURVE_NAME_U::scalar_t* arr, int n, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
uint32_t logn = uint32_t(log(n) / log(2));
|
||||
cudaStreamCreate(&stream);
|
||||
reverse_order(arr, n, logn, stream);
|
||||
return 0;
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int reverse_order_scalars_batch_cuda_CURVE_NAME_L(CURVE_NAME_U::scalar_t* arr, int n, int batch_size, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
uint32_t logn = uint32_t(log(n) / log(2));
|
||||
cudaStreamCreate(&stream);
|
||||
reverse_order_batch(arr, n, logn, batch_size, stream);
|
||||
return 0;
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int reverse_order_points_cuda_CURVE_NAME_L(CURVE_NAME_U::projective_t* arr, int n, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
uint32_t logn = uint32_t(log(n) / log(2));
|
||||
cudaStreamCreate(&stream);
|
||||
reverse_order(arr, n, logn, stream);
|
||||
return 0;
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int reverse_order_points_batch_cuda_CURVE_NAME_L(CURVE_NAME_U::projective_t* arr, int n, int batch_size, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
uint32_t logn = uint32_t(log(n) / log(2));
|
||||
cudaStreamCreate(&stream);
|
||||
reverse_order_batch(arr, n, logn, batch_size, stream);
|
||||
return 0;
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
@@ -1,93 +0,0 @@
|
||||
#ifndef _CURVE_NAME_U_MSM
|
||||
#define _CURVE_NAME_U_MSM
|
||||
#include "../../appUtils/msm/msm.cu"
|
||||
#include <stdexcept>
|
||||
#include <cuda.h>
|
||||
#include "curve_config.cuh"
|
||||
|
||||
|
||||
extern "C"
|
||||
int msm_cuda_CURVE_NAME_L(CURVE_NAME_U::projective_t *out, CURVE_NAME_U::affine_t points[],
|
||||
CURVE_NAME_U::scalar_t scalars[], size_t count, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
large_msm<CURVE_NAME_U::scalar_t, CURVE_NAME_U::projective_t, CURVE_NAME_U::affine_t>(scalars, points, count, out, false, stream);
|
||||
return CUDA_SUCCESS;
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" int msm_batch_cuda_CURVE_NAME_L(CURVE_NAME_U::projective_t* out, CURVE_NAME_U::affine_t points[],
|
||||
CURVE_NAME_U::scalar_t scalars[], size_t batch_size, size_t msm_size, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
batched_large_msm<CURVE_NAME_U::scalar_t, CURVE_NAME_U::projective_t, CURVE_NAME_U::affine_t>(scalars, points, batch_size, msm_size, out, false, stream);
|
||||
cudaStreamSynchronize(stream);
|
||||
|
||||
return CUDA_SUCCESS;
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Commit to a polynomial using the MSM.
|
||||
* Note: this function just calls the MSM, it doesn't convert between evaluation and coefficient form of scalars or points.
|
||||
* @param d_out Ouptut point to write the result to.
|
||||
* @param d_scalars Scalars for the MSM. Must be on device.
|
||||
* @param d_points Points for the MSM. Must be on device.
|
||||
* @param count Length of `d_scalars` and `d_points` arrays (they should have equal length).
|
||||
*/
|
||||
extern "C"
|
||||
int commit_cuda_CURVE_NAME_L(CURVE_NAME_U::projective_t* d_out, CURVE_NAME_U::scalar_t* d_scalars, CURVE_NAME_U::affine_t* d_points, size_t count, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
large_msm(d_scalars, d_points, count, d_out, true, stream);
|
||||
cudaStreamSynchronize(stream);
|
||||
return 0;
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Commit to a batch of polynomials using the MSM.
|
||||
* Note: this function just calls the MSM, it doesn't convert between evaluation and coefficient form of scalars or points.
|
||||
* @param d_out Ouptut point to write the results to.
|
||||
* @param d_scalars Scalars for the MSMs of all polynomials. Must be on device.
|
||||
* @param d_points Points for the MSMs. Must be on device. It is assumed that this set of bases is used for each MSM.
|
||||
* @param count Length of `d_points` array, `d_scalar` has length `count` * `batch_size`.
|
||||
* @param batch_size Size of the batch.
|
||||
*/
|
||||
extern "C"
|
||||
int commit_batch_cuda_CURVE_NAME_L(CURVE_NAME_U::projective_t* d_out, CURVE_NAME_U::scalar_t* d_scalars, CURVE_NAME_U::affine_t* d_points, size_t count, size_t batch_size, size_t device_id = 0, cudaStream_t stream = 0)
|
||||
{
|
||||
try
|
||||
{
|
||||
cudaStreamCreate(&stream);
|
||||
batched_large_msm(d_scalars, d_points, batch_size, count, d_out, true, stream);
|
||||
cudaStreamSynchronize(stream);
|
||||
return 0;
|
||||
}
|
||||
catch (const std::runtime_error &ex)
|
||||
{
|
||||
printf("error %s", ex.what());
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
@@ -1,8 +0,0 @@
|
||||
#include <cuda.h>
|
||||
#include "curve_config.cuh"
|
||||
#include "../../primitives/projective.cuh"
|
||||
|
||||
extern "C" bool eq_CURVE_NAME_L(CURVE_NAME_U::projective_t *point1, CURVE_NAME_U::projective_t *point2, size_t device_id = 0)
|
||||
{
|
||||
return (*point1 == *point2);
|
||||
}
|
||||
@@ -1,4 +0,0 @@
|
||||
#include "projective.cu"
|
||||
#include "lde.cu"
|
||||
#include "msm.cu"
|
||||
#include "ve_mod_mult.cu"
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user