Compare commits


2 Commits

Author          SHA1         Message                             Date
guy-ingo        de1d27d846   new api - wip                       2023-06-22 23:39:54 +03:00
ImmanuelSegol   392c9f8e2e   wip - towrds better rust frontend   2023-06-05 14:41:29 +03:00
257 changed files with 22068 additions and 39848 deletions


@@ -1,38 +0,0 @@
Language: Cpp
AlignAfterOpenBracket: AlwaysBreak
AlignConsecutiveMacros: true
AlignTrailingComments: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: true
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: All
AllowShortIfStatementsOnASingleLine: true
AlwaysBreakTemplateDeclarations: true
BinPackArguments: true
BinPackParameters: false
BreakBeforeBraces: Custom
BraceWrapping:
AfterClass: true
AfterFunction: true
BreakBeforeBinaryOperators: false
BreakBeforeTernaryOperators: true
ColumnLimit: 120
ContinuationIndentWidth: 2
Cpp11BracedListStyle: true
DisableFormat: false
IndentFunctionDeclarationAfterType: false
IndentWidth: 2
KeepEmptyLinesAtTheStartOfBlocks: false
MaxEmptyLinesToKeep: 1
NamespaceIndentation: All
PointerAlignment: Left
SpaceBeforeAssignmentOperators: true
SpaceBeforeParens: ControlStatements
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
SpacesInContainerLiterals: false
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
Standard: c++17
UseTab: Never


@@ -1,40 +0,0 @@
name: Build Dev
on:
pull_request:
branches:
- dev
paths:
- icicle/**
- src/**
- Cargo.toml
- build.rs
env:
CARGO_TERM_COLOR: always
ARCH_TYPE: sm_70
jobs:
build-linux:
runs-on: [self-hosted, Linux, X64, icicle]
steps:
- name: Checkout Repo
uses: actions/checkout@v3
- name: Build
run: cargo build --release --verbose
build-windows:
runs-on: windows-2022
steps:
- name: Checkout Repo
uses: actions/checkout@v3
- name: Download and Install Cuda
uses: Jimver/cuda-toolkit@v0.2.11
with:
cuda: '12.0.0'
method: 'network'
# https://docs.nvidia.com/cuda/archive/12.0.0/cuda-installation-guide-microsoft-windows/index.html
sub-packages: '["cudart", "nvcc", "thrust"]'
- name: Build
run: cargo build --release --verbose


@@ -1,43 +1,49 @@
name: Build
on:
on:
pull_request:
types:
- ready_for_review
- opened
branches:
- main
- "main"
- "dev"
paths:
- icicle/**
- src/**
- Cargo.toml
- build.rs
- "icicle/**"
- "src/**"
- "Cargo.toml"
- "build.rs"
env:
CARGO_TERM_COLOR: always
ARCH_TYPE: sm_70
DEFAULT_STREAM: per-thread
jobs:
build-linux:
runs-on: [self-hosted, Linux, X64, icicle]
runs-on: ubuntu-latest
steps:
- name: Checkout Repo
uses: actions/checkout@v3
# Checkout code
- uses: actions/checkout@v3
# Download (or from cache) and install CUDA Toolkit 12.1.0
- uses: Jimver/cuda-toolkit@v0.2.9
id: cuda-toolkit
with:
cuda: '12.1.0'
use-github-cache: true
# Build from cargo - Rust utils are preinstalled on latest images
# https://github.com/actions/runner-images/blob/main/images/linux/Ubuntu2204-Readme.md#rust-tools
- name: Build
run: cargo build --release --verbose
build-windows:
runs-on: windows-2022
runs-on: windows-latest
steps:
- name: Checkout Repo
uses: actions/checkout@v3
- name: Download and Install Cuda
uses: Jimver/cuda-toolkit@v0.2.11
- uses: actions/checkout@v3
- uses: Jimver/cuda-toolkit@v0.2.9
id: cuda-toolkit
with:
cuda: '12.0.0'
method: 'network'
# https://docs.nvidia.com/cuda/archive/12.0.0/cuda-installation-guide-microsoft-windows/index.html
sub-packages: '["cudart", "nvcc", "thrust"]'
cuda: '12.1.0'
use-github-cache: true
- name: Build
run: cargo build --release --verbose

.gitignore

@@ -5,9 +5,6 @@
*.cubin
*.bin
*.fatbin
*.so
*.nsys-rep
*.ncu-rep
**/target
**/.vscode
**/.*lock*csv#


@@ -1,10 +0,0 @@
# https://github.com/rust-lang/rustfmt/blob/master/Configurations.md
# Stable Configs
chain_width = 0
max_width = 120
merge_derives = true
use_field_init_shorthand = true
use_try_shorthand = true
# Unstable Configs


@@ -1,49 +1,9 @@
[package]
name = "icicle-utils"
[workspace]
name = "icicle"
version = "0.1.0"
edition = "2021"
authors = [ "Ingonyama" ]
description = "An implementation of the Ingonyama CUDA Library"
homepage = "https://www.ingonyama.com"
repository = "https://github.com/ingonyama-zk/icicle"
[[bench]]
name = "ntt"
path = "benches/ntt.rs"
harness = false
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[[bench]]
name = "msm"
path = "benches/msm.rs"
harness = false
members = ["icicle-core", "bls12-381", "bls12-377", "bn254"]
[dependencies]
hex = "*"
ark-std = "0.3.0"
ark-ff = "0.3.0"
ark-poly = "0.3.0"
ark-ec = { version = "0.3.0", features = [ "parallel" ] }
ark-bls12-381 = "0.3.0"
ark-bls12-377 = "0.3.0"
ark-bn254 = "0.3.0"
serde = { version = "1.0", features = ["derive"] }
serde_derive = "1.0"
serde_cbor = "0.11.2"
rustacuda = "0.1"
rustacuda_core = "0.1"
rustacuda_derive = "0.1"
rand = "*" #TODO: move rand and ark dependencies to dev once random scalar/point generation is done "natively"
[build-dependencies]
cc = { version = "1.0", features = ["parallel"] }
[dev-dependencies]
"criterion" = "0.4.0"
[features]
default = ["bls12_381"]
bls12_381 = ["ark-bls12-381/curve"]
g2 = []


@@ -4,15 +4,6 @@
![image (4)](https://user-images.githubusercontent.com/2446179/223707486-ed8eb5ab-0616-4601-8557-12050df8ccf7.png)
<div align="center">
![Build status](https://github.com/ingonyama-zk/icicle/actions/workflows/build.yml/badge.svg)
![Discord server](https://img.shields.io/discord/1063033227788423299?label=Discord&logo=Discord&logoColor=%23&style=plastic)
![Follow us on twitter](https://img.shields.io/twitter/follow/Ingo_zk?style=social)
</div>
## Background
Zero Knowledge Proofs (ZKPs) are considered one of the greatest achievements of modern cryptography. Accordingly, ZKPs are expected to disrupt a number of industries and will usher in an era of trustless and privacy preserving services and infrastructure.
@@ -45,7 +36,7 @@ ICICLE is a CUDA implementation of general functions widely used in ZKP. ICICLE
```sh
mkdir -p build
nvcc -o build/<binary_name> ./icicle/curves/index.cu -lib -arch=native
nvcc -o build/<ENTER_DIR_NAME> ./icicle/appUtils/ntt/ntt.cu ./icicle/appUtils/msm/msm.cu ./icicle/appUtils/vector_manipulation/ve_mod_mult.cu ./icicle/primitives/projective.cu -lib -arch=native
```
### Testing the CUDA code
@@ -104,74 +95,52 @@ Supporting additional curves can be done as follows:
Create a JSON file with the curve parameters. The curve is defined by the following parameters:
- ``curve_name`` - e.g. ``bls12_381``.
- ``modulus_p`` - scalar field modulus (in decimal).
- ``bit_count_p`` - number of bits needed to represent `` modulus_p`` .
- ``limb_p`` - number of bytes needed to represent `` modulus_p`` (rounded).
- ``modolus_p`` - scalar field modolus (in decimal).
- ``bit_count_p`` - number of bits needed to represent `` modolus_p`` .
- ``limb_p`` - number of bytes needed to represent `` modolus_p`` (rounded).
- ``ntt_size`` - log of the maximal size subgroup of the scalar field.
- ``modulus_q`` - base field modulus (in decimal).
- ``bit_count_q`` - number of bits needed to represent `` modulus_q`` .
- ``limb_q`` - number of bytes needed to represent ``modulus_q`` (rounded).
- ``modolus_q`` - base field modulus (in decimal).
- ``bit_count_q`` - number of bits needed to represent `` modolus_q`` .
- ``limb_q`` - number of bytes needed to represent ``modolus_q`` (rounded).
- ``weierstrass_b`` - the Weierstrass constant of the curve.
- ``weierstrass_b_g2_re`` - real part of the Weierstrass constant of the g2 curve.
- ``weierstrass_b_g2_im`` - imaginary part of the Weierstrass constant of the g2 curve.
- ``gen_x`` - x-value of a generator element for the curve.
- ``gen_y`` - y-value of a generator element for the curve.
- ``gen_x_re`` - real x-value of a generator element for the g2 curve.
- ``gen_x_im`` - imaginary x-value of a generator element for the g2 curve.
- ``gen_y_re`` - real y-value of a generator element for the g2 curve.
- ``gen_y_im`` - imaginary y-value of a generator element for the g2 curve.
Here's an example for BLS12-381.
```
{
"curve_name" : "bls12_381",
"modulus_p" : 52435875175126190479447740508185965837690552500527637822603658699938581184513,
"modolus_p" : 52435875175126190479447740508185965837690552500527637822603658699938581184513,
"bit_count_p" : 255,
"limb_p" : 8,
"ntt_size" : 32,
"modulus_q" : 4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787,
"modolus_q" : 4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787,
"bit_count_q" : 381,
"limb_q" : 12,
"weierstrass_b" : 4,
"weierstrass_b_g2_re":4,
"weierstrass_b_g2_im":4,
"gen_x" : 3685416753713387016781088315183077757961620795782546409894578378688607592378376318836054947676345821548104185464507,
"gen_y" : 1339506544944476473020471379941921221584933875938349620426543736416511423956333506472724655353366534992391756441569,
"gen_x_re" : 352701069587466618187139116011060144890029952792775240219908644239793785735715026873347600343865175952761926303160,
"gen_x_im" : 3059144344244213709971259814753781636986470325476647558659373206291635324768958432433509563104347017837885763365758,
"gen_y_re" : 1985150602287291935568054521177171638300868978215655730859378665066344726373823718423869104263333984641494340347905,
"gen_y_im" : 927553665492332455747201965776037880757740193453592970025027978793976877002675564980949289727957565575433344219582
"gen_y" : 1339506544944476473020471379941921221584933875938349620426543736416511423956333506472724655353366534992391756441569
}
```
Save the parameters JSON file under the ``curve_parameters`` directory.
Save the parameters JSON file in ``curve_parameters``.
Then run the Python script ``new_curve_script.py`` from the root folder:
Then run the Python script ``new_curve_script.py`` from the main icicle folder:
```
python3 ./curve_parameters/new_curve_script.py ./curve_parameters/bls12_381.json
python3 ./curve_parameters/new_curve_script_rust.py ./curve_parameters/bls12_381.json
```
The script does the following:
- Creates a folder in ``icicle/curves`` with the curve name, which contains all of the files needed for the supported operations in cuda.
- Adds the curve's exported operations to ``icicle/curves/index.cu``.
- Adds the curve exported operations to ``icicle/curves/index.cu``.
- Creates a file with the curve name in ``src/curves`` with the relevant objects for the curve.
- Creates a test file with the curve name in ``src``.
The new curve can then be tested by running the tests in ``tests_curve_name`` (e.g. ``tests_bls12_381``).
## Contributions
Join our [Discord Server][DISCORD] and find us on the icicle channel. We will be happy to work together to support your use case and talk features, bugs and design.
### Development Contributions
If you are changing code, please make sure to change your [git hooks path][HOOKS_DOCS] to the repo's [hooks directory][HOOKS_PATH] by running the following command:
```sh
git config core.hooksPath ./scripts/hooks
```
This will ensure our custom hooks are run and will make it easier to follow our coding guidelines.
Join our [Discord Server](https://discord.gg/Y4SkbDf2Ff) and find us on the icicle channel. We will be happy to work together to support your use case and talk features, bugs and design.
### Hall of Fame
@@ -184,18 +153,13 @@ ICICLE is distributed under the terms of the MIT License.
See [LICENSE-MIT][LMIT] for details.
<!-- Begin Links -->
[BLS12-381]: ./icicle/curves/bls12_381/supported_operations.cu
[BLS12-377]: ./icicle/curves/bls12_377/supported_operations.cu
[BN254]: ./icicle/curves/bn254/supported_operations.cu
[BLS12-381]: ./icicle/curves/bls12_381.cuh
[NVCC]: https://docs.nvidia.com/cuda/#installation-guides
[CRV_TEMPLATE]: ./icicle/curves/curve_template.cuh
[CRV_CONFIG]: ./icicle/curves/curve_config.cuh
[B_SCRIPT]: ./build.rs
[FDI]: https://github.com/ingonyama-zk/fast-danksharding
[LMIT]: ./LICENSE
[DISCORD]: https://discord.gg/Y4SkbDf2Ff
[googletest]: https://github.com/google/googletest/
[HOOKS_DOCS]: https://git-scm.com/docs/githooks
[HOOKS_PATH]: ./scripts/hooks/
<!-- End Links -->


@@ -1,56 +0,0 @@
extern crate criterion;
use criterion::{criterion_group, criterion_main, Criterion};
use icicle_utils::test_bls12_381::{
commit_batch_bls12_381, generate_random_points_bls12_381, set_up_scalars_bls12_381,
};
use icicle_utils::utils::*;
#[cfg(feature = "g2")]
use icicle_utils::{commit_batch_g2, field::ExtensionField};
use rustacuda::prelude::*;
const LOG_MSM_SIZES: [usize; 1] = [12];
const BATCH_SIZES: [usize; 2] = [128, 256];
fn bench_msm(c: &mut Criterion) {
let mut group = c.benchmark_group("MSM");
for log_msm_size in LOG_MSM_SIZES {
for batch_size in BATCH_SIZES {
let msm_size = 1 << log_msm_size;
let (scalars, _, _) = set_up_scalars_bls12_381(msm_size, 0, false);
let batch_scalars = vec![scalars; batch_size].concat();
let mut d_scalars = DeviceBuffer::from_slice(&batch_scalars[..]).unwrap();
let points = generate_random_points_bls12_381(msm_size, get_rng(None));
let batch_points = vec![points; batch_size].concat();
let mut d_points = DeviceBuffer::from_slice(&batch_points[..]).unwrap();
#[cfg(feature = "g2")]
let g2_points = generate_random_points::<ExtensionField>(msm_size, get_rng(None));
#[cfg(feature = "g2")]
let g2_batch_points = vec![g2_points; batch_size].concat();
#[cfg(feature = "g2")]
let mut d_g2_points = DeviceBuffer::from_slice(&g2_batch_points[..]).unwrap();
group
.sample_size(30)
.bench_function(
&format!("MSM of size 2^{} in batch {}", log_msm_size, batch_size),
|b| b.iter(|| commit_batch_bls12_381(&mut d_points, &mut d_scalars, batch_size)),
);
#[cfg(feature = "g2")]
group
.sample_size(10)
.bench_function(
&format!("G2 MSM of size 2^{} in batch {}", log_msm_size, batch_size),
|b| b.iter(|| commit_batch_g2(&mut d_g2_points, &mut d_scalars, batch_size)),
);
}
}
}
criterion_group!(msm_benches, bench_msm);
criterion_main!(msm_benches);


@@ -1,85 +0,0 @@
extern crate criterion;
use criterion::{criterion_group, criterion_main, Criterion};
use icicle_utils::test_bls12_381::*;
const LOG_NTT_SIZES: [usize; 3] = [20, 9, 10];
const BATCH_SIZES: [usize; 3] = [1, 512, 1024];
fn bench_ntt(c: &mut Criterion) {
let mut group = c.benchmark_group("NTT");
for log_ntt_size in LOG_NTT_SIZES {
for batch_size in BATCH_SIZES {
let ntt_size = 1 << log_ntt_size;
if ntt_size * batch_size > 1 << 25 {
continue;
}
let scalar_samples = 20;
let (_, mut d_evals, mut d_domain) = set_up_scalars_bls12_381(ntt_size * batch_size, log_ntt_size, true);
group
.sample_size(scalar_samples)
.bench_function(
&format!("Scalar NTT of size 2^{} in batch {}", log_ntt_size, batch_size),
|b| b.iter(|| evaluate_scalars_batch_bls12_381(&mut d_evals, &mut d_domain, batch_size)),
);
group
.sample_size(scalar_samples)
.bench_function(
&format!("Scalar iNTT of size 2^{} in batch {}", log_ntt_size, batch_size),
|b| b.iter(|| interpolate_scalars_batch_bls12_381(&mut d_evals, &mut d_domain, batch_size)),
);
group
.sample_size(scalar_samples)
.bench_function(
&format!("Scalar inplace NTT of size 2^{} in batch {}", log_ntt_size, batch_size),
|b| b.iter(|| ntt_inplace_batch_bls12_381(&mut d_evals, &mut d_domain, batch_size, false, 0)),
);
group
.sample_size(scalar_samples)
.bench_function(
&format!("Scalar inplace iNTT of size 2^{} in batch {}", log_ntt_size, batch_size),
|b| b.iter(|| ntt_inplace_batch_bls12_381(&mut d_evals, &mut d_domain, batch_size, true, 0)),
);
drop(d_evals);
drop(d_domain);
if ntt_size * batch_size > 1 << 18 {
continue;
}
let point_samples = 10;
let (_, mut d_points_evals, mut d_domain) =
set_up_points_bls12_381(ntt_size * batch_size, log_ntt_size, true);
group
.sample_size(point_samples)
.bench_function(
&format!("EC NTT of size 2^{} in batch {}", log_ntt_size, batch_size),
|b| b.iter(|| interpolate_points_batch_bls12_381(&mut d_points_evals, &mut d_domain, batch_size)),
);
group
.sample_size(point_samples)
.bench_function(
&format!("EC iNTT of size 2^{} in batch {}", log_ntt_size, batch_size),
|b| b.iter(|| evaluate_points_batch_bls12_381(&mut d_points_evals, &mut d_domain, batch_size)),
);
drop(d_points_evals);
drop(d_domain);
}
}
}
criterion_group!(ntt_benches, bench_ntt);
criterion_main!(ntt_benches);

bls12-377/Cargo.toml

@@ -0,0 +1,34 @@
[package]
name = "bls12-377"
version = "0.1.0"
edition = "2021"
authors = [ "Ingonyama" ]
[dependencies]
icicle-core = { path = "../icicle-core" }
hex = "*"
ark-std = "0.3.0"
ark-ff = "0.3.0"
ark-poly = "0.3.0"
ark-ec = { version = "0.3.0", features = [ "parallel" ] }
ark-bls12-377 = "0.3.0"
serde = { version = "1.0", features = ["derive"] }
serde_derive = "1.0"
serde_cbor = "0.11.2"
rustacuda = "0.1"
rustacuda_core = "0.1"
rustacuda_derive = "0.1"
rand = "*" #TODO: move rand and ark dependencies to dev once random scalar/point generation is done "natively"
[build-dependencies]
cc = { version = "1.0", features = ["parallel"] }
[dev-dependencies]
"criterion" = "0.4.0"
[features]
g2 = []


@@ -23,9 +23,12 @@ fn main() {
nvcc.define("G2_DEFINED", None);
}
nvcc.cuda(true);
nvcc.define("FEATURE_BLS12_377", None);
nvcc.debug(false);
nvcc.flag(&arch);
nvcc.flag(&stream);
nvcc.files(["./icicle/curves/index.cu"]);
nvcc.files([
"../icicle-cuda/curves/index.cu",
]);
nvcc.compile("ingo_icicle"); //TODO: extension??
}


@@ -0,0 +1,4 @@
pub trait Field<const NUM_LIMBS: usize> {
const MODOLUS: [u32;NUM_LIMBS];
const LIMBS: usize = NUM_LIMBS;
}
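To make the trait's intended use concrete, here is a minimal, self-contained sketch of an implementation. It mirrors the `ScalarField`/`BaseField` impls in `curve_structs.rs` later in this diff; `DemoField` and its zeroed modulus are illustrative placeholders, not real curve constants.

```rust
// Sketch only: a hypothetical 4-limb field implementing the trait above.
// The zeroed MODOLUS mirrors the WIP placeholders used in curve_structs.rs.
pub trait Field<const NUM_LIMBS: usize> {
    const MODOLUS: [u32; NUM_LIMBS];
    const LIMBS: usize = NUM_LIMBS;
}

pub struct DemoField;

impl Field<4> for DemoField {
    const MODOLUS: [u32; 4] = [0x0; 4];
}

fn main() {
    // LIMBS falls back to the trait's default, so it always equals NUM_LIMBS.
    assert_eq!(<DemoField as Field<4>>::LIMBS, 4);
}
```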


@@ -0,0 +1,3 @@
pub mod field;
pub mod scalar;
pub mod point;


@@ -0,0 +1,106 @@
use std::ffi::c_uint;
use ark_ec::AffineCurve;
use ark_ff::{BigInteger256, PrimeField};
use std::mem::transmute;
use ark_ff::Field;
use icicle_core::utils::{u32_vec_to_u64_vec, u64_vec_to_u32_vec};
use rustacuda_core::DeviceCopy;
use rustacuda_derive::DeviceCopy;
use super::scalar::{get_fixed_limbs, self};
#[derive(Debug, Clone, Copy, DeviceCopy)]
#[repr(C)]
pub struct PointT<BF: scalar::ScalarTrait> {
pub x: BF,
pub y: BF,
pub z: BF,
}
impl<BF: DeviceCopy + scalar::ScalarTrait> Default for PointT<BF> {
fn default() -> Self {
PointT::zero()
}
}
impl<BF: DeviceCopy + scalar::ScalarTrait> PointT<BF> {
pub fn zero() -> Self {
PointT {
x: BF::zero(),
y: BF::one(),
z: BF::zero(),
}
}
pub fn infinity() -> Self {
Self::zero()
}
}
#[derive(Debug, PartialEq, Clone, Copy, DeviceCopy)]
#[repr(C)]
pub struct PointAffineNoInfinityT<BF> {
pub x: BF,
pub y: BF,
}
impl<BF: scalar::ScalarTrait> Default for PointAffineNoInfinityT<BF> {
fn default() -> Self {
PointAffineNoInfinityT {
x: BF::zero(),
y: BF::zero(),
}
}
}
impl<BF: Copy + scalar::ScalarTrait> PointAffineNoInfinityT<BF> {
/// From u32 limbs x, y
pub fn from_limbs(x: &[u32], y: &[u32]) -> Self {
PointAffineNoInfinityT {
x: BF::from_limbs(x),
y: BF::from_limbs(y)
}
}
pub fn limbs(&self) -> Vec<u32> {
[self.x.limbs(), self.y.limbs()].concat()
}
pub fn to_projective(&self) -> PointT<BF> {
PointT {
x: self.x,
y: self.y,
z: BF::one(),
}
}
}
impl<BF: Copy + scalar::ScalarTrait> PointT<BF> {
pub fn from_limbs(x: &[u32], y: &[u32], z: &[u32]) -> Self {
PointT {
x: BF::from_limbs(x),
y: BF::from_limbs(y),
z: BF::from_limbs(z)
}
}
pub fn from_xy_limbs(value: &[u32]) -> PointT<BF> {
let l = value.len();
assert_eq!(l, 3 * BF::base_limbs(), "length must be 3 * {}", BF::base_limbs());
PointT {
x: BF::from_limbs(value[..BF::base_limbs()].try_into().unwrap()),
y: BF::from_limbs(value[BF::base_limbs()..BF::base_limbs() * 2].try_into().unwrap()),
z: BF::from_limbs(value[BF::base_limbs() * 2..].try_into().unwrap())
}
}
pub fn to_xy_strip_z(&self) -> PointAffineNoInfinityT<BF> {
PointAffineNoInfinityT {
x: self.x,
y: self.y,
}
}
}
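As a quick illustration of the flat limb layout that `from_xy_limbs` consumes, here is a dependency-free sketch of the same slicing rule, using plain `u32` limbs and a hypothetical `base_limbs` of 2 in place of a real base field.

```rust
// Sketch of the buffer layout expected by from_xy_limbs:
// value = x limbs | y limbs | z limbs, each base_limbs u32s long.
fn main() {
    let base_limbs = 2;
    let value: Vec<u32> = vec![1, 2, 3, 4, 5, 6];
    assert_eq!(value.len(), 3 * base_limbs); // the assert in from_xy_limbs
    let x = &value[..base_limbs];
    let y = &value[base_limbs..base_limbs * 2];
    let z = &value[base_limbs * 2..];
    assert_eq!((x, y, z), (&[1u32, 2][..], &[3u32, 4][..], &[5u32, 6][..]));
}
```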


@@ -0,0 +1,102 @@
use std::ffi::{c_int, c_uint};
use rand::{rngs::StdRng, RngCore, SeedableRng};
use rustacuda_core::DeviceCopy;
use rustacuda_derive::DeviceCopy;
use std::mem::transmute;
use rustacuda::prelude::*;
use rustacuda_core::DevicePointer;
use rustacuda::memory::{DeviceBox, CopyDestination};
use icicle_core::utils::{u32_vec_to_u64_vec, u64_vec_to_u32_vec};
use std::marker::PhantomData;
use std::convert::TryInto;
use super::field::{Field, self};
pub fn get_fixed_limbs<const NUM_LIMBS: usize>(val: &[u32]) -> [u32; NUM_LIMBS] {
match val.len() {
n if n < NUM_LIMBS => {
let mut padded: [u32; NUM_LIMBS] = [0; NUM_LIMBS];
padded[..val.len()].copy_from_slice(&val);
padded
}
n if n == NUM_LIMBS => val.try_into().unwrap(),
_ => panic!("slice has too many elements"),
}
}
pub trait ScalarTrait{
fn base_limbs() -> usize;
fn zero() -> Self;
fn from_limbs(value: &[u32]) -> Self;
fn one() -> Self;
fn to_bytes_le(&self) -> Vec<u8>;
fn limbs(&self) -> &[u32];
}
#[derive(Debug, PartialEq, Clone, Copy)]
#[repr(C)]
pub struct ScalarT<M, const NUM_LIMBS: usize> {
pub(crate) phantom: PhantomData<M>,
pub(crate) value : [u32; NUM_LIMBS]
}
impl<M, const NUM_LIMBS: usize> ScalarTrait for ScalarT<M, NUM_LIMBS>
where
M: Field<NUM_LIMBS>,
{
fn base_limbs() -> usize {
return NUM_LIMBS;
}
fn zero() -> Self {
ScalarT {
value: [0u32; NUM_LIMBS],
phantom: PhantomData,
}
}
fn from_limbs(value: &[u32]) -> Self {
Self {
value: get_fixed_limbs(value),
phantom: PhantomData,
}
}
fn one() -> Self {
let mut s = [0u32; NUM_LIMBS];
s[0] = 1;
ScalarT { value: s, phantom: PhantomData }
}
fn to_bytes_le(&self) -> Vec<u8> {
self.value
.iter()
.map(|s| s.to_le_bytes().to_vec())
.flatten()
.collect::<Vec<_>>()
}
fn limbs(&self) -> &[u32] {
&self.value
}
}
impl<M, const NUM_LIMBS: usize> ScalarT<M, NUM_LIMBS> where M: field::Field<NUM_LIMBS>{
pub fn from_limbs_le(value: &[u32]) -> ScalarT<M,NUM_LIMBS> {
Self::from_limbs(value)
}
pub fn from_limbs_be(value: &[u32]) -> ScalarT<M,NUM_LIMBS> {
let mut value = value.to_vec();
value.reverse();
Self::from_limbs_le(&value)
}
// Additional Functions
// Limb-wise addition with carry propagation (to back a `+` overload later);
// note: no modular reduction yet at this WIP stage.
pub fn add(&self, other: ScalarT<M, NUM_LIMBS>) -> ScalarT<M, NUM_LIMBS> {
let mut value = [0u32; NUM_LIMBS];
let mut carry = 0u64;
for i in 0..NUM_LIMBS {
let sum = self.value[i] as u64 + other.value[i] as u64 + carry;
value[i] = sum as u32;
carry = sum >> 32;
}
ScalarT { value, phantom: PhantomData }
}
}
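The limb-padding helper at the top of this file is easy to exercise on its own. Below is a standalone copy of `get_fixed_limbs` with a small demonstration; it has no crate dependencies, so it runs as-is.

```rust
use std::convert::TryInto;

// Standalone copy of get_fixed_limbs above: zero-pads a short little-endian
// limb slice up to NUM_LIMBS, and panics if the slice is too long.
pub fn get_fixed_limbs<const NUM_LIMBS: usize>(val: &[u32]) -> [u32; NUM_LIMBS] {
    match val.len() {
        n if n < NUM_LIMBS => {
            let mut padded = [0u32; NUM_LIMBS];
            padded[..val.len()].copy_from_slice(val);
            padded
        }
        n if n == NUM_LIMBS => val.try_into().unwrap(),
        _ => panic!("slice has too many elements"),
    }
}

fn main() {
    // A 2-limb value widened to 4 limbs: the high limbs are zero-padded.
    let limbs: [u32; 4] = get_fixed_limbs(&[1, 2]);
    assert_eq!(limbs, [1, 2, 0, 0]);
    // ScalarT::from_limbs_be is this same call after reversing the limb order.
}
```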


@@ -0,0 +1,62 @@
use std::ffi::{c_int, c_uint};
use rand::{rngs::StdRng, RngCore, SeedableRng};
use rustacuda_derive::DeviceCopy;
use std::mem::transmute;
use rustacuda::prelude::*;
use rustacuda_core::DevicePointer;
use rustacuda::memory::{DeviceBox, CopyDestination, DeviceCopy};
use std::marker::PhantomData;
use std::convert::TryInto;
use crate::basic_structs::point::{PointT, PointAffineNoInfinityT};
use crate::basic_structs::scalar::ScalarT;
use crate::basic_structs::field::Field;
#[derive(Debug, PartialEq, Clone, Copy,DeviceCopy)]
#[repr(C)]
pub struct ScalarField;
impl Field<8> for ScalarField {
const MODOLUS: [u32; 8] = [0x0;8];
}
#[derive(Debug, PartialEq, Clone, Copy,DeviceCopy)]
#[repr(C)]
pub struct BaseField;
impl Field<12> for BaseField {
const MODOLUS: [u32; 12] = [0x0;12];
}
pub type Scalar = ScalarT<ScalarField,8>;
impl Default for Scalar {
fn default() -> Self {
Self{value: [0x0;ScalarField::LIMBS], phantom: PhantomData }
}
}
unsafe impl DeviceCopy for Scalar{}
pub type Base = ScalarT<BaseField,12>;
impl Default for Base {
fn default() -> Self {
Self{value: [0x0;BaseField::LIMBS], phantom: PhantomData }
}
}
unsafe impl DeviceCopy for Base{}
pub type Point = PointT<Base>;
pub type PointAffineNoInfinity = PointAffineNoInfinityT<Base>;
extern "C" {
fn eq(point1: *const Point, point2: *const Point) -> c_uint;
}
impl PartialEq for Point {
fn eq(&self, other: &Self) -> bool {
unsafe { eq(self, other) != 0 }
}
}
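The pattern in this file - a zero-sized marker type per field, a `ScalarT` alias fixing the limb count, and a hand-written `Default` - can be sketched without any CUDA or arkworks dependency. Names below are illustrative stand-ins for the real types.

```rust
use std::marker::PhantomData;

// Same shape as ScalarT<M, NUM_LIMBS>: limbs tagged by a zero-sized field marker.
#[derive(Debug, PartialEq, Clone, Copy)]
struct ScalarT<M, const NUM_LIMBS: usize> {
    phantom: PhantomData<M>,
    value: [u32; NUM_LIMBS],
}

#[derive(Debug, PartialEq, Clone, Copy)]
struct ScalarField; // marker standing in for the real ScalarField

// Mirrors `pub type Scalar = ScalarT<ScalarField, 8>` above.
type Scalar = ScalarT<ScalarField, 8>;

impl Default for Scalar {
    fn default() -> Self {
        Self { value: [0x0; 8], phantom: PhantomData }
    }
}

fn main() {
    assert_eq!(Scalar::default().value, [0u32; 8]);
}
```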

bls12-377/src/from_cuda.rs

@@ -0,0 +1,798 @@
use std::ffi::{c_int, c_uint};
use ark_std::UniformRand;
use rand::{rngs::StdRng, RngCore, SeedableRng};
use rustacuda::CudaFlags;
use rustacuda::memory::DeviceBox;
use rustacuda::prelude::{DeviceBuffer, Device, ContextFlags, Context};
use rustacuda_core::DevicePointer;
use std::mem::transmute;
use crate::basic_structs::scalar::ScalarTrait;
use crate::curve_structs::*;
use icicle_core::utils::{u32_vec_to_u64_vec, u64_vec_to_u32_vec};
use std::marker::PhantomData;
use std::convert::TryInto;
use ark_bls12_377::{Fq as Fq_BLS12_377, Fr as Fr_BLS12_377, G1Affine as G1Affine_BLS12_377, G1Projective as G1Projective_BLS12_377};
use ark_ec::AffineCurve;
use ark_ff::{BigInteger384, BigInteger256, PrimeField};
use rustacuda::memory::{CopyDestination, DeviceCopy};
extern "C" {
fn msm_cuda(
out: *mut Point,
points: *const PointAffineNoInfinity,
scalars: *const Scalar,
count: usize,
device_id: usize,
) -> c_uint;
fn msm_batch_cuda(
out: *mut Point,
points: *const PointAffineNoInfinity,
scalars: *const Scalar,
batch_size: usize,
msm_size: usize,
device_id: usize,
) -> c_uint;
fn commit_cuda(
d_out: DevicePointer<Point>,
d_scalars: DevicePointer<Scalar>,
d_points: DevicePointer<PointAffineNoInfinity>,
count: usize,
device_id: usize,
) -> c_uint;
fn commit_batch_cuda(
d_out: DevicePointer<Point>,
d_scalars: DevicePointer<Scalar>,
d_points: DevicePointer<PointAffineNoInfinity>,
count: usize,
batch_size: usize,
device_id: usize,
) -> c_uint;
fn build_domain_cuda(domain_size: usize, logn: usize, inverse: bool, device_id: usize) -> DevicePointer<Scalar>;
fn ntt_cuda(inout: *mut Scalar, n: usize, inverse: bool, device_id: usize) -> c_int;
fn ecntt_cuda(inout: *mut Point, n: usize, inverse: bool, device_id: usize) -> c_int;
fn ntt_batch_cuda(
inout: *mut Scalar,
arr_size: usize,
n: usize,
inverse: bool,
) -> c_int;
fn ecntt_batch_cuda(inout: *mut Point, arr_size: usize, n: usize, inverse: bool) -> c_int;
fn interpolate_scalars_cuda(
d_out: DevicePointer<Scalar>,
d_evaluations: DevicePointer<Scalar>,
d_domain: DevicePointer<Scalar>,
n: usize,
device_id: usize
) -> c_int;
fn interpolate_scalars_batch_cuda(
d_out: DevicePointer<Scalar>,
d_evaluations: DevicePointer<Scalar>,
d_domain: DevicePointer<Scalar>,
n: usize,
batch_size: usize,
device_id: usize
) -> c_int;
fn interpolate_points_cuda(
d_out: DevicePointer<Point>,
d_evaluations: DevicePointer<Point>,
d_domain: DevicePointer<Scalar>,
n: usize,
device_id: usize
) -> c_int;
fn interpolate_points_batch_cuda(
d_out: DevicePointer<Point>,
d_evaluations: DevicePointer<Point>,
d_domain: DevicePointer<Scalar>,
n: usize,
batch_size: usize,
device_id: usize
) -> c_int;
fn evaluate_scalars_cuda(
d_out: DevicePointer<Scalar>,
d_coefficients: DevicePointer<Scalar>,
d_domain: DevicePointer<Scalar>,
domain_size: usize,
n: usize,
device_id: usize
) -> c_int;
fn evaluate_scalars_batch_cuda(
d_out: DevicePointer<Scalar>,
d_coefficients: DevicePointer<Scalar>,
d_domain: DevicePointer<Scalar>,
domain_size: usize,
n: usize,
batch_size: usize,
device_id: usize
) -> c_int;
fn evaluate_points_cuda(
d_out: DevicePointer<Point>,
d_coefficients: DevicePointer<Point>,
d_domain: DevicePointer<Scalar>,
domain_size: usize,
n: usize,
device_id: usize
) -> c_int;
fn evaluate_points_batch_cuda(
d_out: DevicePointer<Point>,
d_coefficients: DevicePointer<Point>,
d_domain: DevicePointer<Scalar>,
domain_size: usize,
n: usize,
batch_size: usize,
device_id: usize
) -> c_int;
fn evaluate_scalars_on_coset_cuda(
d_out: DevicePointer<Scalar>,
d_coefficients: DevicePointer<Scalar>,
d_domain: DevicePointer<Scalar>,
domain_size: usize,
n: usize,
coset_powers: DevicePointer<Scalar>,
device_id: usize
) -> c_int;
fn evaluate_scalars_on_coset_batch_cuda(
d_out: DevicePointer<Scalar>,
d_coefficients: DevicePointer<Scalar>,
d_domain: DevicePointer<Scalar>,
domain_size: usize,
n: usize,
batch_size: usize,
coset_powers: DevicePointer<Scalar>,
device_id: usize
) -> c_int;
fn evaluate_points_on_coset_cuda(
d_out: DevicePointer<Point>,
d_coefficients: DevicePointer<Point>,
d_domain: DevicePointer<Scalar>,
domain_size: usize,
n: usize,
coset_powers: DevicePointer<Scalar>,
device_id: usize
) -> c_int;
fn evaluate_points_on_coset_batch_cuda(
d_out: DevicePointer<Point>,
d_coefficients: DevicePointer<Point>,
d_domain: DevicePointer<Scalar>,
domain_size: usize,
n: usize,
batch_size: usize,
coset_powers: DevicePointer<Scalar>,
device_id: usize
) -> c_int;
fn reverse_order_scalars_cuda(
d_arr: DevicePointer<Scalar>,
n: usize,
device_id: usize
) -> c_int;
fn reverse_order_scalars_batch_cuda(
d_arr: DevicePointer<Scalar>,
n: usize,
batch_size: usize,
device_id: usize
) -> c_int;
fn reverse_order_points_cuda(
d_arr: DevicePointer<Point>,
n: usize,
device_id: usize
) -> c_int;
fn reverse_order_points_batch_cuda(
d_arr: DevicePointer<Point>,
n: usize,
batch_size: usize,
device_id: usize
) -> c_int;
fn vec_mod_mult_point(
inout: *mut Point,
scalars: *const Scalar,
n_elements: usize,
device_id: usize,
) -> c_int;
fn vec_mod_mult_scalar(
inout: *mut Scalar,
scalars: *const Scalar,
n_elements: usize,
device_id: usize,
) -> c_int;
fn matrix_vec_mod_mult(
matrix_flattened: *const Scalar,
input: *const Scalar,
output: *mut Scalar,
n_elements: usize,
device_id: usize,
) -> c_int;
}
pub fn msm(points: &[PointAffineNoInfinity], scalars: &[Scalar], device_id: usize) -> Point {
let count = points.len();
if count != scalars.len() {
todo!("variable length")
}
let mut ret = Point::zero();
unsafe {
msm_cuda(
&mut ret as *mut _ as *mut Point,
points as *const _ as *const PointAffineNoInfinity,
scalars as *const _ as *const Scalar,
scalars.len(),
device_id,
)
};
ret
}
pub fn msm_batch(
points: &[PointAffineNoInfinity],
scalars: &[Scalar],
batch_size: usize,
device_id: usize,
) -> Vec<Point> {
let count = points.len();
if count != scalars.len() {
todo!("variable length")
}
let mut ret = vec![Point::zero(); batch_size];
unsafe {
msm_batch_cuda(
&mut ret[0] as *mut _ as *mut Point,
points as *const _ as *const PointAffineNoInfinity,
scalars as *const _ as *const Scalar,
batch_size,
count / batch_size,
device_id,
)
};
ret
}
pub fn commit(
points: &mut DeviceBuffer<PointAffineNoInfinity>,
scalars: &mut DeviceBuffer<Scalar>,
) -> DeviceBox<Point> {
let mut res = DeviceBox::new(&Point::zero()).unwrap();
unsafe {
commit_cuda(
res.as_device_ptr(),
scalars.as_device_ptr(),
points.as_device_ptr(),
scalars.len(),
0,
);
}
return res;
}
pub fn commit_batch(
points: &mut DeviceBuffer<PointAffineNoInfinity>,
scalars: &mut DeviceBuffer<Scalar>,
batch_size: usize,
) -> DeviceBuffer<Point> {
let mut res = unsafe { DeviceBuffer::uninitialized(batch_size).unwrap() };
unsafe {
commit_batch_cuda(
res.as_device_ptr(),
scalars.as_device_ptr(),
points.as_device_ptr(),
scalars.len() / batch_size,
batch_size,
0,
);
}
return res;
}
/// Compute an in-place NTT on the input data.
fn ntt_internal(values: &mut [Scalar], device_id: usize, inverse: bool) -> i32 {
let ret_code = unsafe {
ntt_cuda(
values as *mut _ as *mut Scalar,
values.len(),
inverse,
device_id,
)
};
ret_code
}
pub fn ntt(values: &mut [Scalar], device_id: usize) {
ntt_internal(values, device_id, false);
}
pub fn intt(values: &mut [Scalar], device_id: usize) {
ntt_internal(values, device_id, true);
}
/// Compute an in-place NTT on the input data.
fn ntt_internal_batch(
values: &mut [Scalar],
device_id: usize,
batch_size: usize,
inverse: bool,
) -> i32 {
unsafe {
ntt_batch_cuda(
values as *mut _ as *mut Scalar,
values.len(),
batch_size,
inverse,
)
}
}
pub fn ntt_batch(values: &mut [Scalar], batch_size: usize, device_id: usize) {
ntt_internal_batch(values, 0, batch_size, false);
}
pub fn intt_batch(values: &mut [Scalar], batch_size: usize, device_id: usize) {
ntt_internal_batch(values, 0, batch_size, true);
}
/// Compute an in-place ECNTT on the input data.
fn ecntt_internal(values: &mut [Point], inverse: bool, device_id: usize) -> i32 {
unsafe {
ecntt_cuda(
values as *mut _ as *mut Point,
values.len(),
inverse,
device_id,
)
}
}
pub fn ecntt(values: &mut [Point], device_id: usize) {
ecntt_internal(values, false, device_id);
}
/// Compute an in-place iECNTT on the input data.
pub fn iecntt(values: &mut [Point], device_id: usize) {
ecntt_internal(values, true, device_id);
}
/// Compute an in-place ECNTT on the input data.
fn ecntt_internal_batch(
values: &mut [Point],
device_id: usize,
batch_size: usize,
inverse: bool,
) -> i32 {
unsafe {
ecntt_batch_cuda(
values as *mut _ as *mut Point,
values.len(),
batch_size,
inverse,
)
}
}
pub fn ecntt_batch(values: &mut [Point], batch_size: usize, device_id: usize) {
ecntt_internal_batch(values, 0, batch_size, false);
}
/// Compute an in-place iECNTT on the input data.
pub fn iecntt_batch(values: &mut [Point], batch_size: usize, device_id: usize) {
ecntt_internal_batch(values, 0, batch_size, true);
}
pub fn build_domain(domain_size: usize, logn: usize, inverse: bool) -> DeviceBuffer<Scalar> {
unsafe {
DeviceBuffer::from_raw_parts(build_domain_cuda(
domain_size,
logn,
inverse,
0
), domain_size)
}
}
pub fn reverse_order_scalars(
d_scalars: &mut DeviceBuffer<Scalar>,
) {
unsafe { reverse_order_scalars_cuda(
d_scalars.as_device_ptr(),
d_scalars.len(),
0
); }
}
pub fn reverse_order_scalars_batch(
d_scalars: &mut DeviceBuffer<Scalar>,
batch_size: usize,
) {
unsafe { reverse_order_scalars_batch_cuda(
d_scalars.as_device_ptr(),
d_scalars.len() / batch_size,
batch_size,
0
); }
}
pub fn reverse_order_points(
d_points: &mut DeviceBuffer<Point>,
) {
unsafe { reverse_order_points_cuda(
d_points.as_device_ptr(),
d_points.len(),
0
); }
}
pub fn reverse_order_points_batch(
d_points: &mut DeviceBuffer<Point>,
batch_size: usize,
) {
unsafe { reverse_order_points_batch_cuda(
d_points.as_device_ptr(),
d_points.len() / batch_size,
batch_size,
0
); }
}
pub fn interpolate_scalars(
d_evaluations: &mut DeviceBuffer<Scalar>,
d_domain: &mut DeviceBuffer<Scalar>
) -> DeviceBuffer<Scalar> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
unsafe { interpolate_scalars_cuda(
res.as_device_ptr(),
d_evaluations.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
0
) };
return res;
}
pub fn interpolate_scalars_batch(
d_evaluations: &mut DeviceBuffer<Scalar>,
d_domain: &mut DeviceBuffer<Scalar>,
batch_size: usize,
) -> DeviceBuffer<Scalar> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
unsafe { interpolate_scalars_batch_cuda(
res.as_device_ptr(),
d_evaluations.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
batch_size,
0
) };
return res;
}
pub fn interpolate_points(
d_evaluations: &mut DeviceBuffer<Point>,
d_domain: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Point> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
unsafe { interpolate_points_cuda(
res.as_device_ptr(),
d_evaluations.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
0
) };
return res;
}
pub fn interpolate_points_batch(
d_evaluations: &mut DeviceBuffer<Point>,
d_domain: &mut DeviceBuffer<Scalar>,
batch_size: usize,
) -> DeviceBuffer<Point> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
unsafe { interpolate_points_batch_cuda(
res.as_device_ptr(),
d_evaluations.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
batch_size,
0
) };
return res;
}
pub fn evaluate_scalars(
d_coefficients: &mut DeviceBuffer<Scalar>,
d_domain: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Scalar> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
unsafe {
evaluate_scalars_cuda(
res.as_device_ptr(),
d_coefficients.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
d_coefficients.len(),
0
);
}
return res;
}
pub fn evaluate_scalars_batch(
d_coefficients: &mut DeviceBuffer<Scalar>,
d_domain: &mut DeviceBuffer<Scalar>,
batch_size: usize,
) -> DeviceBuffer<Scalar> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
unsafe {
evaluate_scalars_batch_cuda(
res.as_device_ptr(),
d_coefficients.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
d_coefficients.len() / batch_size,
batch_size,
0
);
}
return res;
}
pub fn evaluate_points(
d_coefficients: &mut DeviceBuffer<Point>,
d_domain: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Point> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
unsafe {
evaluate_points_cuda(
res.as_device_ptr(),
d_coefficients.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
d_coefficients.len(),
0
);
}
return res;
}
pub fn evaluate_points_batch(
d_coefficients: &mut DeviceBuffer<Point>,
d_domain: &mut DeviceBuffer<Scalar>,
batch_size: usize,
) -> DeviceBuffer<Point> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
unsafe {
evaluate_points_batch_cuda(
res.as_device_ptr(),
d_coefficients.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
d_coefficients.len() / batch_size,
batch_size,
0
);
}
return res;
}
pub fn evaluate_scalars_on_coset(
d_coefficients: &mut DeviceBuffer<Scalar>,
d_domain: &mut DeviceBuffer<Scalar>,
coset_powers: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Scalar> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
unsafe {
evaluate_scalars_on_coset_cuda(
res.as_device_ptr(),
d_coefficients.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
d_coefficients.len(),
coset_powers.as_device_ptr(),
0
);
}
return res;
}
pub fn evaluate_scalars_on_coset_batch(
d_coefficients: &mut DeviceBuffer<Scalar>,
d_domain: &mut DeviceBuffer<Scalar>,
batch_size: usize,
coset_powers: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Scalar> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
unsafe {
evaluate_scalars_on_coset_batch_cuda(
res.as_device_ptr(),
d_coefficients.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
d_coefficients.len() / batch_size,
batch_size,
coset_powers.as_device_ptr(),
0
);
}
return res;
}
pub fn evaluate_points_on_coset(
d_coefficients: &mut DeviceBuffer<Point>,
d_domain: &mut DeviceBuffer<Scalar>,
coset_powers: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Point> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
unsafe {
evaluate_points_on_coset_cuda(
res.as_device_ptr(),
d_coefficients.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
d_coefficients.len(),
coset_powers.as_device_ptr(),
0
);
}
return res;
}
pub fn evaluate_points_on_coset_batch(
d_coefficients: &mut DeviceBuffer<Point>,
d_domain: &mut DeviceBuffer<Scalar>,
batch_size: usize,
coset_powers: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Point> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
unsafe {
evaluate_points_on_coset_batch_cuda(
res.as_device_ptr(),
d_coefficients.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
d_coefficients.len() / batch_size,
batch_size,
coset_powers.as_device_ptr(),
0
);
}
return res;
}
pub fn multp_vec(a: &mut [Point], b: &[Scalar], device_id: usize) {
assert_eq!(a.len(), b.len());
unsafe {
vec_mod_mult_point(
a as *mut _ as *mut Point,
b as *const _ as *const Scalar,
a.len(),
device_id,
);
}
}
pub fn mult_sc_vec(a: &mut [Scalar], b: &[Scalar], device_id: usize) {
assert_eq!(a.len(), b.len());
unsafe {
vec_mod_mult_scalar(
a as *mut _ as *mut Scalar,
b as *const _ as *const Scalar,
a.len(),
device_id,
);
}
}
// Multiply a matrix by a vector:
// `a` - flattened matrix;
// `b` - vector to multiply `a` by;
pub fn mult_matrix_by_vec(a: &[Scalar], b: &[Scalar], device_id: usize) -> Vec<Scalar> {
let mut c = vec![Scalar::zero(); b.len()];
unsafe {
matrix_vec_mod_mult(
a as *const _ as *const Scalar,
b as *const _ as *const Scalar,
c.as_mut_slice() as *mut _ as *mut Scalar,
b.len(),
device_id,
);
}
c
}
pub fn clone_buffer<T: DeviceCopy>(buf: &mut DeviceBuffer<T>) -> DeviceBuffer<T> {
let mut buf_cpy = unsafe { DeviceBuffer::uninitialized(buf.len()).unwrap() };
unsafe { buf_cpy.copy_from(buf) };
return buf_cpy;
}
pub fn get_rng(seed: Option<u64>) -> Box<dyn RngCore> {
let rng: Box<dyn RngCore> = match seed {
Some(seed) => Box::new(StdRng::seed_from_u64(seed)),
None => Box::new(rand::thread_rng()),
};
rng
}
fn set_up_device() {
// Initialize the CUDA driver API and create a context on the first device.
rustacuda::init(CudaFlags::empty()).unwrap();
let device = Device::get_device(0).unwrap();
let _ctx = Context::create_and_push(ContextFlags::MAP_HOST | ContextFlags::SCHED_AUTO, device).unwrap();
}
pub fn generate_random_points(
count: usize,
mut rng: Box<dyn RngCore>,
) -> Vec<PointAffineNoInfinity> {
(0..count)
.map(|_| Point::from_ark(G1Projective_BLS12_377::rand(&mut rng)).to_xy_strip_z())
.collect()
}
pub fn generate_random_points_proj(count: usize, mut rng: Box<dyn RngCore>) -> Vec<Point> {
(0..count)
.map(|_| Point::from_ark(G1Projective_BLS12_377::rand(&mut rng)))
.collect()
}
pub fn generate_random_scalars(count: usize, mut rng: Box<dyn RngCore>) -> Vec<Scalar> {
(0..count)
.map(|_| Scalar::from_ark(Fr_BLS12_377::rand(&mut rng).into_repr()))
.collect()
}
pub fn set_up_points(test_size: usize, log_domain_size: usize, inverse: bool) -> (Vec<Point>, DeviceBuffer<Point>, DeviceBuffer<Scalar>) {
set_up_device();
let d_domain = build_domain(1 << log_domain_size, log_domain_size, inverse);
let seed = Some(0); // fix the rng to get two equal scalars
let vector = generate_random_points_proj(test_size, get_rng(seed));
let mut vector_mut = vector.clone();
let mut d_vector = DeviceBuffer::from_slice(&vector[..]).unwrap();
(vector_mut, d_vector, d_domain)
}
pub fn set_up_scalars(test_size: usize, log_domain_size: usize, inverse: bool) -> (Vec<Scalar>, DeviceBuffer<Scalar>, DeviceBuffer<Scalar>) {
set_up_device();
let d_domain = build_domain(1 << log_domain_size, log_domain_size, inverse);
let seed = Some(0); // fix the rng to get two equal scalars
let mut vector_mut = generate_random_scalars(test_size, get_rng(seed));
let mut d_vector = DeviceBuffer::from_slice(&vector_mut[..]).unwrap();
(vector_mut, d_vector, d_domain)
}
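All of the wrappers in this file follow one FFI shape: pass a raw pointer plus an element count (and a device id) to an extern CUDA symbol and surface its status code. Here is a dependency-free sketch of that shape, with a plain Rust function standing in for the extern, since calling the real `ntt_cuda` requires the compiled CUDA library; `fake_ntt_cuda` is purely illustrative.

```rust
// Stand-in for the ntt_cuda extern: reverses the slice in place and returns 0,
// just to exercise the wrapper shape. The real symbol lives in the CUDA library.
fn fake_ntt_cuda(inout: *mut u32, n: usize, _inverse: bool, _device_id: usize) -> i32 {
    let values = unsafe { std::slice::from_raw_parts_mut(inout, n) };
    values.reverse();
    0
}

// Same shape as ntt_internal above: take a safe slice, hand the raw pointer
// and length across the "FFI" boundary, and surface the status code.
fn ntt_internal(values: &mut [u32], device_id: usize, inverse: bool) -> i32 {
    fake_ntt_cuda(values.as_mut_ptr(), values.len(), inverse, device_id)
}

fn main() {
    let mut values = vec![1u32, 2, 3, 4];
    assert_eq!(ntt_internal(&mut values, 0, false), 0);
    assert_eq!(values, vec![4, 3, 2, 1]);
}
```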

bls12-377/src/lib.rs

@@ -0,0 +1,4 @@
pub mod test_bls12_377;
pub mod basic_structs;
pub mod from_cuda;
pub mod curve_structs;


@@ -0,0 +1,816 @@
use std::ffi::{c_int, c_uint};
use ark_std::UniformRand;
use rand::{rngs::StdRng, RngCore, SeedableRng};
use rustacuda::CudaFlags;
use rustacuda::memory::DeviceBox;
use rustacuda::prelude::{DeviceBuffer, Device, ContextFlags, Context};
use rustacuda_core::DevicePointer;
use std::mem::transmute;
pub use crate::basic_structs::scalar::ScalarTrait;
pub use crate::curve_structs::*;
use icicle_core::utils::{u32_vec_to_u64_vec, u64_vec_to_u32_vec};
use std::marker::PhantomData;
use std::convert::TryInto;
use ark_bls12_377::{Fq as Fq_BLS12_377, Fr as Fr_BLS12_377, G1Affine as G1Affine_BLS12_377, G1Projective as G1Projective_BLS12_377};
use ark_ec::AffineCurve;
use ark_ff::{BigInteger384, BigInteger256, PrimeField};
use rustacuda::memory::{CopyDestination, DeviceCopy};
impl Scalar {
pub fn to_biginteger254(&self) -> BigInteger256 {
BigInteger256::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap())
}
pub fn to_ark(&self) -> BigInteger256 {
BigInteger256::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap())
}
pub fn from_biginteger256(ark: BigInteger256) -> Self {
Self{ value: u64_vec_to_u32_vec(&ark.0).try_into().unwrap(), phantom : PhantomData}
}
pub fn to_biginteger256_transmute(&self) -> BigInteger256 {
unsafe { transmute(*self) }
}
pub fn from_biginteger_transmute(v: BigInteger256) -> Scalar {
Scalar{ value: unsafe{ transmute(v)}, phantom : PhantomData }
}
pub fn to_ark_transmute(&self) -> Fr_BLS12_377 {
unsafe { std::mem::transmute(*self) }
}
pub fn from_ark_transmute(v: &Fr_BLS12_377) -> Scalar {
unsafe { std::mem::transmute_copy(v) }
}
pub fn to_ark_mod_p(&self) -> Fr_BLS12_377 {
Fr_BLS12_377::new(BigInteger256::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap()))
}
pub fn to_ark_repr(&self) -> Fr_BLS12_377 {
Fr_BLS12_377::from_repr(BigInteger256::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap())).unwrap()
}
pub fn from_ark(v: BigInteger256) -> Scalar {
Self { value : u64_vec_to_u32_vec(&v.0).try_into().unwrap(), phantom: PhantomData}
}
}
impl Base {
pub fn to_ark(&self) -> BigInteger384 {
BigInteger384::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap())
}
pub fn from_ark(ark: BigInteger384) -> Self {
Self::from_limbs(&u64_vec_to_u32_vec(&ark.0))
}
}
impl Point {
pub fn to_ark(&self) -> G1Projective_BLS12_377 {
self.to_ark_affine().into_projective()
}
pub fn to_ark_affine(&self) -> G1Affine_BLS12_377 {
//TODO: generic conversion
use ark_ff::Field;
use std::ops::Mul;
let proj_x_field = Fq_BLS12_377::from_le_bytes_mod_order(&self.x.to_bytes_le());
let proj_y_field = Fq_BLS12_377::from_le_bytes_mod_order(&self.y.to_bytes_le());
let proj_z_field = Fq_BLS12_377::from_le_bytes_mod_order(&self.z.to_bytes_le());
let inverse_z = proj_z_field.inverse().unwrap();
let aff_x = proj_x_field.mul(inverse_z);
let aff_y = proj_y_field.mul(inverse_z);
G1Affine_BLS12_377::new(aff_x, aff_y, false)
}
pub fn from_ark(ark: G1Projective_BLS12_377) -> Point {
use ark_ff::Field;
let z_inv = ark.z.inverse().unwrap();
let z_invsq = z_inv * z_inv;
let z_invq3 = z_invsq * z_inv;
Point {
x: Base::from_ark((ark.x * z_invsq).into_repr()),
y: Base::from_ark((ark.y * z_invq3).into_repr()),
z: Base::one(),
}
}
}
impl PointAffineNoInfinity {
pub fn to_ark(&self) -> G1Affine_BLS12_377 {
G1Affine_BLS12_377::new(Fq_BLS12_377::new(self.x.to_ark()), Fq_BLS12_377::new(self.y.to_ark()), false)
}
pub fn to_ark_repr(&self) -> G1Affine_BLS12_377 {
G1Affine_BLS12_377::new(
Fq_BLS12_377::from_repr(self.x.to_ark()).unwrap(),
Fq_BLS12_377::from_repr(self.y.to_ark()).unwrap(),
false,
)
}
pub fn from_ark(p: &G1Affine_BLS12_377) -> Self {
PointAffineNoInfinity {
x: Base::from_ark(p.x.into_repr()),
y: Base::from_ark(p.y.into_repr()),
}
}
}
impl Point {
pub fn to_affine(&self) -> PointAffineNoInfinity {
let ark_affine = self.to_ark_affine();
PointAffineNoInfinity {
x: Base::from_ark(ark_affine.x.into_repr()),
y: Base::from_ark(ark_affine.y.into_repr()),
}
}
}
#[cfg(test)]
pub(crate) mod tests_bls12_377 {
use std::ops::Add;
use ark_bls12_377::{Fr, G1Affine, G1Projective};
use ark_ec::{msm::VariableBaseMSM, AffineCurve, ProjectiveCurve};
use ark_ff::{FftField, Field, Zero, PrimeField};
use ark_std::UniformRand;
use rustacuda::prelude::{DeviceBuffer, CopyDestination};
use crate::curve_structs::{Point, Scalar, Base};
use crate::basic_structs::scalar::ScalarTrait;
use crate::from_cuda::{generate_random_points, get_rng, generate_random_scalars, msm, msm_batch, set_up_scalars, commit, commit_batch, ntt, intt, generate_random_points_proj, ecntt, iecntt, ntt_batch, ecntt_batch, iecntt_batch, intt_batch, reverse_order_scalars_batch, interpolate_scalars_batch, set_up_points, reverse_order_points, interpolate_points, reverse_order_points_batch, interpolate_points_batch, evaluate_scalars, interpolate_scalars, reverse_order_scalars, evaluate_points, build_domain, evaluate_scalars_on_coset, evaluate_points_on_coset, mult_matrix_by_vec, mult_sc_vec, multp_vec,evaluate_scalars_batch, evaluate_points_batch, evaluate_scalars_on_coset_batch, evaluate_points_on_coset_batch};
fn random_points_ark_proj(nof_elements: usize) -> Vec<G1Projective> {
let mut rng = ark_std::rand::thread_rng();
let mut points_ga: Vec<G1Projective> = Vec::new();
for _ in 0..nof_elements {
let aff = G1Projective::rand(&mut rng);
points_ga.push(aff);
}
points_ga
}
fn ecntt_arc_naive(
points: &Vec<G1Projective>,
size: usize,
inverse: bool,
) -> Vec<G1Projective> {
let mut result: Vec<G1Projective> = Vec::new();
for _ in 0..size {
result.push(G1Projective::zero());
}
let rou: Fr;
if !inverse {
rou = Fr::get_root_of_unity(size).unwrap();
} else {
rou = Fr::inverse(&Fr::get_root_of_unity(size).unwrap()).unwrap();
}
for k in 0..size {
for l in 0..size {
let pow: [u64; 1] = [(l * k).try_into().unwrap()];
let mul_rou = Fr::pow(&rou, &pow);
result[k] = result[k].add(points[l].into_affine().mul(mul_rou));
}
}
if inverse {
let size2 = size as u64;
for k in 0..size {
let multfactor = Fr::inverse(&Fr::from(size2)).unwrap();
result[k] = result[k].into_affine().mul(multfactor);
}
}
return result;
}
fn check_eq(points: &Vec<G1Projective>, points2: &Vec<G1Projective>) -> bool {
let mut eq = true;
for i in 0..points.len() {
if points2[i].ne(&points[i]) {
eq = false;
break;
}
}
return eq;
}
fn test_naive_ark_ecntt(size: usize) {
let points = random_points_ark_proj(size);
let result1: Vec<G1Projective> = ecntt_arc_naive(&points, size, false);
let result2: Vec<G1Projective> = ecntt_arc_naive(&result1, size, true);
assert!(!check_eq(&result2, &result1));
assert!(check_eq(&result2, &points));
}
#[test]
fn test_msm() {
let test_sizes = [6, 9];
for pow2 in test_sizes {
let count = 1 << pow2;
let seed = None; // set Some to provide seed
let points = generate_random_points(count, get_rng(seed));
let scalars = generate_random_scalars(count, get_rng(seed));
let msm_result = msm(&points, &scalars, 0);
let point_r_ark: Vec<_> = points.iter().map(|x| x.to_ark_repr()).collect();
let scalars_r_ark: Vec<_> = scalars.iter().map(|x| x.to_ark()).collect();
let msm_result_ark = VariableBaseMSM::multi_scalar_mul(&point_r_ark, &scalars_r_ark);
assert_eq!(msm_result.to_ark_affine(), msm_result_ark);
assert_eq!(msm_result.to_ark(), msm_result_ark);
assert_eq!(
msm_result.to_ark_affine(),
Point::from_ark(msm_result_ark).to_ark_affine()
);
}
}
#[test]
fn test_batch_msm() {
for batch_pow2 in [2, 4] {
for pow2 in [4, 6] {
let msm_size = 1 << pow2;
let batch_size = 1 << batch_pow2;
let seed = None; // set Some to provide seed
let points_batch = generate_random_points(msm_size * batch_size, get_rng(seed));
let scalars_batch = generate_random_scalars(msm_size * batch_size, get_rng(seed));
let point_r_ark: Vec<_> = points_batch.iter().map(|x| x.to_ark_repr()).collect();
let scalars_r_ark: Vec<_> = scalars_batch.iter().map(|x| x.to_ark()).collect();
let expected: Vec<_> = point_r_ark
.chunks(msm_size)
.zip(scalars_r_ark.chunks(msm_size))
.map(|p| Point::from_ark(VariableBaseMSM::multi_scalar_mul(p.0, p.1)))
.collect();
let result = msm_batch(&points_batch, &scalars_batch, batch_size, 0);
assert_eq!(result, expected);
}
}
}
#[test]
fn test_commit() {
let test_size = 1 << 8;
let seed = Some(0);
let (mut scalars, mut d_scalars, _) = set_up_scalars(test_size, 0, false);
let mut points = generate_random_points(test_size, get_rng(seed));
let mut d_points = DeviceBuffer::from_slice(&points[..]).unwrap();
let msm_result = msm(&points, &scalars, 0);
let mut d_commit_result = commit(&mut d_points, &mut d_scalars);
let mut h_commit_result = Point::zero();
d_commit_result.copy_to(&mut h_commit_result).unwrap();
assert_eq!(msm_result, h_commit_result);
assert_ne!(msm_result, Point::zero());
assert_ne!(h_commit_result, Point::zero());
}
#[test]
fn test_batch_commit() {
let batch_size = 4;
let test_size = 1 << 12;
let seed = Some(0);
let (scalars, mut d_scalars, _) = set_up_scalars(test_size * batch_size, 0, false);
let points = generate_random_points(test_size * batch_size, get_rng(seed));
let mut d_points = DeviceBuffer::from_slice(&points[..]).unwrap();
let msm_result = msm_batch(&points, &scalars, batch_size, 0);
let mut d_commit_result = commit_batch(&mut d_points, &mut d_scalars, batch_size);
let mut h_commit_result: Vec<Point> = (0..batch_size).map(|_| Point::zero()).collect();
d_commit_result.copy_to(&mut h_commit_result[..]).unwrap();
assert_eq!(msm_result, h_commit_result);
for h in h_commit_result {
assert_ne!(h, Point::zero());
}
}
#[test]
fn test_ntt() {
//NTT
let seed = None; //some value to fix the rng
let test_size = 1 << 3;
let scalars = generate_random_scalars(test_size, get_rng(seed));
let mut ntt_result = scalars.clone();
ntt(&mut ntt_result, 0);
assert_ne!(ntt_result, scalars);
let mut intt_result = ntt_result.clone();
intt(&mut intt_result, 0);
assert_eq!(intt_result, scalars);
//ECNTT
let points_proj = generate_random_points_proj(test_size, get_rng(seed));
test_naive_ark_ecntt(test_size);
assert!(points_proj[0].to_ark().into_affine().is_on_curve());
//naive ark
let points_proj_ark = points_proj
.iter()
.map(|p| p.to_ark())
.collect::<Vec<G1Projective>>();
let ecntt_result_naive = ecntt_arc_naive(&points_proj_ark, points_proj_ark.len(), false);
let iecntt_result_naive = ecntt_arc_naive(&ecntt_result_naive, points_proj_ark.len(), true);
assert_eq!(points_proj_ark, iecntt_result_naive);
//ingo gpu
let mut ecntt_result = points_proj.to_vec();
ecntt(&mut ecntt_result, 0);
assert_ne!(ecntt_result, points_proj);
let mut iecntt_result = ecntt_result.clone();
iecntt(&mut iecntt_result, 0);
assert_eq!(
iecntt_result_naive,
points_proj
.iter()
.map(|p| p.to_ark_affine())
.collect::<Vec<G1Affine>>()
);
assert_eq!(
iecntt_result
.iter()
.map(|p| p.to_ark_affine())
.collect::<Vec<G1Affine>>(),
points_proj
.iter()
.map(|p| p.to_ark_affine())
.collect::<Vec<G1Affine>>()
);
}
#[test]
fn test_ntt_batch() {
//NTT
let seed = None; //some value to fix the rng
let test_size = 1 << 5;
let batches = 4;
let scalars_batch: Vec<Scalar> =
generate_random_scalars(test_size * batches, get_rng(seed));
let mut scalar_vec_of_vec: Vec<Vec<Scalar>> = Vec::new();
for i in 0..batches {
scalar_vec_of_vec.push(scalars_batch[i * test_size..(i + 1) * test_size].to_vec());
}
let mut ntt_result = scalars_batch.clone();
// do batch ntt
ntt_batch(&mut ntt_result, test_size, 0);
let mut ntt_result_vec_of_vec = Vec::new();
// do ntt for every chunk
for i in 0..batches {
ntt_result_vec_of_vec.push(scalar_vec_of_vec[i].clone());
ntt(&mut ntt_result_vec_of_vec[i], 0);
}
// check that the ntt of each chunk equals the corresponding slice of the batch ntt
for i in 0..batches {
assert_eq!(
ntt_result_vec_of_vec[i],
ntt_result[i * test_size..(i + 1) * test_size]
);
}
// check that ntt output is different from input
assert_ne!(ntt_result, scalars_batch);
let mut intt_result = ntt_result.clone();
// do batch intt
intt_batch(&mut intt_result, test_size, 0);
let mut intt_result_vec_of_vec = Vec::new();
// do intt for every chunk
for i in 0..batches {
intt_result_vec_of_vec.push(ntt_result_vec_of_vec[i].clone());
intt(&mut intt_result_vec_of_vec[i], 0);
}
// check that the intt of each chunk equals the corresponding slice of the batch intt
for i in 0..batches {
assert_eq!(
intt_result_vec_of_vec[i],
intt_result[i * test_size..(i + 1) * test_size]
);
}
assert_eq!(intt_result, scalars_batch);
//ECNTT
let points_proj = generate_random_points_proj(test_size * batches, get_rng(seed));
let mut points_vec_of_vec: Vec<Vec<Point>> = Vec::new();
for i in 0..batches {
points_vec_of_vec.push(points_proj[i * test_size..(i + 1) * test_size].to_vec());
}
let mut ntt_result_points = points_proj.clone();
// do batch ecntt
ecntt_batch(&mut ntt_result_points, test_size, 0);
let mut ntt_result_points_vec_of_vec = Vec::new();
for i in 0..batches {
ntt_result_points_vec_of_vec.push(points_vec_of_vec[i].clone());
ecntt(&mut ntt_result_points_vec_of_vec[i], 0);
}
for i in 0..batches {
assert_eq!(
ntt_result_points_vec_of_vec[i],
ntt_result_points[i * test_size..(i + 1) * test_size]
);
}
assert_ne!(ntt_result_points, points_proj);
let mut intt_result_points = ntt_result_points.clone();
// do batch ecintt
iecntt_batch(&mut intt_result_points, test_size, 0);
let mut intt_result_points_vec_of_vec = Vec::new();
// do ecintt for every chunk
for i in 0..batches {
intt_result_points_vec_of_vec.push(ntt_result_points_vec_of_vec[i].clone());
iecntt(&mut intt_result_points_vec_of_vec[i], 0);
}
// check that the iecntt of each chunk of points equals the corresponding slice of the batch iecntt
for i in 0..batches {
assert_eq!(
intt_result_points_vec_of_vec[i],
intt_result_points[i * test_size..(i + 1) * test_size]
);
}
assert_eq!(intt_result_points, points_proj);
}
#[test]
fn test_scalar_interpolation() {
let log_test_size = 7;
let test_size = 1 << log_test_size;
let (mut evals_mut, mut d_evals, mut d_domain) = set_up_scalars(test_size, log_test_size, true);
reverse_order_scalars(&mut d_evals);
let mut d_coeffs = interpolate_scalars(&mut d_evals, &mut d_domain);
intt(&mut evals_mut, 0);
let mut h_coeffs: Vec<Scalar> = (0..test_size).map(|_| Scalar::zero()).collect();
d_coeffs.copy_to(&mut h_coeffs[..]).unwrap();
assert_eq!(h_coeffs, evals_mut);
}
#[test]
fn test_scalar_batch_interpolation() {
let batch_size = 4;
let log_test_size = 10;
let test_size = 1 << log_test_size;
let (mut evals_mut, mut d_evals, mut d_domain) = set_up_scalars(test_size * batch_size, log_test_size, true);
reverse_order_scalars_batch(&mut d_evals, batch_size);
let mut d_coeffs = interpolate_scalars_batch(&mut d_evals, &mut d_domain, batch_size);
intt_batch(&mut evals_mut, test_size, 0);
let mut h_coeffs: Vec<Scalar> = (0..test_size * batch_size).map(|_| Scalar::zero()).collect();
d_coeffs.copy_to(&mut h_coeffs[..]).unwrap();
assert_eq!(h_coeffs, evals_mut);
}
#[test]
fn test_point_interpolation() {
let log_test_size = 6;
let test_size = 1 << log_test_size;
let (mut evals_mut, mut d_evals, mut d_domain) = set_up_points(test_size, log_test_size, true);
reverse_order_points(&mut d_evals);
let mut d_coeffs = interpolate_points(&mut d_evals, &mut d_domain);
iecntt(&mut evals_mut[..], 0);
let mut h_coeffs: Vec<Point> = (0..test_size).map(|_| Point::zero()).collect();
d_coeffs.copy_to(&mut h_coeffs[..]).unwrap();
assert_eq!(h_coeffs, *evals_mut);
for h in h_coeffs.iter() {
assert_ne!(*h, Point::zero());
}
}
#[test]
fn test_point_batch_interpolation() {
let batch_size = 4;
let log_test_size = 6;
let test_size = 1 << log_test_size;
let (mut evals_mut, mut d_evals, mut d_domain) = set_up_points(test_size * batch_size, log_test_size, true);
reverse_order_points_batch(&mut d_evals, batch_size);
let mut d_coeffs = interpolate_points_batch(&mut d_evals, &mut d_domain, batch_size);
iecntt_batch(&mut evals_mut[..], test_size, 0);
let mut h_coeffs: Vec<Point> = (0..test_size * batch_size).map(|_| Point::zero()).collect();
d_coeffs.copy_to(&mut h_coeffs[..]).unwrap();
assert_eq!(h_coeffs, *evals_mut);
for h in h_coeffs.iter() {
assert_ne!(*h, Point::zero());
}
}
#[test]
fn test_scalar_evaluation() {
let log_test_domain_size = 8;
let coeff_size = 1 << 6;
let (h_coeffs, mut d_coeffs, mut d_domain) = set_up_scalars(coeff_size, log_test_domain_size, false);
let (_, _, mut d_domain_inv) = set_up_scalars(0, log_test_domain_size, true);
let mut d_evals = evaluate_scalars(&mut d_coeffs, &mut d_domain);
let mut d_coeffs_domain = interpolate_scalars(&mut d_evals, &mut d_domain_inv);
let mut h_coeffs_domain: Vec<Scalar> = (0..1 << log_test_domain_size).map(|_| Scalar::zero()).collect();
d_coeffs_domain.copy_to(&mut h_coeffs_domain[..]).unwrap();
assert_eq!(h_coeffs, h_coeffs_domain[..coeff_size]);
for i in coeff_size..(1 << log_test_domain_size) {
assert_eq!(Scalar::zero(), h_coeffs_domain[i]);
}
}
#[test]
fn test_scalar_batch_evaluation() {
let batch_size = 6;
let log_test_domain_size = 8;
let domain_size = 1 << log_test_domain_size;
let coeff_size = 1 << 6;
let (h_coeffs, mut d_coeffs, mut d_domain) = set_up_scalars(coeff_size * batch_size, log_test_domain_size, false);
let (_, _, mut d_domain_inv) = set_up_scalars(0, log_test_domain_size, true);
let mut d_evals = evaluate_scalars_batch(&mut d_coeffs, &mut d_domain, batch_size);
let mut d_coeffs_domain = interpolate_scalars_batch(&mut d_evals, &mut d_domain_inv, batch_size);
let mut h_coeffs_domain: Vec<Scalar> = (0..domain_size * batch_size).map(|_| Scalar::zero()).collect();
d_coeffs_domain.copy_to(&mut h_coeffs_domain[..]).unwrap();
for j in 0..batch_size {
assert_eq!(h_coeffs[j * coeff_size..(j + 1) * coeff_size], h_coeffs_domain[j * domain_size..j * domain_size + coeff_size]);
for i in coeff_size..domain_size {
assert_eq!(Scalar::zero(), h_coeffs_domain[j * domain_size + i]);
}
}
}
#[test]
fn test_point_evaluation() {
let log_test_domain_size = 7;
let coeff_size = 1 << 7;
let (h_coeffs, mut d_coeffs, mut d_domain) = set_up_points(coeff_size, log_test_domain_size, false);
let (_, _, mut d_domain_inv) = set_up_points(0, log_test_domain_size, true);
let mut d_evals = evaluate_points(&mut d_coeffs, &mut d_domain);
let mut d_coeffs_domain = interpolate_points(&mut d_evals, &mut d_domain_inv);
let mut h_coeffs_domain: Vec<Point> = (0..1 << log_test_domain_size).map(|_| Point::zero()).collect();
d_coeffs_domain.copy_to(&mut h_coeffs_domain[..]).unwrap();
assert_eq!(h_coeffs[..], h_coeffs_domain[..coeff_size]);
for i in coeff_size..(1 << log_test_domain_size) {
assert_eq!(Point::zero(), h_coeffs_domain[i]);
}
for i in 0..coeff_size {
assert_ne!(h_coeffs_domain[i], Point::zero());
}
}
#[test]
fn test_point_batch_evaluation() {
let batch_size = 4;
let log_test_domain_size = 6;
let domain_size = 1 << log_test_domain_size;
let coeff_size = 1 << 5;
let (h_coeffs, mut d_coeffs, mut d_domain) = set_up_points(coeff_size * batch_size, log_test_domain_size, false);
let (_, _, mut d_domain_inv) = set_up_points(0, log_test_domain_size, true);
let mut d_evals = evaluate_points_batch(&mut d_coeffs, &mut d_domain, batch_size);
let mut d_coeffs_domain = interpolate_points_batch(&mut d_evals, &mut d_domain_inv, batch_size);
let mut h_coeffs_domain: Vec<Point> = (0..domain_size * batch_size).map(|_| Point::zero()).collect();
d_coeffs_domain.copy_to(&mut h_coeffs_domain[..]).unwrap();
for j in 0..batch_size {
assert_eq!(h_coeffs[j * coeff_size..(j + 1) * coeff_size], h_coeffs_domain[j * domain_size..(j * domain_size + coeff_size)]);
for i in coeff_size..domain_size {
assert_eq!(Point::zero(), h_coeffs_domain[j * domain_size + i]);
}
for i in j * domain_size..(j * domain_size + coeff_size) {
assert_ne!(h_coeffs_domain[i], Point::zero());
}
}
}
#[test]
fn test_scalar_evaluation_on_trivial_coset() {
// checks that the evaluations on the subgroup are the same as on the coset generated by 1
let log_test_domain_size = 8;
let coeff_size = 1 << 6;
let (_, mut d_coeffs, mut d_domain) = set_up_scalars(coeff_size, log_test_domain_size, false);
let (_, _, mut d_domain_inv) = set_up_scalars(coeff_size, log_test_domain_size, true);
let mut d_trivial_coset_powers = build_domain(1 << log_test_domain_size, 0, false);
let mut d_evals = evaluate_scalars(&mut d_coeffs, &mut d_domain);
let mut h_coeffs: Vec<Scalar> = (0..1 << log_test_domain_size).map(|_| Scalar::zero()).collect();
d_evals.copy_to(&mut h_coeffs[..]).unwrap();
let mut d_evals_coset = evaluate_scalars_on_coset(&mut d_coeffs, &mut d_domain, &mut d_trivial_coset_powers);
let mut h_evals_coset: Vec<Scalar> = (0..1 << log_test_domain_size).map(|_| Scalar::zero()).collect();
d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();
assert_eq!(h_coeffs, h_evals_coset);
}
#[test]
fn test_scalar_evaluation_on_coset() {
// checks that evaluating a polynomial on a subgroup and its coset is the same as evaluating on a 2x larger subgroup
let log_test_size = 8;
let test_size = 1 << log_test_size;
let (_, mut d_coeffs, mut d_domain) = set_up_scalars(test_size, log_test_size, false);
let (_, _, mut d_large_domain) = set_up_scalars(0, log_test_size + 1, false);
let mut d_coset_powers = build_domain(test_size, log_test_size + 1, false);
let mut d_evals_large = evaluate_scalars(&mut d_coeffs, &mut d_large_domain);
let mut h_evals_large: Vec<Scalar> = (0..2 * test_size).map(|_| Scalar::zero()).collect();
d_evals_large.copy_to(&mut h_evals_large[..]).unwrap();
let mut d_evals = evaluate_scalars(&mut d_coeffs, &mut d_domain);
let mut h_evals: Vec<Scalar> = (0..test_size).map(|_| Scalar::zero()).collect();
d_evals.copy_to(&mut h_evals[..]).unwrap();
let mut d_evals_coset = evaluate_scalars_on_coset(&mut d_coeffs, &mut d_domain, &mut d_coset_powers);
let mut h_evals_coset: Vec<Scalar> = (0..test_size).map(|_| Scalar::zero()).collect();
d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();
assert_eq!(h_evals[..], h_evals_large[..test_size]);
assert_eq!(h_evals_coset[..], h_evals_large[test_size..2 * test_size]);
}
#[test]
fn test_scalar_batch_evaluation_on_coset() {
// checks that evaluating a polynomial on a subgroup and its coset is the same as evaluating on a 2x larger subgroup
let batch_size = 4;
let log_test_size = 6;
let test_size = 1 << log_test_size;
let (_, mut d_coeffs, mut d_domain) = set_up_scalars(test_size * batch_size, log_test_size, false);
let (_, _, mut d_large_domain) = set_up_scalars(0, log_test_size + 1, false);
let mut d_coset_powers = build_domain(test_size, log_test_size + 1, false);
let mut d_evals_large = evaluate_scalars_batch(&mut d_coeffs, &mut d_large_domain, batch_size);
let mut h_evals_large: Vec<Scalar> = (0..2 * test_size * batch_size).map(|_| Scalar::zero()).collect();
d_evals_large.copy_to(&mut h_evals_large[..]).unwrap();
let mut d_evals = evaluate_scalars_batch(&mut d_coeffs, &mut d_domain, batch_size);
let mut h_evals: Vec<Scalar> = (0..test_size * batch_size).map(|_| Scalar::zero()).collect();
d_evals.copy_to(&mut h_evals[..]).unwrap();
let mut d_evals_coset = evaluate_scalars_on_coset_batch(&mut d_coeffs, &mut d_domain, batch_size, &mut d_coset_powers);
let mut h_evals_coset: Vec<Scalar> = (0..test_size * batch_size).map(|_| Scalar::zero()).collect();
d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();
for i in 0..batch_size {
assert_eq!(h_evals_large[2 * i * test_size..(2 * i + 1) * test_size], h_evals[i * test_size..(i + 1) * test_size]);
assert_eq!(h_evals_large[(2 * i + 1) * test_size..(2 * i + 2) * test_size], h_evals_coset[i * test_size..(i + 1) * test_size]);
}
}
#[test]
fn test_point_evaluation_on_coset() {
// checks that evaluating a polynomial on a subgroup and its coset is the same as evaluating on a 2x larger subgroup
let log_test_size = 8;
let test_size = 1 << log_test_size;
let (_, mut d_coeffs, mut d_domain) = set_up_points(test_size, log_test_size, false);
let (_, _, mut d_large_domain) = set_up_points(0, log_test_size + 1, false);
let mut d_coset_powers = build_domain(test_size, log_test_size + 1, false);
let mut d_evals_large = evaluate_points(&mut d_coeffs, &mut d_large_domain);
let mut h_evals_large: Vec<Point> = (0..2 * test_size).map(|_| Point::zero()).collect();
d_evals_large.copy_to(&mut h_evals_large[..]).unwrap();
let mut d_evals = evaluate_points(&mut d_coeffs, &mut d_domain);
let mut h_evals: Vec<Point> = (0..test_size).map(|_| Point::zero()).collect();
d_evals.copy_to(&mut h_evals[..]).unwrap();
let mut d_evals_coset = evaluate_points_on_coset(&mut d_coeffs, &mut d_domain, &mut d_coset_powers);
let mut h_evals_coset: Vec<Point> = (0..test_size).map(|_| Point::zero()).collect();
d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();
assert_eq!(h_evals[..], h_evals_large[..test_size]);
assert_eq!(h_evals_coset[..], h_evals_large[test_size..2 * test_size]);
for i in 0..test_size {
assert_ne!(h_evals[i], Point::zero());
assert_ne!(h_evals_coset[i], Point::zero());
assert_ne!(h_evals_large[2 * i], Point::zero());
assert_ne!(h_evals_large[2 * i + 1], Point::zero());
}
}
#[test]
fn test_point_batch_evaluation_on_coset() {
// checks that evaluating a polynomial on a subgroup and its coset is the same as evaluating on a 2x larger subgroup
let batch_size = 2;
let log_test_size = 6;
let test_size = 1 << log_test_size;
let (_, mut d_coeffs, mut d_domain) = set_up_points(test_size * batch_size, log_test_size, false);
let (_, _, mut d_large_domain) = set_up_points(0, log_test_size + 1, false);
let mut d_coset_powers = build_domain(test_size, log_test_size + 1, false);
let mut d_evals_large = evaluate_points_batch(&mut d_coeffs, &mut d_large_domain, batch_size);
let mut h_evals_large: Vec<Point> = (0..2 * test_size * batch_size).map(|_| Point::zero()).collect();
d_evals_large.copy_to(&mut h_evals_large[..]).unwrap();
let mut d_evals = evaluate_points_batch(&mut d_coeffs, &mut d_domain, batch_size);
let mut h_evals: Vec<Point> = (0..test_size * batch_size).map(|_| Point::zero()).collect();
d_evals.copy_to(&mut h_evals[..]).unwrap();
let mut d_evals_coset = evaluate_points_on_coset_batch(&mut d_coeffs, &mut d_domain, batch_size, &mut d_coset_powers);
let mut h_evals_coset: Vec<Point> = (0..test_size * batch_size).map(|_| Point::zero()).collect();
d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();
for i in 0..batch_size {
assert_eq!(h_evals_large[2 * i * test_size..(2 * i + 1) * test_size], h_evals[i * test_size..(i + 1) * test_size]);
assert_eq!(h_evals_large[(2 * i + 1) * test_size..(2 * i + 2) * test_size], h_evals_coset[i * test_size..(i + 1) * test_size]);
}
for i in 0..test_size * batch_size {
assert_ne!(h_evals[i], Point::zero());
assert_ne!(h_evals_coset[i], Point::zero());
assert_ne!(h_evals_large[2 * i], Point::zero());
assert_ne!(h_evals_large[2 * i + 1], Point::zero());
}
}
// testing matrix multiplication by comparing the result of the NTT with the naive multiplication by the DFT matrix
#[test]
fn test_matrix_multiplication() {
let seed = None; // set to Some(value) to fix the rng
let test_size = 1 << 5;
let rou = Fr::get_root_of_unity(test_size).unwrap();
let matrix_flattened: Vec<Scalar> = (0..test_size)
.flat_map(|row_num| {
(0..test_size).map(move |col_num| {
let pow: [u64; 1] = [(row_num * col_num).try_into().unwrap()];
Scalar::from_ark(Fr::pow(&rou, &pow).into_repr())
})
})
.collect();
let vector: Vec<Scalar> = generate_random_scalars(test_size, get_rng(seed));
let result = mult_matrix_by_vec(&matrix_flattened, &vector, 0);
let mut ntt_result = vector.clone();
ntt(&mut ntt_result, 0);
// we don't use the same roots of unity as arkworks, so the results are permutations
// of one another and the only guaranteed fixed scalars are the following ones:
assert_eq!(result[0], ntt_result[0]);
assert_eq!(result[test_size >> 1], ntt_result[test_size >> 1]);
}
#[test]
#[allow(non_snake_case)]
fn test_vec_scalar_mul() {
let mut inout = [Scalar::one(), Scalar::one(), Scalar::zero()];
let expected = [Scalar::one(), Scalar::zero(), Scalar::zero()];
mult_sc_vec(&mut inout, &expected, 0);
assert_eq!(inout, expected);
}
#[test]
#[allow(non_snake_case)]
fn test_vec_point_mul() {
let dummy_one = Point {
x: Base::one(),
y: Base::one(),
z: Base::one(),
};
let mut inout = [dummy_one, dummy_one, Point::zero()];
let scalars = [Scalar::one(), Scalar::zero(), Scalar::zero()];
let expected = [dummy_one, Point::zero(), Point::zero()];
multp_vec(&mut inout, &scalars, 0);
assert_eq!(inout, expected);
}
}

34
bls12-381/Cargo.toml Normal file
View File

@@ -0,0 +1,34 @@
[package]
name = "bls12-381"
version = "0.1.0"
edition = "2021"
authors = [ "Ingonyama" ]
[dependencies]
icicle-core = { path = "../icicle-core" }
hex = "*"
ark-std = "0.3.0"
ark-ff = "0.3.0"
ark-poly = "0.3.0"
ark-ec = { version = "0.3.0", features = [ "parallel" ] }
ark-bls12-381 = "0.3.0"
serde = { version = "1.0", features = ["derive"] }
serde_derive = "1.0"
serde_cbor = "0.11.2"
rustacuda = "0.1"
rustacuda_core = "0.1"
rustacuda_derive = "0.1"
rand = "*" #TODO: move rand and ark dependencies to dev once random scalar/point generation is done "natively"
[build-dependencies]
cc = { version = "1.0", features = ["parallel"] }
[dev-dependencies]
"criterion" = "0.4.0"
[features]
g2 = []

36
bls12-381/build.rs Normal file
View File

@@ -0,0 +1,36 @@
use std::env;
fn main() {
//TODO: check cargo features selected
//TODO: can conflict/duplicate with make ?
println!("cargo:rerun-if-env-changed=CXXFLAGS");
println!("cargo:rerun-if-changed=./icicle");
let arch_type = env::var("ARCH_TYPE").unwrap_or(String::from("native"));
let stream_type = env::var("DEFAULT_STREAM").unwrap_or(String::from("legacy"));
let mut arch = String::from("-arch=");
arch.push_str(&arch_type);
let mut stream = String::from("-default-stream=");
stream.push_str(&stream_type);
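// ARCH_TYPE selects the nvcc target architecture (e.g. sm_70) and
// DEFAULT_STREAM chooses between the legacy and per-thread default CUDA
// stream; both fall back to the defaults above when the variables are unset.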
let mut nvcc = cc::Build::new();
println!("Compiling icicle library using arch: {}", &arch);
if cfg!(feature = "g2") {
nvcc.define("G2_DEFINED", None);
}
nvcc.cuda(true);
nvcc.define("FEATURE_BLS12_381", None);
nvcc.debug(false);
nvcc.flag(&arch);
nvcc.flag(&stream);
nvcc.shared_flag(false);
// nvcc.static_flag(true);
nvcc.files([
"../icicle-cuda/curves/index.cu",
]);
nvcc.compile("ingo_icicle"); //TODO: extension??
}

View File

@@ -0,0 +1,4 @@
pub trait Field<const NUM_LIMBS: usize> {
const MODULUS: [u32; NUM_LIMBS];
const LIMBS: usize = NUM_LIMBS;
}
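// NUM_LIMBS counts 32-bit limbs: the BLS12-381 instantiation in this PR uses
// 8 limbs for the ~255-bit scalar field and 12 limbs for the 381-bit base field.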

View File

@@ -0,0 +1,3 @@
pub mod field;
pub mod scalar;
pub mod point;

View File

@@ -0,0 +1,106 @@
use std::ffi::c_uint;
use ark_ec::AffineCurve;
use ark_ff::{BigInteger256, PrimeField};
use std::mem::transmute;
use ark_ff::Field;
use icicle_core::utils::{u32_vec_to_u64_vec, u64_vec_to_u32_vec};
use rustacuda_core::DeviceCopy;
use rustacuda_derive::DeviceCopy;
use super::scalar::{get_fixed_limbs, self};
#[derive(Debug, Clone, Copy, DeviceCopy)]
#[repr(C)]
pub struct PointT<BF: scalar::ScalarTrait> {
pub x: BF,
pub y: BF,
pub z: BF,
}
impl<BF: DeviceCopy + scalar::ScalarTrait> Default for PointT<BF> {
fn default() -> Self {
PointT::zero()
}
}
impl<BF: DeviceCopy + scalar::ScalarTrait> PointT<BF> {
pub fn zero() -> Self {
PointT {
x: BF::zero(),
y: BF::one(),
z: BF::zero(),
}
}
pub fn infinity() -> Self {
Self::zero()
}
}
#[derive(Debug, PartialEq, Clone, Copy, DeviceCopy)]
#[repr(C)]
pub struct PointAffineNoInfinityT<BF> {
pub x: BF,
pub y: BF,
}
impl<BF: scalar::ScalarTrait> Default for PointAffineNoInfinityT<BF> {
fn default() -> Self {
PointAffineNoInfinityT {
x: BF::zero(),
y: BF::zero(),
}
}
}
impl<BF: Copy + scalar::ScalarTrait> PointAffineNoInfinityT<BF> {
/// From u32 limbs x, y
pub fn from_limbs(x: &[u32], y: &[u32]) -> Self {
PointAffineNoInfinityT {
x: BF::from_limbs(x),
y: BF::from_limbs(y)
}
}
pub fn limbs(&self) -> Vec<u32> {
[self.x.limbs(), self.y.limbs()].concat()
}
pub fn to_projective(&self) -> PointT<BF> {
PointT {
x: self.x,
y: self.y,
z: BF::one(),
}
}
}
impl<BF: Copy + scalar::ScalarTrait> PointT<BF> {
pub fn from_limbs(x: &[u32], y: &[u32], z: &[u32]) -> Self {
PointT {
x: BF::from_limbs(x),
y: BF::from_limbs(y),
z: BF::from_limbs(z)
}
}
pub fn from_xy_limbs(value: &[u32]) -> PointT<BF> {
let l = value.len();
assert_eq!(l, 3 * BF::base_limbs(), "length must be 3 * {}", BF::base_limbs());
PointT {
x: BF::from_limbs(value[..BF::base_limbs()].try_into().unwrap()),
y: BF::from_limbs(value[BF::base_limbs()..BF::base_limbs() * 2].try_into().unwrap()),
z: BF::from_limbs(value[BF::base_limbs() * 2..].try_into().unwrap())
}
}
pub fn to_xy_strip_z(&self) -> PointAffineNoInfinityT<BF> {
PointAffineNoInfinityT {
x: self.x,
y: self.y,
}
}
}
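// A minimal sketch (helper not in the original API): `to_projective` lifts an
// affine point by setting z = 1, so stripping z back off is the identity.
#[allow(dead_code)]
fn affine_roundtrip_holds<BF: Copy + PartialEq + scalar::ScalarTrait>(p: PointAffineNoInfinityT<BF>) -> bool {
p.to_projective().to_xy_strip_z() == p
}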

View File

@@ -0,0 +1,102 @@
use std::ffi::{c_int, c_uint};
use rand::{rngs::StdRng, RngCore, SeedableRng};
use rustacuda_core::DeviceCopy;
use rustacuda_derive::DeviceCopy;
use std::mem::transmute;
use rustacuda::prelude::*;
use rustacuda_core::DevicePointer;
use rustacuda::memory::{DeviceBox, CopyDestination};
use icicle_core::utils::{u32_vec_to_u64_vec, u64_vec_to_u32_vec};
use std::marker::PhantomData;
use std::convert::TryInto;
use super::field::{Field, self};
pub fn get_fixed_limbs<const NUM_LIMBS: usize>(val: &[u32]) -> [u32; NUM_LIMBS] {
match val.len() {
n if n < NUM_LIMBS => {
let mut padded: [u32; NUM_LIMBS] = [0; NUM_LIMBS];
padded[..val.len()].copy_from_slice(&val);
padded
}
n if n == NUM_LIMBS => val.try_into().unwrap(),
_ => panic!("slice has too many elements"),
}
}
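// Example: get_fixed_limbs::<4>(&[1, 2]) pads to [1, 2, 0, 0]; a slice longer
// than NUM_LIMBS panics rather than silently truncating.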
pub trait ScalarTrait {
fn base_limbs() -> usize;
fn zero() -> Self;
fn from_limbs(value: &[u32]) -> Self;
fn one() -> Self;
fn to_bytes_le(&self) -> Vec<u8>;
fn limbs(&self) -> &[u32];
}
#[derive(Debug, PartialEq, Clone, Copy)]
#[repr(C)]
pub struct ScalarT<M, const NUM_LIMBS: usize> {
pub(crate) phantom: PhantomData<M>,
pub(crate) value: [u32; NUM_LIMBS],
}
impl<M, const NUM_LIMBS: usize> ScalarTrait for ScalarT<M, NUM_LIMBS>
where
M: Field<NUM_LIMBS>,
{
fn base_limbs() -> usize {
return NUM_LIMBS;
}
fn zero() -> Self {
ScalarT {
value: [0u32; NUM_LIMBS],
phantom: PhantomData,
}
}
fn from_limbs(value: &[u32]) -> Self {
Self {
value: get_fixed_limbs(value),
phantom: PhantomData,
}
}
fn one() -> Self {
let mut s = [0u32; NUM_LIMBS];
s[0] = 1;
ScalarT { value: s, phantom: PhantomData }
}
fn to_bytes_le(&self) -> Vec<u8> {
self.value
.iter()
.map(|s| s.to_le_bytes().to_vec())
.flatten()
.collect::<Vec<_>>()
}
fn limbs(&self) -> &[u32] {
&self.value
}
}
impl<M, const NUM_LIMBS: usize> ScalarT<M, NUM_LIMBS> where M: field::Field<NUM_LIMBS>{
pub fn from_limbs_le(value: &[u32]) -> ScalarT<M,NUM_LIMBS> {
Self::from_limbs(value)
}
pub fn from_limbs_be(value: &[u32]) -> ScalarT<M,NUM_LIMBS> {
let mut value = value.to_vec();
value.reverse();
Self::from_limbs_le(&value)
}
// Additional functions
// Limb-wise addition with carry propagation. NOTE: no modular reduction is
// performed yet; MODULUS is not used here.
pub fn add(&self, other: ScalarT<M, NUM_LIMBS>) -> ScalarT<M, NUM_LIMBS> {
let mut value = [0u32; NUM_LIMBS];
let mut carry = 0u64;
for i in 0..NUM_LIMBS {
let sum = self.value[i] as u64 + other.value[i] as u64 + carry;
value[i] = sum as u32;
carry = sum >> 32;
}
ScalarT { value, phantom: PhantomData }
}
}
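#[cfg(test)]
mod scalar_repr_tests {
// A minimal sketch of the limb conventions above, using a hypothetical
// two-limb field (`TestField` is not part of the API; its MODULUS is a
// zeroed placeholder, mirroring the WIP moduli elsewhere in this PR).
use super::{Field, ScalarT, ScalarTrait};
#[derive(Debug, PartialEq, Clone, Copy)]
struct TestField;
impl Field<2> for TestField {
const MODULUS: [u32; 2] = [0; 2];
}
type TestScalar = ScalarT<TestField, 2>;
#[test]
fn big_endian_limbs_are_reversed() {
// 0x0000000200000001 written LE as [1, 2] equals BE as [2, 1]
assert_eq!(TestScalar::from_limbs_le(&[1, 2]), TestScalar::from_limbs_be(&[2, 1]));
}
#[test]
fn add_propagates_carry() {
// u32::MAX + 1 overflows limb 0 and carries into limb 1
let a = TestScalar::from_limbs(&[u32::MAX, 0]);
assert_eq!(a.add(TestScalar::one()), TestScalar::from_limbs(&[0, 1]));
}
}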

View File

@@ -0,0 +1,62 @@
use std::ffi::{c_int, c_uint};
use rand::{rngs::StdRng, RngCore, SeedableRng};
use rustacuda_derive::DeviceCopy;
use std::mem::transmute;
use rustacuda::prelude::*;
use rustacuda_core::DevicePointer;
use rustacuda::memory::{DeviceBox, CopyDestination, DeviceCopy};
use std::marker::PhantomData;
use std::convert::TryInto;
use crate::basic_structs::point::{PointT, PointAffineNoInfinityT};
use crate::basic_structs::scalar::ScalarT;
use crate::basic_structs::field::Field;
#[derive(Debug, PartialEq, Clone, Copy, DeviceCopy)]
#[repr(C)]
pub struct ScalarField;
impl Field<8> for ScalarField {
const MODULUS: [u32; 8] = [0x0; 8];
}
#[derive(Debug, PartialEq, Clone, Copy, DeviceCopy)]
#[repr(C)]
pub struct BaseField;
impl Field<12> for BaseField {
const MODULUS: [u32; 12] = [0x0; 12];
}
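// NOTE: the zeroed moduli above are placeholders on this branch; the actual
// BLS12-381 field arithmetic is performed by the CUDA kernels.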
pub type Scalar = ScalarT<ScalarField,8>;
impl Default for Scalar {
fn default() -> Self {
Self { value: [0x0; ScalarField::LIMBS], phantom: PhantomData }
}
}
unsafe impl DeviceCopy for Scalar{}
pub type Base = ScalarT<BaseField,12>;
impl Default for Base {
fn default() -> Self {
Self { value: [0x0; BaseField::LIMBS], phantom: PhantomData }
}
}
unsafe impl DeviceCopy for Base{}
pub type Point = PointT<Base>;
pub type PointAffineNoInfinity = PointAffineNoInfinityT<Base>;
extern "C" {
fn eq(point1: *const Point, point2: *const Point) -> c_uint;
}
impl PartialEq for Point {
fn eq(&self, other: &Self) -> bool {
unsafe { eq(self, other) != 0 }
}
}

798
bls12-381/src/from_cuda.rs Normal file
View File

@@ -0,0 +1,798 @@
use std::ffi::{c_int, c_uint};
use ark_std::UniformRand;
use rand::{rngs::StdRng, RngCore, SeedableRng};
use rustacuda::CudaFlags;
use rustacuda::memory::DeviceBox;
use rustacuda::prelude::{DeviceBuffer, Device, ContextFlags, Context};
use rustacuda_core::DevicePointer;
use std::mem::transmute;
use crate::basic_structs::scalar::ScalarTrait;
use crate::curve_structs::*;
use icicle_core::utils::{u32_vec_to_u64_vec, u64_vec_to_u32_vec};
use std::marker::PhantomData;
use std::convert::TryInto;
use ark_bls12_381::{Fq as Fq_BLS12_381, Fr as Fr_BLS12_381, G1Affine as G1Affine_BLS12_381, G1Projective as G1Projective_BLS12_381};
use ark_ec::AffineCurve;
use ark_ff::{BigInteger384, BigInteger256, PrimeField};
use rustacuda::memory::{CopyDestination, DeviceCopy};
extern "C" {
fn msm_cuda(
out: *mut Point,
points: *const PointAffineNoInfinity,
scalars: *const Scalar,
count: usize,
device_id: usize,
) -> c_uint;
fn msm_batch_cuda(
out: *mut Point,
points: *const PointAffineNoInfinity,
scalars: *const Scalar,
batch_size: usize,
msm_size: usize,
device_id: usize,
) -> c_uint;
fn commit_cuda(
d_out: DevicePointer<Point>,
d_scalars: DevicePointer<Scalar>,
d_points: DevicePointer<PointAffineNoInfinity>,
count: usize,
device_id: usize,
) -> c_uint;
fn commit_batch_cuda(
d_out: DevicePointer<Point>,
d_scalars: DevicePointer<Scalar>,
d_points: DevicePointer<PointAffineNoInfinity>,
count: usize,
batch_size: usize,
device_id: usize,
) -> c_uint;
fn build_domain_cuda(domain_size: usize, logn: usize, inverse: bool, device_id: usize) -> DevicePointer<Scalar>;
fn ntt_cuda(inout: *mut Scalar, n: usize, inverse: bool, device_id: usize) -> c_int;
fn ecntt_cuda(inout: *mut Point, n: usize, inverse: bool, device_id: usize) -> c_int;
fn ntt_batch_cuda(
inout: *mut Scalar,
arr_size: usize,
n: usize,
inverse: bool,
) -> c_int;
fn ecntt_batch_cuda(inout: *mut Point, arr_size: usize, n: usize, inverse: bool) -> c_int;
fn interpolate_scalars_cuda(
d_out: DevicePointer<Scalar>,
d_evaluations: DevicePointer<Scalar>,
d_domain: DevicePointer<Scalar>,
n: usize,
device_id: usize
) -> c_int;
fn interpolate_scalars_batch_cuda(
d_out: DevicePointer<Scalar>,
d_evaluations: DevicePointer<Scalar>,
d_domain: DevicePointer<Scalar>,
n: usize,
batch_size: usize,
device_id: usize
) -> c_int;
fn interpolate_points_cuda(
d_out: DevicePointer<Point>,
d_evaluations: DevicePointer<Point>,
d_domain: DevicePointer<Scalar>,
n: usize,
device_id: usize
) -> c_int;
fn interpolate_points_batch_cuda(
d_out: DevicePointer<Point>,
d_evaluations: DevicePointer<Point>,
d_domain: DevicePointer<Scalar>,
n: usize,
batch_size: usize,
device_id: usize
) -> c_int;
fn evaluate_scalars_cuda(
d_out: DevicePointer<Scalar>,
d_coefficients: DevicePointer<Scalar>,
d_domain: DevicePointer<Scalar>,
domain_size: usize,
n: usize,
device_id: usize
) -> c_int;
fn evaluate_scalars_batch_cuda(
d_out: DevicePointer<Scalar>,
d_coefficients: DevicePointer<Scalar>,
d_domain: DevicePointer<Scalar>,
domain_size: usize,
n: usize,
batch_size: usize,
device_id: usize
) -> c_int;
fn evaluate_points_cuda(
d_out: DevicePointer<Point>,
d_coefficients: DevicePointer<Point>,
d_domain: DevicePointer<Scalar>,
domain_size: usize,
n: usize,
device_id: usize
) -> c_int;
fn evaluate_points_batch_cuda(
d_out: DevicePointer<Point>,
d_coefficients: DevicePointer<Point>,
d_domain: DevicePointer<Scalar>,
domain_size: usize,
n: usize,
batch_size: usize,
device_id: usize
) -> c_int;
fn evaluate_scalars_on_coset_cuda(
d_out: DevicePointer<Scalar>,
d_coefficients: DevicePointer<Scalar>,
d_domain: DevicePointer<Scalar>,
domain_size: usize,
n: usize,
coset_powers: DevicePointer<Scalar>,
device_id: usize
) -> c_int;
fn evaluate_scalars_on_coset_batch_cuda(
d_out: DevicePointer<Scalar>,
d_coefficients: DevicePointer<Scalar>,
d_domain: DevicePointer<Scalar>,
domain_size: usize,
n: usize,
batch_size: usize,
coset_powers: DevicePointer<Scalar>,
device_id: usize
) -> c_int;
fn evaluate_points_on_coset_cuda(
d_out: DevicePointer<Point>,
d_coefficients: DevicePointer<Point>,
d_domain: DevicePointer<Scalar>,
domain_size: usize,
n: usize,
coset_powers: DevicePointer<Scalar>,
device_id: usize
) -> c_int;
fn evaluate_points_on_coset_batch_cuda(
d_out: DevicePointer<Point>,
d_coefficients: DevicePointer<Point>,
d_domain: DevicePointer<Scalar>,
domain_size: usize,
n: usize,
batch_size: usize,
coset_powers: DevicePointer<Scalar>,
device_id: usize
) -> c_int;
fn reverse_order_scalars_cuda(
d_arr: DevicePointer<Scalar>,
n: usize,
device_id: usize
) -> c_int;
fn reverse_order_scalars_batch_cuda(
d_arr: DevicePointer<Scalar>,
n: usize,
batch_size: usize,
device_id: usize
) -> c_int;
fn reverse_order_points_cuda(
d_arr: DevicePointer<Point>,
n: usize,
device_id: usize
) -> c_int;
fn reverse_order_points_batch_cuda(
d_arr: DevicePointer<Point>,
n: usize,
batch_size: usize,
device_id: usize
) -> c_int;
fn vec_mod_mult_point(
inout: *mut Point,
scalars: *const Scalar,
n_elements: usize,
device_id: usize,
) -> c_int;
fn vec_mod_mult_scalar(
inout: *mut Scalar,
scalars: *const Scalar,
n_elements: usize,
device_id: usize,
) -> c_int;
fn matrix_vec_mod_mult(
matrix_flattened: *const Scalar,
input: *const Scalar,
output: *mut Scalar,
n_elements: usize,
device_id: usize,
) -> c_int;
}
pub fn msm(points: &[PointAffineNoInfinity], scalars: &[Scalar], device_id: usize) -> Point {
let count = points.len();
if count != scalars.len() {
todo!("variable length")
}
let mut ret = Point::zero();
unsafe {
msm_cuda(
&mut ret as *mut _ as *mut Point,
points as *const _ as *const PointAffineNoInfinity,
scalars as *const _ as *const Scalar,
scalars.len(),
device_id,
)
};
ret
}
pub fn msm_batch(
points: &[PointAffineNoInfinity],
scalars: &[Scalar],
batch_size: usize,
device_id: usize,
) -> Vec<Point> {
let count = points.len();
if count != scalars.len() {
todo!("variable length")
}
let mut ret = vec![Point::zero(); batch_size];
unsafe {
msm_batch_cuda(
&mut ret[0] as *mut _ as *mut Point,
points as *const _ as *const PointAffineNoInfinity,
scalars as *const _ as *const Scalar,
batch_size,
count / batch_size,
device_id,
)
};
ret
}
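// Usage sketch (host-side, mirroring the tests in this PR): for a batched MSM,
// `points` and `scalars` hold batch_size concatenated instances of equal size:
//   let points = generate_random_points(msm_size * batch_size, get_rng(None));
//   let scalars = generate_random_scalars(msm_size * batch_size, get_rng(None));
//   let results: Vec<Point> = msm_batch(&points, &scalars, batch_size, 0);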
pub fn commit(
points: &mut DeviceBuffer<PointAffineNoInfinity>,
scalars: &mut DeviceBuffer<Scalar>,
) -> DeviceBox<Point> {
let mut res = DeviceBox::new(&Point::zero()).unwrap();
unsafe {
commit_cuda(
res.as_device_ptr(),
scalars.as_device_ptr(),
points.as_device_ptr(),
scalars.len(),
0,
);
}
return res;
}
pub fn commit_batch(
points: &mut DeviceBuffer<PointAffineNoInfinity>,
scalars: &mut DeviceBuffer<Scalar>,
batch_size: usize,
) -> DeviceBuffer<Point> {
let mut res = unsafe { DeviceBuffer::uninitialized(batch_size).unwrap() };
unsafe {
commit_batch_cuda(
res.as_device_ptr(),
scalars.as_device_ptr(),
points.as_device_ptr(),
scalars.len() / batch_size,
batch_size,
0,
);
}
return res;
}
/// Compute an in-place NTT on the input data.
fn ntt_internal(values: &mut [Scalar], device_id: usize, inverse: bool) -> i32 {
let ret_code = unsafe {
ntt_cuda(
values as *mut _ as *mut Scalar,
values.len(),
inverse,
device_id,
)
};
ret_code
}
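/// Forward NTT, in place; `intt` below is its inverse, so `intt(ntt(x)) == x`
/// (exercised by `test_ntt` in this PR's test modules).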
pub fn ntt(values: &mut [Scalar], device_id: usize) {
ntt_internal(values, device_id, false);
}
pub fn intt(values: &mut [Scalar], device_id: usize) {
ntt_internal(values, device_id, true);
}
/// Compute in-place batched NTTs: `values` is treated as contiguous chunks of
/// `ntt_size` elements each, and every chunk is transformed independently.
fn ntt_internal_batch(
values: &mut [Scalar],
_device_id: usize,
ntt_size: usize,
inverse: bool,
) -> i32 {
unsafe {
ntt_batch_cuda(
values as *mut _ as *mut Scalar,
values.len(),
ntt_size,
inverse,
)
}
}
pub fn ntt_batch(values: &mut [Scalar], ntt_size: usize, device_id: usize) {
ntt_internal_batch(values, device_id, ntt_size, false);
}
pub fn intt_batch(values: &mut [Scalar], ntt_size: usize, device_id: usize) {
ntt_internal_batch(values, device_id, ntt_size, true);
}
/// Compute an in-place ECNTT on the input data.
fn ecntt_internal(values: &mut [Point], inverse: bool, device_id: usize) -> i32 {
unsafe {
ecntt_cuda(
values as *mut _ as *mut Point,
values.len(),
inverse,
device_id,
)
}
}
pub fn ecntt(values: &mut [Point], device_id: usize) {
ecntt_internal(values, false, device_id);
}
/// Compute an in-place iECNTT on the input data.
pub fn iecntt(values: &mut [Point], device_id: usize) {
ecntt_internal(values, true, device_id);
}
/// Compute in-place batched ECNTTs: `values` is treated as contiguous chunks of
/// `ntt_size` points each, and every chunk is transformed independently.
fn ecntt_internal_batch(
values: &mut [Point],
_device_id: usize,
ntt_size: usize,
inverse: bool,
) -> i32 {
unsafe {
ecntt_batch_cuda(
values as *mut _ as *mut Point,
values.len(),
ntt_size,
inverse,
)
}
}
pub fn ecntt_batch(values: &mut [Point], ntt_size: usize, device_id: usize) {
ecntt_internal_batch(values, device_id, ntt_size, false);
}
/// Compute an in-place batched iECNTT on the input data.
pub fn iecntt_batch(values: &mut [Point], ntt_size: usize, device_id: usize) {
ecntt_internal_batch(values, device_id, ntt_size, true);
}
pub fn build_domain(domain_size: usize, logn: usize, inverse: bool) -> DeviceBuffer<Scalar> {
unsafe {
DeviceBuffer::from_raw_parts(build_domain_cuda(
domain_size,
logn,
inverse,
0
), domain_size)
}
}
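// Usage sketch: build the forward and inverse domains of size 2^logn once and
// reuse them across the evaluate_*/interpolate_* calls below, as the tests do:
//   let mut d_domain = build_domain(1 << logn, logn, false); // for evaluation
//   let mut d_domain_inv = build_domain(1 << logn, logn, true); // for interpolation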
pub fn reverse_order_scalars(
d_scalars: &mut DeviceBuffer<Scalar>,
) {
unsafe { reverse_order_scalars_cuda(
d_scalars.as_device_ptr(),
d_scalars.len(),
0
); }
}
pub fn reverse_order_scalars_batch(
d_scalars: &mut DeviceBuffer<Scalar>,
batch_size: usize,
) {
unsafe { reverse_order_scalars_batch_cuda(
d_scalars.as_device_ptr(),
d_scalars.len() / batch_size,
batch_size,
0
); }
}
pub fn reverse_order_points(
d_points: &mut DeviceBuffer<Point>,
) {
unsafe { reverse_order_points_cuda(
d_points.as_device_ptr(),
d_points.len(),
0
); }
}
pub fn reverse_order_points_batch(
d_points: &mut DeviceBuffer<Point>,
batch_size: usize,
) {
unsafe { reverse_order_points_batch_cuda(
d_points.as_device_ptr(),
d_points.len() / batch_size,
batch_size,
0
); }
}
pub fn interpolate_scalars(
d_evaluations: &mut DeviceBuffer<Scalar>,
d_domain: &mut DeviceBuffer<Scalar>
) -> DeviceBuffer<Scalar> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
unsafe { interpolate_scalars_cuda(
res.as_device_ptr(),
d_evaluations.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
0
) };
return res;
}
pub fn interpolate_scalars_batch(
d_evaluations: &mut DeviceBuffer<Scalar>,
d_domain: &mut DeviceBuffer<Scalar>,
batch_size: usize,
) -> DeviceBuffer<Scalar> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
unsafe { interpolate_scalars_batch_cuda(
res.as_device_ptr(),
d_evaluations.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
batch_size,
0
) };
return res;
}
pub fn interpolate_points(
d_evaluations: &mut DeviceBuffer<Point>,
d_domain: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Point> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
unsafe { interpolate_points_cuda(
res.as_device_ptr(),
d_evaluations.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
0
) };
return res;
}
pub fn interpolate_points_batch(
d_evaluations: &mut DeviceBuffer<Point>,
d_domain: &mut DeviceBuffer<Scalar>,
batch_size: usize,
) -> DeviceBuffer<Point> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
unsafe { interpolate_points_batch_cuda(
res.as_device_ptr(),
d_evaluations.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
batch_size,
0
) };
return res;
}
pub fn evaluate_scalars(
d_coefficients: &mut DeviceBuffer<Scalar>,
d_domain: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Scalar> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
unsafe {
evaluate_scalars_cuda(
res.as_device_ptr(),
d_coefficients.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
d_coefficients.len(),
0
);
}
return res;
}
pub fn evaluate_scalars_batch(
d_coefficients: &mut DeviceBuffer<Scalar>,
d_domain: &mut DeviceBuffer<Scalar>,
batch_size: usize,
) -> DeviceBuffer<Scalar> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
unsafe {
evaluate_scalars_batch_cuda(
res.as_device_ptr(),
d_coefficients.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
d_coefficients.len() / batch_size,
batch_size,
0
);
}
return res;
}
pub fn evaluate_points(
d_coefficients: &mut DeviceBuffer<Point>,
d_domain: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Point> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
unsafe {
evaluate_points_cuda(
res.as_device_ptr(),
d_coefficients.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
d_coefficients.len(),
0
);
}
return res;
}
pub fn evaluate_points_batch(
d_coefficients: &mut DeviceBuffer<Point>,
d_domain: &mut DeviceBuffer<Scalar>,
batch_size: usize,
) -> DeviceBuffer<Point> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
unsafe {
evaluate_points_batch_cuda(
res.as_device_ptr(),
d_coefficients.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
d_coefficients.len() / batch_size,
batch_size,
0
);
}
return res;
}
pub fn evaluate_scalars_on_coset(
d_coefficients: &mut DeviceBuffer<Scalar>,
d_domain: &mut DeviceBuffer<Scalar>,
coset_powers: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Scalar> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
unsafe {
evaluate_scalars_on_coset_cuda(
res.as_device_ptr(),
d_coefficients.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
d_coefficients.len(),
coset_powers.as_device_ptr(),
0
);
}
return res;
}
pub fn evaluate_scalars_on_coset_batch(
d_coefficients: &mut DeviceBuffer<Scalar>,
d_domain: &mut DeviceBuffer<Scalar>,
batch_size: usize,
coset_powers: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Scalar> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
unsafe {
evaluate_scalars_on_coset_batch_cuda(
res.as_device_ptr(),
d_coefficients.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
d_coefficients.len() / batch_size,
batch_size,
coset_powers.as_device_ptr(),
0
);
}
return res;
}
pub fn evaluate_points_on_coset(
d_coefficients: &mut DeviceBuffer<Point>,
d_domain: &mut DeviceBuffer<Scalar>,
coset_powers: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Point> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
unsafe {
evaluate_points_on_coset_cuda(
res.as_device_ptr(),
d_coefficients.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
d_coefficients.len(),
coset_powers.as_device_ptr(),
0
);
}
return res;
}
pub fn evaluate_points_on_coset_batch(
d_coefficients: &mut DeviceBuffer<Point>,
d_domain: &mut DeviceBuffer<Scalar>,
batch_size: usize,
coset_powers: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Point> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
unsafe {
evaluate_points_on_coset_batch_cuda(
res.as_device_ptr(),
d_coefficients.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
d_coefficients.len() / batch_size,
batch_size,
coset_powers.as_device_ptr(),
0
);
}
return res;
}
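// Note on the *_on_coset variants above: evaluating a polynomial on the coset
// g*H of a size-n domain H amounts to scaling coefficient i by g^i (the
// `coset_powers` buffer) before a size-n evaluation; the tests cross-check the
// subgroup and coset evaluations against a single evaluation on a domain of
// twice the size.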
pub fn multp_vec(a: &mut [Point], b: &[Scalar], device_id: usize) {
assert_eq!(a.len(), b.len());
unsafe {
vec_mod_mult_point(
a as *mut _ as *mut Point,
b as *const _ as *const Scalar,
a.len(),
device_id,
);
}
}
pub fn mult_sc_vec(a: &mut [Scalar], b: &[Scalar], device_id: usize) {
assert_eq!(a.len(), b.len());
unsafe {
vec_mod_mult_scalar(
a as *mut _ as *mut Scalar,
b as *const _ as *const Scalar,
a.len(),
device_id,
);
}
}
// Multiply a flattened matrix by a vector:
// `a` - row-major flattened square matrix;
// `b` - vector to multiply `a` by;
pub fn mult_matrix_by_vec(a: &[Scalar], b: &[Scalar], device_id: usize) -> Vec<Scalar> {
let mut c = vec![Scalar::zero(); b.len()];
unsafe {
matrix_vec_mod_mult(
a as *const _ as *const Scalar,
b as *const _ as *const Scalar,
c.as_mut_slice() as *mut _ as *mut Scalar,
b.len(),
device_id,
);
}
c
}
pub fn clone_buffer<T: DeviceCopy>(buf: &mut DeviceBuffer<T>) -> DeviceBuffer<T> {
let mut buf_cpy = unsafe { DeviceBuffer::uninitialized(buf.len()).unwrap() };
buf_cpy.copy_from(buf).unwrap();
return buf_cpy;
}
pub fn get_rng(seed: Option<u64>) -> Box<dyn RngCore> {
let rng: Box<dyn RngCore> = match seed {
Some(seed) => Box::new(StdRng::seed_from_u64(seed)),
None => Box::new(rand::thread_rng()),
};
rng
}
fn set_up_device() {
// Initialize the CUDA driver API and create a context on the first device.
rustacuda::init(CudaFlags::empty()).unwrap();
let device = Device::get_device(0).unwrap();
let _ctx = Context::create_and_push(ContextFlags::MAP_HOST | ContextFlags::SCHED_AUTO, device).unwrap();
}
pub fn generate_random_points(
count: usize,
mut rng: Box<dyn RngCore>,
) -> Vec<PointAffineNoInfinity> {
(0..count)
.map(|_| Point::from_ark(G1Projective_BLS12_381::rand(&mut rng)).to_xy_strip_z())
.collect()
}
pub fn generate_random_points_proj(count: usize, mut rng: Box<dyn RngCore>) -> Vec<Point> {
(0..count)
.map(|_| Point::from_ark(G1Projective_BLS12_381::rand(&mut rng)))
.collect()
}
pub fn generate_random_scalars(count: usize, mut rng: Box<dyn RngCore>) -> Vec<Scalar> {
(0..count)
.map(|_| Scalar::from_ark(Fr_BLS12_381::rand(&mut rng).into_repr()))
.collect()
}
pub fn set_up_points(test_size: usize, log_domain_size: usize, inverse: bool) -> (Vec<Point>, DeviceBuffer<Point>, DeviceBuffer<Scalar>) {
set_up_device();
let d_domain = build_domain(1 << log_domain_size, log_domain_size, inverse);
let seed = Some(0); // fix the rng so repeated set-ups produce identical data
let vector = generate_random_points_proj(test_size, get_rng(seed));
let mut vector_mut = vector.clone();
let mut d_vector = DeviceBuffer::from_slice(&vector[..]).unwrap();
(vector_mut, d_vector, d_domain)
}
pub fn set_up_scalars(test_size: usize, log_domain_size: usize, inverse: bool) -> (Vec<Scalar>, DeviceBuffer<Scalar>, DeviceBuffer<Scalar>) {
set_up_device();
let d_domain = build_domain(1 << log_domain_size, log_domain_size, inverse);
let seed = Some(0); // fix the rng so repeated set-ups produce identical data
let mut vector_mut = generate_random_scalars(test_size, get_rng(seed));
let mut d_vector = DeviceBuffer::from_slice(&vector_mut[..]).unwrap();
(vector_mut, d_vector, d_domain)
}

4
bls12-381/src/lib.rs Normal file
View File

@@ -0,0 +1,4 @@
pub mod test_bls12_381;
pub mod basic_structs;
pub mod from_cuda;
pub mod curve_structs;

View File

@@ -0,0 +1,816 @@
use std::ffi::{c_int, c_uint};
use ark_std::UniformRand;
use rand::{rngs::StdRng, RngCore, SeedableRng};
use rustacuda::CudaFlags;
use rustacuda::memory::DeviceBox;
use rustacuda::prelude::{DeviceBuffer, Device, ContextFlags, Context};
use rustacuda_core::DevicePointer;
use std::mem::transmute;
pub use crate::basic_structs::scalar::ScalarTrait;
pub use crate::curve_structs::*;
use icicle_core::utils::{u32_vec_to_u64_vec, u64_vec_to_u32_vec};
use std::marker::PhantomData;
use std::convert::TryInto;
use ark_bls12_381::{Fq as Fq_BLS12_381, Fr as Fr_BLS12_381, G1Affine as G1Affine_BLS12_381, G1Projective as G1Projective_BLS12_381};
use ark_ec::AffineCurve;
use ark_ff::{BigInteger384, BigInteger256, PrimeField};
use rustacuda::memory::{CopyDestination, DeviceCopy};
impl Scalar {
pub fn to_biginteger256(&self) -> BigInteger256 {
BigInteger256::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap())
}
pub fn to_ark(&self) -> BigInteger256 {
BigInteger256::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap())
}
pub fn from_biginteger256(ark: BigInteger256) -> Self {
Self{ value: u64_vec_to_u32_vec(&ark.0).try_into().unwrap(), phantom : PhantomData}
}
pub fn to_biginteger256_transmute(&self) -> BigInteger256 {
unsafe { transmute(*self) }
}
pub fn from_biginteger_transmute(v: BigInteger256) -> Scalar {
Scalar{ value: unsafe{ transmute(v)}, phantom : PhantomData }
}
pub fn to_ark_transmute(&self) -> Fr_BLS12_381 {
unsafe { std::mem::transmute(*self) }
}
pub fn from_ark_transmute(v: &Fr_BLS12_381) -> Scalar {
unsafe { std::mem::transmute_copy(v) }
}
pub fn to_ark_mod_p(&self) -> Fr_BLS12_381 {
Fr_BLS12_381::new(BigInteger256::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap()))
}
pub fn to_ark_repr(&self) -> Fr_BLS12_381 {
Fr_BLS12_381::from_repr(BigInteger256::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap())).unwrap()
}
pub fn from_ark(v: BigInteger256) -> Scalar {
Self { value : u64_vec_to_u32_vec(&v.0).try_into().unwrap(), phantom: PhantomData}
}
}
impl Base {
pub fn to_ark(&self) -> BigInteger384 {
BigInteger384::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap())
}
pub fn from_ark(ark: BigInteger384) -> Self {
Self::from_limbs(&u64_vec_to_u32_vec(&ark.0))
}
}
impl Point {
pub fn to_ark(&self) -> G1Projective_BLS12_381 {
self.to_ark_affine().into_projective()
}
pub fn to_ark_affine(&self) -> G1Affine_BLS12_381 {
//TODO: generic conversion
use ark_ff::Field;
use std::ops::Mul;
let proj_x_field = Fq_BLS12_381::from_le_bytes_mod_order(&self.x.to_bytes_le());
let proj_y_field = Fq_BLS12_381::from_le_bytes_mod_order(&self.y.to_bytes_le());
let proj_z_field = Fq_BLS12_381::from_le_bytes_mod_order(&self.z.to_bytes_le());
let inverse_z = proj_z_field.inverse().unwrap();
let aff_x = proj_x_field.mul(inverse_z);
let aff_y = proj_y_field.mul(inverse_z);
G1Affine_BLS12_381::new(aff_x, aff_y, false)
}
pub fn from_ark(ark: G1Projective_BLS12_381) -> Point {
use ark_ff::Field;
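// arkworks 0.3 projective points use Jacobian coordinates, i.e. affine
// x = X / Z^2 and y = Y / Z^3, hence the squared and cubed inverses below.
// Note: `unwrap()` panics for the point at infinity (Z = 0).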
let z_inv = ark.z.inverse().unwrap();
let z_inv_sq = z_inv * z_inv;
let z_inv_cubed = z_inv_sq * z_inv;
Point {
x: Base::from_ark((ark.x * z_inv_sq).into_repr()),
y: Base::from_ark((ark.y * z_inv_cubed).into_repr()),
z: Base::one(),
}
}
}
impl PointAffineNoInfinity {
pub fn to_ark(&self) -> G1Affine_BLS12_381 {
G1Affine_BLS12_381::new(Fq_BLS12_381::new(self.x.to_ark()), Fq_BLS12_381::new(self.y.to_ark()), false)
}
pub fn to_ark_repr(&self) -> G1Affine_BLS12_381 {
G1Affine_BLS12_381::new(
Fq_BLS12_381::from_repr(self.x.to_ark()).unwrap(),
Fq_BLS12_381::from_repr(self.y.to_ark()).unwrap(),
false,
)
}
pub fn from_ark(p: &G1Affine_BLS12_381) -> Self {
PointAffineNoInfinity {
x: Base::from_ark(p.x.into_repr()),
y: Base::from_ark(p.y.into_repr()),
}
}
}
impl Point {
pub fn to_affine(&self) -> PointAffineNoInfinity {
let ark_affine = self.to_ark_affine();
PointAffineNoInfinity {
x: Base::from_ark(ark_affine.x.into_repr()),
y: Base::from_ark(ark_affine.y.into_repr()),
}
}
}
#[cfg(test)]
pub(crate) mod tests_bls12_381 {
use std::ops::Add;
use ark_bls12_381::{Fr, G1Affine, G1Projective};
use ark_ec::{msm::VariableBaseMSM, AffineCurve, ProjectiveCurve};
use ark_ff::{FftField, Field, Zero, PrimeField};
use ark_std::UniformRand;
use rustacuda::prelude::{DeviceBuffer, CopyDestination};
use crate::curve_structs::{Point, Scalar, Base};
use crate::basic_structs::scalar::ScalarTrait;
use crate::from_cuda::{generate_random_points, get_rng, generate_random_scalars, msm, msm_batch, set_up_scalars, commit, commit_batch, ntt, intt, generate_random_points_proj, ecntt, iecntt, ntt_batch, ecntt_batch, iecntt_batch, intt_batch, reverse_order_scalars_batch, interpolate_scalars_batch, set_up_points, reverse_order_points, interpolate_points, reverse_order_points_batch, interpolate_points_batch, evaluate_scalars, interpolate_scalars, reverse_order_scalars, evaluate_points, build_domain, evaluate_scalars_on_coset, evaluate_points_on_coset, mult_matrix_by_vec, mult_sc_vec, multp_vec,evaluate_scalars_batch, evaluate_points_batch, evaluate_scalars_on_coset_batch, evaluate_points_on_coset_batch};
fn random_points_ark_proj(nof_elements: usize) -> Vec<G1Projective> {
let mut rng = ark_std::rand::thread_rng();
(0..nof_elements).map(|_| G1Projective::rand(&mut rng)).collect()
}
fn ecntt_arc_naive(
points: &Vec<G1Projective>,
size: usize,
inverse: bool,
) -> Vec<G1Projective> {
let mut result = vec![G1Projective::zero(); size];
let rou = if !inverse {
Fr::get_root_of_unity(size).unwrap()
} else {
Fr::inverse(&Fr::get_root_of_unity(size).unwrap()).unwrap()
};
for k in 0..size {
for l in 0..size {
let pow: [u64; 1] = [(l * k).try_into().unwrap()];
let mul_rou = Fr::pow(&rou, &pow);
result[k] = result[k].add(points[l].into_affine().mul(mul_rou));
}
}
if inverse {
let multfactor = Fr::inverse(&Fr::from(size as u64)).unwrap();
for k in 0..size {
result[k] = result[k].into_affine().mul(multfactor);
}
}
return result;
}
fn check_eq(points: &Vec<G1Projective>, points2: &Vec<G1Projective>) -> bool {
points.iter().zip(points2.iter()).all(|(p1, p2)| p1 == p2)
}
fn test_naive_ark_ecntt(size: usize) {
let points = random_points_ark_proj(size);
let result1: Vec<G1Projective> = ecntt_arc_naive(&points, size, false);
let result2: Vec<G1Projective> = ecntt_arc_naive(&result1, size, true);
assert!(!check_eq(&result2, &result1));
assert!(check_eq(&result2, &points));
}
#[test]
fn test_msm() {
let test_sizes = [6, 9];
for pow2 in test_sizes {
let count = 1 << pow2;
let seed = None; // set Some to provide seed
let points = generate_random_points(count, get_rng(seed));
let scalars = generate_random_scalars(count, get_rng(seed));
let msm_result = msm(&points, &scalars, 0);
let point_r_ark: Vec<_> = points.iter().map(|x| x.to_ark_repr()).collect();
let scalars_r_ark: Vec<_> = scalars.iter().map(|x| x.to_ark()).collect();
let msm_result_ark = VariableBaseMSM::multi_scalar_mul(&point_r_ark, &scalars_r_ark);
assert_eq!(msm_result.to_ark_affine(), msm_result_ark);
assert_eq!(msm_result.to_ark(), msm_result_ark);
assert_eq!(
msm_result.to_ark_affine(),
Point::from_ark(msm_result_ark).to_ark_affine()
);
}
}
#[test]
fn test_batch_msm() {
for batch_pow2 in [2, 4] {
for pow2 in [4, 6] {
let msm_size = 1 << pow2;
let batch_size = 1 << batch_pow2;
let seed = None; // set Some to provide seed
let points_batch = generate_random_points(msm_size * batch_size, get_rng(seed));
let scalars_batch = generate_random_scalars(msm_size * batch_size, get_rng(seed));
let point_r_ark: Vec<_> = points_batch.iter().map(|x| x.to_ark_repr()).collect();
let scalars_r_ark: Vec<_> = scalars_batch.iter().map(|x| x.to_ark()).collect();
let expected: Vec<_> = point_r_ark
.chunks(msm_size)
.zip(scalars_r_ark.chunks(msm_size))
.map(|p| Point::from_ark(VariableBaseMSM::multi_scalar_mul(p.0, p.1)))
.collect();
let result = msm_batch(&points_batch, &scalars_batch, batch_size, 0);
assert_eq!(result, expected);
}
}
}
#[test]
fn test_commit() {
let test_size = 1 << 8;
let seed = Some(0);
let (mut scalars, mut d_scalars, _) = set_up_scalars(test_size, 0, false);
let mut points = generate_random_points(test_size, get_rng(seed));
let mut d_points = DeviceBuffer::from_slice(&points[..]).unwrap();
let msm_result = msm(&points, &scalars, 0);
let mut d_commit_result = commit(&mut d_points, &mut d_scalars);
let mut h_commit_result = Point::zero();
d_commit_result.copy_to(&mut h_commit_result).unwrap();
assert_eq!(msm_result, h_commit_result);
assert_ne!(msm_result, Point::zero());
assert_ne!(h_commit_result, Point::zero());
}
#[test]
fn test_batch_commit() {
let batch_size = 4;
let test_size = 1 << 12;
let seed = Some(0);
let (scalars, mut d_scalars, _) = set_up_scalars(test_size * batch_size, 0, false);
let points = generate_random_points(test_size * batch_size, get_rng(seed));
let mut d_points = DeviceBuffer::from_slice(&points[..]).unwrap();
let msm_result = msm_batch(&points, &scalars, batch_size, 0);
let mut d_commit_result = commit_batch(&mut d_points, &mut d_scalars, batch_size);
let mut h_commit_result: Vec<Point> = (0..batch_size).map(|_| Point::zero()).collect();
d_commit_result.copy_to(&mut h_commit_result[..]).unwrap();
assert_eq!(msm_result, h_commit_result);
for h in h_commit_result {
assert_ne!(h, Point::zero());
}
}
#[test]
fn test_ntt() {
//NTT
let seed = None; // set to Some(value) to fix the rng
let test_size = 1 << 3;
let scalars = generate_random_scalars(test_size, get_rng(seed));
let mut ntt_result = scalars.clone();
ntt(&mut ntt_result, 0);
assert_ne!(ntt_result, scalars);
let mut intt_result = ntt_result.clone();
intt(&mut intt_result, 0);
assert_eq!(intt_result, scalars);
//ECNTT
let points_proj = generate_random_points_proj(test_size, get_rng(seed));
test_naive_ark_ecntt(test_size);
assert!(points_proj[0].to_ark().into_affine().is_on_curve());
//naive ark
let points_proj_ark = points_proj
.iter()
.map(|p| p.to_ark())
.collect::<Vec<G1Projective>>();
let ecntt_result_naive = ecntt_arc_naive(&points_proj_ark, points_proj_ark.len(), false);
let iecntt_result_naive = ecntt_arc_naive(&ecntt_result_naive, points_proj_ark.len(), true);
assert_eq!(points_proj_ark, iecntt_result_naive);
//ingo gpu
let mut ecntt_result = points_proj.to_vec();
ecntt(&mut ecntt_result, 0);
assert_ne!(ecntt_result, points_proj);
let mut iecntt_result = ecntt_result.clone();
iecntt(&mut iecntt_result, 0);
assert_eq!(
iecntt_result_naive,
points_proj
.iter()
.map(|p| p.to_ark_affine())
.collect::<Vec<G1Affine>>()
);
assert_eq!(
iecntt_result
.iter()
.map(|p| p.to_ark_affine())
.collect::<Vec<G1Affine>>(),
points_proj
.iter()
.map(|p| p.to_ark_affine())
.collect::<Vec<G1Affine>>()
);
}
#[test]
fn test_ntt_batch() {
//NTT
let seed = None; // set to Some(value) to fix the rng
let test_size = 1 << 5;
let batches = 4;
let scalars_batch: Vec<Scalar> =
generate_random_scalars(test_size * batches, get_rng(seed));
let mut scalar_vec_of_vec: Vec<Vec<Scalar>> = Vec::new();
for i in 0..batches {
scalar_vec_of_vec.push(scalars_batch[i * test_size..(i + 1) * test_size].to_vec());
}
let mut ntt_result = scalars_batch.clone();
// do batch ntt
ntt_batch(&mut ntt_result, test_size, 0);
let mut ntt_result_vec_of_vec = Vec::new();
// do ntt for every chunk
for i in 0..batches {
ntt_result_vec_of_vec.push(scalar_vec_of_vec[i].clone());
ntt(&mut ntt_result_vec_of_vec[i], 0);
}
// check that the ntt of each chunk matches the corresponding slice of the batched ntt
for i in 0..batches {
assert_eq!(
ntt_result_vec_of_vec[i],
ntt_result[i * test_size..(i + 1) * test_size]
);
}
// check that ntt output is different from input
assert_ne!(ntt_result, scalars_batch);
let mut intt_result = ntt_result.clone();
// do batch intt
intt_batch(&mut intt_result, test_size, 0);
let mut intt_result_vec_of_vec = Vec::new();
// do intt for every chunk
for i in 0..batches {
intt_result_vec_of_vec.push(ntt_result_vec_of_vec[i].clone());
intt(&mut intt_result_vec_of_vec[i], 0);
}
// check that the intt of each chunk matches the corresponding slice of the batched intt
for i in 0..batches {
assert_eq!(
intt_result_vec_of_vec[i],
intt_result[i * test_size..(i + 1) * test_size]
);
}
assert_eq!(intt_result, scalars_batch);
// ECNTT
let points_proj = generate_random_points_proj(test_size * batches, get_rng(seed));
let mut points_vec_of_vec: Vec<Vec<Point>> = Vec::new();
for i in 0..batches {
points_vec_of_vec.push(points_proj[i * test_size..(i + 1) * test_size].to_vec());
}
let mut ntt_result_points = points_proj.clone();
// do batch ecntt
ecntt_batch(&mut ntt_result_points, test_size, 0);
let mut ntt_result_points_vec_of_vec = Vec::new();
for i in 0..batches {
ntt_result_points_vec_of_vec.push(points_vec_of_vec[i].clone());
ecntt(&mut ntt_result_points_vec_of_vec[i], 0);
}
for i in 0..batches {
assert_eq!(
ntt_result_points_vec_of_vec[i],
ntt_result_points[i * test_size..(i + 1) * test_size]
);
}
assert_ne!(ntt_result_points, points_proj);
let mut intt_result_points = ntt_result_points.clone();
// do batch iecntt
iecntt_batch(&mut intt_result_points, test_size, 0);
let mut intt_result_points_vec_of_vec = Vec::new();
// do ecintt for every chunk
for i in 0..batches {
intt_result_points_vec_of_vec.push(ntt_result_points_vec_of_vec[i].clone());
iecntt(&mut intt_result_points_vec_of_vec[i], 0);
}
// check that the iecntt of each chunk matches the corresponding slice of the batched iecntt
for i in 0..batches {
assert_eq!(
intt_result_points_vec_of_vec[i],
intt_result_points[i * test_size..(i + 1) * test_size]
);
}
assert_eq!(intt_result_points, points_proj);
}
#[test]
fn test_scalar_interpolation() {
let log_test_size = 7;
let test_size = 1 << log_test_size;
let (mut evals_mut, mut d_evals, mut d_domain) = set_up_scalars(test_size, log_test_size, true);
reverse_order_scalars(&mut d_evals);
let mut d_coeffs = interpolate_scalars(&mut d_evals, &mut d_domain);
intt(&mut evals_mut, 0);
let mut h_coeffs: Vec<Scalar> = (0..test_size).map(|_| Scalar::zero()).collect();
d_coeffs.copy_to(&mut h_coeffs[..]).unwrap();
assert_eq!(h_coeffs, evals_mut);
}
#[test]
fn test_scalar_batch_interpolation() {
let batch_size = 4;
let log_test_size = 10;
let test_size = 1 << log_test_size;
let (mut evals_mut, mut d_evals, mut d_domain) = set_up_scalars(test_size * batch_size, log_test_size, true);
reverse_order_scalars_batch(&mut d_evals, batch_size);
let mut d_coeffs = interpolate_scalars_batch(&mut d_evals, &mut d_domain, batch_size);
intt_batch(&mut evals_mut, test_size, 0);
let mut h_coeffs: Vec<Scalar> = (0..test_size * batch_size).map(|_| Scalar::zero()).collect();
d_coeffs.copy_to(&mut h_coeffs[..]).unwrap();
assert_eq!(h_coeffs, evals_mut);
}
#[test]
fn test_point_interpolation() {
let log_test_size = 6;
let test_size = 1 << log_test_size;
let (mut evals_mut, mut d_evals, mut d_domain) = set_up_points(test_size, log_test_size, true);
reverse_order_points(&mut d_evals);
let mut d_coeffs = interpolate_points(&mut d_evals, &mut d_domain);
iecntt(&mut evals_mut[..], 0);
let mut h_coeffs: Vec<Point> = (0..test_size).map(|_| Point::zero()).collect();
d_coeffs.copy_to(&mut h_coeffs[..]).unwrap();
assert_eq!(h_coeffs, *evals_mut);
for h in h_coeffs.iter() {
assert_ne!(*h, Point::zero());
}
}
#[test]
fn test_point_batch_interpolation() {
let batch_size = 4;
let log_test_size = 6;
let test_size = 1 << log_test_size;
let (mut evals_mut, mut d_evals, mut d_domain) = set_up_points(test_size * batch_size, log_test_size, true);
reverse_order_points_batch(&mut d_evals, batch_size);
let mut d_coeffs = interpolate_points_batch(&mut d_evals, &mut d_domain, batch_size);
iecntt_batch(&mut evals_mut[..], test_size, 0);
let mut h_coeffs: Vec<Point> = (0..test_size * batch_size).map(|_| Point::zero()).collect();
d_coeffs.copy_to(&mut h_coeffs[..]).unwrap();
assert_eq!(h_coeffs, *evals_mut);
for h in h_coeffs.iter() {
assert_ne!(*h, Point::zero());
}
}
#[test]
fn test_scalar_evaluation() {
let log_test_domain_size = 8;
let coeff_size = 1 << 6;
let (h_coeffs, mut d_coeffs, mut d_domain) = set_up_scalars(coeff_size, log_test_domain_size, false);
let (_, _, mut d_domain_inv) = set_up_scalars(0, log_test_domain_size, true);
let mut d_evals = evaluate_scalars(&mut d_coeffs, &mut d_domain);
let mut d_coeffs_domain = interpolate_scalars(&mut d_evals, &mut d_domain_inv);
let mut h_coeffs_domain: Vec<Scalar> = (0..1 << log_test_domain_size).map(|_| Scalar::zero()).collect();
d_coeffs_domain.copy_to(&mut h_coeffs_domain[..]).unwrap();
assert_eq!(h_coeffs, h_coeffs_domain[..coeff_size]);
for i in coeff_size..(1 << log_test_domain_size) {
assert_eq!(Scalar::zero(), h_coeffs_domain[i]);
}
}
#[test]
fn test_scalar_batch_evaluation() {
let batch_size = 6;
let log_test_domain_size = 8;
let domain_size = 1 << log_test_domain_size;
let coeff_size = 1 << 6;
let (h_coeffs, mut d_coeffs, mut d_domain) = set_up_scalars(coeff_size * batch_size, log_test_domain_size, false);
let (_, _, mut d_domain_inv) = set_up_scalars(0, log_test_domain_size, true);
let mut d_evals = evaluate_scalars_batch(&mut d_coeffs, &mut d_domain, batch_size);
let mut d_coeffs_domain = interpolate_scalars_batch(&mut d_evals, &mut d_domain_inv, batch_size);
let mut h_coeffs_domain: Vec<Scalar> = (0..domain_size * batch_size).map(|_| Scalar::zero()).collect();
d_coeffs_domain.copy_to(&mut h_coeffs_domain[..]).unwrap();
for j in 0..batch_size {
assert_eq!(h_coeffs[j * coeff_size..(j + 1) * coeff_size], h_coeffs_domain[j * domain_size..j * domain_size + coeff_size]);
for i in coeff_size..domain_size {
assert_eq!(Scalar::zero(), h_coeffs_domain[j * domain_size + i]);
}
}
}
#[test]
fn test_point_evaluation() {
let log_test_domain_size = 7;
let coeff_size = 1 << 7;
let (h_coeffs, mut d_coeffs, mut d_domain) = set_up_points(coeff_size, log_test_domain_size, false);
let (_, _, mut d_domain_inv) = set_up_points(0, log_test_domain_size, true);
let mut d_evals = evaluate_points(&mut d_coeffs, &mut d_domain);
let mut d_coeffs_domain = interpolate_points(&mut d_evals, &mut d_domain_inv);
let mut h_coeffs_domain: Vec<Point> = (0..1 << log_test_domain_size).map(|_| Point::zero()).collect();
d_coeffs_domain.copy_to(&mut h_coeffs_domain[..]).unwrap();
assert_eq!(h_coeffs[..], h_coeffs_domain[..coeff_size]);
for i in coeff_size..(1 << log_test_domain_size) {
assert_eq!(Point::zero(), h_coeffs_domain[i]);
}
for i in 0..coeff_size {
assert_ne!(h_coeffs_domain[i], Point::zero());
}
}
#[test]
fn test_point_batch_evaluation() {
let batch_size = 4;
let log_test_domain_size = 6;
let domain_size = 1 << log_test_domain_size;
let coeff_size = 1 << 5;
let (h_coeffs, mut d_coeffs, mut d_domain) = set_up_points(coeff_size * batch_size, log_test_domain_size, false);
let (_, _, mut d_domain_inv) = set_up_points(0, log_test_domain_size, true);
let mut d_evals = evaluate_points_batch(&mut d_coeffs, &mut d_domain, batch_size);
let mut d_coeffs_domain = interpolate_points_batch(&mut d_evals, &mut d_domain_inv, batch_size);
let mut h_coeffs_domain: Vec<Point> = (0..domain_size * batch_size).map(|_| Point::zero()).collect();
d_coeffs_domain.copy_to(&mut h_coeffs_domain[..]).unwrap();
for j in 0..batch_size {
assert_eq!(h_coeffs[j * coeff_size..(j + 1) * coeff_size], h_coeffs_domain[j * domain_size..(j * domain_size + coeff_size)]);
for i in coeff_size..domain_size {
assert_eq!(Point::zero(), h_coeffs_domain[j * domain_size + i]);
}
for i in j * domain_size..(j * domain_size + coeff_size) {
assert_ne!(h_coeffs_domain[i], Point::zero());
}
}
}
#[test]
fn test_scalar_evaluation_on_trivial_coset() {
// checks that the evaluations on the subgroup are the same as on the coset generated by 1
let log_test_domain_size = 8;
let coeff_size = 1 << 6;
let (_, mut d_coeffs, mut d_domain) = set_up_scalars(coeff_size, log_test_domain_size, false);
let (_, _, mut d_domain_inv) = set_up_scalars(coeff_size, log_test_domain_size, true);
let mut d_trivial_coset_powers = build_domain(1 << log_test_domain_size, 0, false);
let mut d_evals = evaluate_scalars(&mut d_coeffs, &mut d_domain);
let mut h_coeffs: Vec<Scalar> = (0..1 << log_test_domain_size).map(|_| Scalar::zero()).collect();
d_evals.copy_to(&mut h_coeffs[..]).unwrap();
let mut d_evals_coset = evaluate_scalars_on_coset(&mut d_coeffs, &mut d_domain, &mut d_trivial_coset_powers);
let mut h_evals_coset: Vec<Scalar> = (0..1 << log_test_domain_size).map(|_| Scalar::zero()).collect();
d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();
assert_eq!(h_coeffs, h_evals_coset);
}
#[test]
fn test_scalar_evaluation_on_coset() {
// checks that evaluating a polynomial on a subgroup and its coset is the same as evaluating on a 2x larger subgroup
let log_test_size = 8;
let test_size = 1 << log_test_size;
let (_, mut d_coeffs, mut d_domain) = set_up_scalars(test_size, log_test_size, false);
let (_, _, mut d_large_domain) = set_up_scalars(0, log_test_size + 1, false);
let mut d_coset_powers = build_domain(test_size, log_test_size + 1, false);
let mut d_evals_large = evaluate_scalars(&mut d_coeffs, &mut d_large_domain);
let mut h_evals_large: Vec<Scalar> = (0..2 * test_size).map(|_| Scalar::zero()).collect();
d_evals_large.copy_to(&mut h_evals_large[..]).unwrap();
let mut d_evals = evaluate_scalars(&mut d_coeffs, &mut d_domain);
let mut h_evals: Vec<Scalar> = (0..test_size).map(|_| Scalar::zero()).collect();
d_evals.copy_to(&mut h_evals[..]).unwrap();
let mut d_evals_coset = evaluate_scalars_on_coset(&mut d_coeffs, &mut d_domain, &mut d_coset_powers);
let mut h_evals_coset: Vec<Scalar> = (0..test_size).map(|_| Scalar::zero()).collect();
d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();
assert_eq!(h_evals[..], h_evals_large[..test_size]);
assert_eq!(h_evals_coset[..], h_evals_large[test_size..2 * test_size]);
}
#[test]
fn test_scalar_batch_evaluation_on_coset() {
// checks that evaluating a polynomial on a subgroup and its coset is the same as evaluating on a 2x larger subgroup
let batch_size = 4;
let log_test_size = 6;
let test_size = 1 << log_test_size;
let (_, mut d_coeffs, mut d_domain) = set_up_scalars(test_size * batch_size, log_test_size, false);
let (_, _, mut d_large_domain) = set_up_scalars(0, log_test_size + 1, false);
let mut d_coset_powers = build_domain(test_size, log_test_size + 1, false);
let mut d_evals_large = evaluate_scalars_batch(&mut d_coeffs, &mut d_large_domain, batch_size);
let mut h_evals_large: Vec<Scalar> = (0..2 * test_size * batch_size).map(|_| Scalar::zero()).collect();
d_evals_large.copy_to(&mut h_evals_large[..]).unwrap();
let mut d_evals = evaluate_scalars_batch(&mut d_coeffs, &mut d_domain, batch_size);
let mut h_evals: Vec<Scalar> = (0..test_size * batch_size).map(|_| Scalar::zero()).collect();
d_evals.copy_to(&mut h_evals[..]).unwrap();
let mut d_evals_coset = evaluate_scalars_on_coset_batch(&mut d_coeffs, &mut d_domain, batch_size, &mut d_coset_powers);
let mut h_evals_coset: Vec<Scalar> = (0..test_size * batch_size).map(|_| Scalar::zero()).collect();
d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();
for i in 0..batch_size {
assert_eq!(h_evals_large[2 * i * test_size..(2 * i + 1) * test_size], h_evals[i * test_size..(i + 1) * test_size]);
assert_eq!(h_evals_large[(2 * i + 1) * test_size..(2 * i + 2) * test_size], h_evals_coset[i * test_size..(i + 1) * test_size]);
}
}
#[test]
fn test_point_evaluation_on_coset() {
// checks that evaluating a polynomial on a subgroup and its coset is the same as evaluating on a 2x larger subgroup
let log_test_size = 8;
let test_size = 1 << log_test_size;
let (_, mut d_coeffs, mut d_domain) = set_up_points(test_size, log_test_size, false);
let (_, _, mut d_large_domain) = set_up_points(0, log_test_size + 1, false);
let mut d_coset_powers = build_domain(test_size, log_test_size + 1, false);
let mut d_evals_large = evaluate_points(&mut d_coeffs, &mut d_large_domain);
let mut h_evals_large: Vec<Point> = (0..2 * test_size).map(|_| Point::zero()).collect();
d_evals_large.copy_to(&mut h_evals_large[..]).unwrap();
let mut d_evals = evaluate_points(&mut d_coeffs, &mut d_domain);
let mut h_evals: Vec<Point> = (0..test_size).map(|_| Point::zero()).collect();
d_evals.copy_to(&mut h_evals[..]).unwrap();
let mut d_evals_coset = evaluate_points_on_coset(&mut d_coeffs, &mut d_domain, &mut d_coset_powers);
let mut h_evals_coset: Vec<Point> = (0..test_size).map(|_| Point::zero()).collect();
d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();
assert_eq!(h_evals[..], h_evals_large[..test_size]);
assert_eq!(h_evals_coset[..], h_evals_large[test_size..2 * test_size]);
for i in 0..test_size {
assert_ne!(h_evals[i], Point::zero());
assert_ne!(h_evals_coset[i], Point::zero());
assert_ne!(h_evals_large[2 * i], Point::zero());
assert_ne!(h_evals_large[2 * i + 1], Point::zero());
}
}
#[test]
fn test_point_batch_evaluation_on_coset() {
// checks that evaluating a polynomial on a subgroup and its coset is the same as evaluating on a 2x larger subgroup
let batch_size = 2;
let log_test_size = 6;
let test_size = 1 << log_test_size;
let (_, mut d_coeffs, mut d_domain) = set_up_points(test_size * batch_size, log_test_size, false);
let (_, _, mut d_large_domain) = set_up_points(0, log_test_size + 1, false);
let mut d_coset_powers = build_domain(test_size, log_test_size + 1, false);
let mut d_evals_large = evaluate_points_batch(&mut d_coeffs, &mut d_large_domain, batch_size);
let mut h_evals_large: Vec<Point> = (0..2 * test_size * batch_size).map(|_| Point::zero()).collect();
d_evals_large.copy_to(&mut h_evals_large[..]).unwrap();
let mut d_evals = evaluate_points_batch(&mut d_coeffs, &mut d_domain, batch_size);
let mut h_evals: Vec<Point> = (0..test_size * batch_size).map(|_| Point::zero()).collect();
d_evals.copy_to(&mut h_evals[..]).unwrap();
let mut d_evals_coset = evaluate_points_on_coset_batch(&mut d_coeffs, &mut d_domain, batch_size, &mut d_coset_powers);
let mut h_evals_coset: Vec<Point> = (0..test_size * batch_size).map(|_| Point::zero()).collect();
d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();
for i in 0..batch_size {
assert_eq!(h_evals_large[2 * i * test_size..(2 * i + 1) * test_size], h_evals[i * test_size..(i + 1) * test_size]);
assert_eq!(h_evals_large[(2 * i + 1) * test_size..(2 * i + 2) * test_size], h_evals_coset[i * test_size..(i + 1) * test_size]);
}
for i in 0..test_size * batch_size {
assert_ne!(h_evals[i], Point::zero());
assert_ne!(h_evals_coset[i], Point::zero());
assert_ne!(h_evals_large[2 * i], Point::zero());
assert_ne!(h_evals_large[2 * i + 1], Point::zero());
}
}
// testing matrix-vector multiplication by comparing the result of the NTT with the naive multiplication by the DFT matrix
#[test]
fn test_matrix_multiplication() {
let seed = None; // set to Some(value) to fix the rng
let test_size = 1 << 5;
let rou = Fr::get_root_of_unity(test_size).unwrap();
let matrix_flattened: Vec<Scalar> = (0..test_size)
    .flat_map(|row_num| {
        (0..test_size).map(move |col_num| {
            let pow: [u64; 1] = [(row_num * col_num).try_into().unwrap()];
            Scalar::from_ark(Fr::pow(&rou, &pow).into_repr())
        })
    })
    .collect();
let vector: Vec<Scalar> = generate_random_scalars(test_size, get_rng(seed));
let result = mult_matrix_by_vec(&matrix_flattened, &vector, 0);
let mut ntt_result = vector.clone();
ntt(&mut ntt_result, 0);
// we don't use the same roots of unity as arkworks, so the results are permutations
// of one another; only two entries are root-independent and guaranteed to match:
// index 0 (the sum of all inputs) and index N/2 (evaluation at -1, since w^(N/2) = -1):
assert_eq!(result[0], ntt_result[0]);
assert_eq!(result[test_size >> 1], ntt_result[test_size >> 1]);
}
#[test]
#[allow(non_snake_case)]
fn test_vec_scalar_mul() {
let mut inout = [Scalar::one(), Scalar::one(), Scalar::zero()];
let expected = [Scalar::one(), Scalar::zero(), Scalar::zero()];
mult_sc_vec(&mut inout, &expected, 0);
assert_eq!(inout, expected);
}
#[test]
#[allow(non_snake_case)]
fn test_vec_point_mul() {
let dummy_one = Point {
x: Base::one(),
y: Base::one(),
z: Base::one(),
};
let mut inout = [dummy_one, dummy_one, Point::zero()];
let scalars = [Scalar::one(), Scalar::zero(), Scalar::zero()];
let expected = [dummy_one, Point::zero(), Point::zero()];
multp_vec(&mut inout, &scalars, 0);
assert_eq!(inout, expected);
}
}

34
bn254/Cargo.toml Normal file

@@ -0,0 +1,34 @@
[package]
name = "bn254"
version = "0.1.0"
edition = "2021"
authors = [ "Ingonyama" ]
[dependencies]
icicle-core = { path = "../icicle-core" }
hex = "*"
ark-std = "0.3.0"
ark-ff = "0.3.0"
ark-poly = "0.3.0"
ark-ec = { version = "0.3.0", features = [ "parallel" ] }
ark-bn254 = "0.3.0"
serde = { version = "1.0", features = ["derive"] }
serde_derive = "1.0"
serde_cbor = "0.11.2"
rustacuda = "0.1"
rustacuda_core = "0.1"
rustacuda_derive = "0.1"
rand = "*" #TODO: move rand and ark dependencies to dev once random scalar/point generation is done "natively"
[build-dependencies]
cc = { version = "1.0", features = ["parallel"] }
[dev-dependencies]
"criterion" = "0.4.0"
[features]
g2 = []

36
bn254/build.rs Normal file

@@ -0,0 +1,36 @@
use std::env;
fn main() {
//TODO: check cargo features selected
//TODO: can conflict/duplicate with make ?
println!("cargo:rerun-if-env-changed=CXXFLAGS");
println!("cargo:rerun-if-changed=./icicle");
let arch_type = env::var("ARCH_TYPE").unwrap_or(String::from("native"));
let stream_type = env::var("DEFAULT_STREAM").unwrap_or(String::from("legacy"));
let mut arch = String::from("-arch=");
arch.push_str(&arch_type);
let mut stream = String::from("-default-stream=");
stream.push_str(&stream_type);
let mut nvcc = cc::Build::new();
println!("Compiling icicle library using arch: {}", &arch);
if cfg!(feature = "g2") {
nvcc.define("G2_DEFINED", None);
}
nvcc.cuda(true);
nvcc.define("FEATURE_BN254", None);
nvcc.debug(false);
nvcc.flag(&arch);
nvcc.flag(&stream);
nvcc.shared_flag(false);
// nvcc.static_flag(true);
nvcc.files([
"../icicle-cuda/curves/index.cu",
]);
nvcc.compile("ingo_icicle"); //TODO: extension??
}

View File

@@ -0,0 +1,4 @@
pub trait Field<const NUM_LIMBS: usize> {
const MODULUS: [u32; NUM_LIMBS];
const LIMBS: usize = NUM_LIMBS;
}

View File

@@ -0,0 +1,3 @@
pub mod field;
pub mod scalar;
pub mod point;

View File

@@ -0,0 +1,108 @@
use std::ffi::c_uint;
use ark_bn254::{Fq as Fq_BN254, Fr as Fr_BN254, G1Affine as G1Affine_BN254, G1Projective as G1Projective_BN254};
use ark_ec::AffineCurve;
use ark_ff::{BigInteger256, PrimeField};
use std::mem::transmute;
use ark_ff::Field;
use icicle_core::utils::{u32_vec_to_u64_vec, u64_vec_to_u32_vec};
use rustacuda_core::DeviceCopy;
use rustacuda_derive::DeviceCopy;
use super::scalar::{get_fixed_limbs, self};
#[derive(Debug, Clone, Copy, DeviceCopy)]
#[repr(C)]
pub struct PointT<BF: scalar::ScalarTrait> {
pub x: BF,
pub y: BF,
pub z: BF,
}
impl<BF: DeviceCopy + scalar::ScalarTrait> Default for PointT<BF> {
fn default() -> Self {
PointT::zero()
}
}
impl<BF: DeviceCopy + scalar::ScalarTrait> PointT<BF> {
pub fn zero() -> Self {
PointT {
x: BF::zero(),
y: BF::one(),
z: BF::zero(),
}
}
pub fn infinity() -> Self {
Self::zero()
}
}
#[derive(Debug, PartialEq, Clone, Copy, DeviceCopy)]
#[repr(C)]
pub struct PointAffineNoInfinityT<BF> {
pub x: BF,
pub y: BF,
}
impl<BF: scalar::ScalarTrait> Default for PointAffineNoInfinityT<BF> {
fn default() -> Self {
PointAffineNoInfinityT {
x: BF::zero(),
y: BF::zero(),
}
}
}
impl<BF: Copy + scalar::ScalarTrait> PointAffineNoInfinityT<BF> {
/// From u32 limbs x, y
pub fn from_limbs(x: &[u32], y: &[u32]) -> Self {
PointAffineNoInfinityT {
x: BF::from_limbs(x),
y: BF::from_limbs(y)
}
}
pub fn limbs(&self) -> Vec<u32> {
[self.x.limbs(), self.y.limbs()].concat()
}
pub fn to_projective(&self) -> PointT<BF> {
PointT {
x: self.x,
y: self.y,
z: BF::one(),
}
}
}
impl<BF: Copy + scalar::ScalarTrait> PointT<BF> {
pub fn from_limbs(x: &[u32], y: &[u32], z: &[u32]) -> Self {
PointT {
x: BF::from_limbs(x),
y: BF::from_limbs(y),
z: BF::from_limbs(z)
}
}
pub fn from_xy_limbs(value: &[u32]) -> PointT<BF> {
let l = value.len();
assert_eq!(l, 3 * BF::base_limbs(), "length must be 3 * {}", BF::base_limbs());
PointT {
x: BF::from_limbs(value[..BF::base_limbs()].try_into().unwrap()),
y: BF::from_limbs(value[BF::base_limbs()..BF::base_limbs() * 2].try_into().unwrap()),
z: BF::from_limbs(value[BF::base_limbs() * 2..].try_into().unwrap())
}
}
pub fn to_xy_strip_z(&self) -> PointAffineNoInfinityT<BF> {
PointAffineNoInfinityT {
x: self.x,
y: self.y,
}
}
}

View File

@@ -0,0 +1,102 @@
use std::ffi::{c_int, c_uint};
use rand::{rngs::StdRng, RngCore, SeedableRng};
use rustacuda_core::DeviceCopy;
use rustacuda_derive::DeviceCopy;
use std::mem::transmute;
use rustacuda::prelude::*;
use rustacuda_core::DevicePointer;
use rustacuda::memory::{DeviceBox, CopyDestination};
use icicle_core::utils::{u32_vec_to_u64_vec, u64_vec_to_u32_vec};
use std::marker::PhantomData;
use std::convert::TryInto;
use super::field::{Field, self};
pub fn get_fixed_limbs<const NUM_LIMBS: usize>(val: &[u32]) -> [u32; NUM_LIMBS] {
match val.len() {
n if n < NUM_LIMBS => {
let mut padded: [u32; NUM_LIMBS] = [0; NUM_LIMBS];
padded[..val.len()].copy_from_slice(val);
padded
}
n if n == NUM_LIMBS => val.try_into().unwrap(),
_ => panic!("slice has too many elements"),
}
}
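// Hedged example (illustrative, not part of the original diff) of the padding
// behavior above: slices shorter than NUM_LIMBS are zero-extended, oversized ones panic.
#[cfg(test)]
mod get_fixed_limbs_example {
    use super::get_fixed_limbs;
    #[test]
    fn pads_short_slices_with_zeros() {
        let limbs: [u32; 8] = get_fixed_limbs(&[1, 2, 3]);
        assert_eq!(limbs, [1, 2, 3, 0, 0, 0, 0, 0]);
    }
}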
pub trait ScalarTrait{
fn base_limbs() -> usize;
fn zero() -> Self;
fn from_limbs(value: &[u32]) -> Self;
fn one() -> Self;
fn to_bytes_le(&self) -> Vec<u8>;
fn limbs(&self) -> &[u32];
}
#[derive(Debug, PartialEq, Clone, Copy)]
#[repr(C)]
pub struct ScalarT<M, const NUM_LIMBS: usize> {
pub(crate) phantom: PhantomData<M>,
pub(crate) value : [u32; NUM_LIMBS]
}
impl<M, const NUM_LIMBS: usize> ScalarTrait for ScalarT<M, NUM_LIMBS>
where
M: Field<NUM_LIMBS>,
{
fn base_limbs() -> usize {
return NUM_LIMBS;
}
fn zero() -> Self {
ScalarT {
value: [0u32; NUM_LIMBS],
phantom: PhantomData,
}
}
fn from_limbs(value: &[u32]) -> Self {
Self {
value: get_fixed_limbs(value),
phantom: PhantomData,
}
}
fn one() -> Self {
let mut s = [0u32; NUM_LIMBS];
s[0] = 1;
ScalarT { value: s, phantom: PhantomData }
}
fn to_bytes_le(&self) -> Vec<u8> {
self.value
    .iter()
    .flat_map(|s| s.to_le_bytes())
    .collect::<Vec<_>>()
}
fn limbs(&self) -> &[u32] {
&self.value
}
}
impl<M, const NUM_LIMBS: usize> ScalarT<M, NUM_LIMBS> where M: field::Field<NUM_LIMBS>{
pub fn from_limbs_le(value: &[u32]) -> ScalarT<M,NUM_LIMBS> {
Self::from_limbs(value)
}
pub fn from_limbs_be(value: &[u32]) -> ScalarT<M,NUM_LIMBS> {
let mut value = value.to_vec();
value.reverse();
Self::from_limbs_le(&value)
}
// Additional Functions
// TODO: overload `+`; limb-wise addition with carry propagation, no modular reduction yet
pub fn add(&self, other: ScalarT<M, NUM_LIMBS>) -> ScalarT<M, NUM_LIMBS> {
    let mut value = [0u32; NUM_LIMBS];
    let mut carry = 0u64;
    for i in 0..NUM_LIMBS {
        let sum = self.value[i] as u64 + other.value[i] as u64 + carry;
        value[i] = sum as u32;
        carry = sum >> 32;
    }
    ScalarT { value, phantom: PhantomData }
}
}
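// Hedged example (illustrative, not part of the original diff) of the carry
// behavior of `add`; `DummyField` is a hypothetical field instance used only here.
#[cfg(test)]
mod scalar_add_example {
    use super::*;
    struct DummyField;
    impl Field<4> for DummyField {
        const MODULUS: [u32; 4] = [0; 4];
    }
    #[test]
    fn add_propagates_carry_across_limbs() {
        let a = ScalarT::<DummyField, 4>::from_limbs(&[u32::MAX]);
        let b = ScalarT::<DummyField, 4>::one();
        assert_eq!(a.add(b).limbs(), &[0, 1, 0, 0]); // low limb wraps, carry moves up
    }
}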

View File

@@ -0,0 +1,62 @@
use std::ffi::{c_int, c_uint};
use rand::{rngs::StdRng, RngCore, SeedableRng};
use rustacuda_derive::DeviceCopy;
use std::mem::transmute;
use rustacuda::prelude::*;
use rustacuda_core::DevicePointer;
use rustacuda::memory::{DeviceBox, CopyDestination, DeviceCopy};
use std::marker::PhantomData;
use std::convert::TryInto;
use crate::basic_structs::point::{PointT, PointAffineNoInfinityT};
use crate::basic_structs::scalar::ScalarT;
use crate::basic_structs::field::Field;
#[derive(Debug, PartialEq, Clone, Copy,DeviceCopy)]
#[repr(C)]
pub struct ScalarField;
impl Field<8> for ScalarField {
const MODULUS: [u32; 8] = [0x0; 8];
}
#[derive(Debug, PartialEq, Clone, Copy,DeviceCopy)]
#[repr(C)]
pub struct BaseField;
impl Field<8> for BaseField {
const MODULUS: [u32; 8] = [0x0; 8];
}
pub type Scalar = ScalarT<ScalarField,8>;
impl Default for Scalar {
fn default() -> Self {
Self{value: [0x0;ScalarField::LIMBS], phantom: PhantomData }
}
}
unsafe impl DeviceCopy for Scalar{}
pub type Base = ScalarT<BaseField,8>;
impl Default for Base {
fn default() -> Self {
Self{value: [0x0;BaseField::LIMBS], phantom: PhantomData }
}
}
unsafe impl DeviceCopy for Base{}
pub type Point = PointT<Base>;
pub type PointAffineNoInfinity = PointAffineNoInfinityT<Base>;
extern "C" {
fn eq(point1: *const Point, point2: *const Point) -> c_uint;
}
impl PartialEq for Point {
fn eq(&self, other: &Self) -> bool {
unsafe { eq(self, other) != 0 }
}
}

797
bn254/src/from_cuda.rs Normal file

@@ -0,0 +1,797 @@
use std::ffi::{c_int, c_uint};
use ark_std::UniformRand;
use rand::{rngs::StdRng, RngCore, SeedableRng};
use rustacuda::CudaFlags;
use rustacuda::memory::DeviceBox;
use rustacuda::prelude::{DeviceBuffer, Device, ContextFlags, Context};
use rustacuda_core::DevicePointer;
use std::mem::transmute;
use crate::basic_structs::scalar::ScalarTrait;
use crate::curve_structs::*;
use icicle_core::utils::{u32_vec_to_u64_vec, u64_vec_to_u32_vec};
use std::marker::PhantomData;
use std::convert::TryInto;
use ark_bn254::{Fq as Fq_BN254, Fr as Fr_BN254, G1Affine as G1Affine_BN254, G1Projective as G1Projective_BN254};
use ark_ec::AffineCurve;
use ark_ff::{BigInteger384, BigInteger256, PrimeField};
use rustacuda::memory::{CopyDestination, DeviceCopy};
extern "C" {
fn msm_cuda(
out: *mut Point,
points: *const PointAffineNoInfinity,
scalars: *const Scalar,
count: usize,
device_id: usize,
) -> c_uint;
fn msm_batch_cuda(
out: *mut Point,
points: *const PointAffineNoInfinity,
scalars: *const Scalar,
batch_size: usize,
msm_size: usize,
device_id: usize,
) -> c_uint;
fn commit_cuda(
d_out: DevicePointer<Point>,
d_scalars: DevicePointer<Scalar>,
d_points: DevicePointer<PointAffineNoInfinity>,
count: usize,
device_id: usize,
) -> c_uint;
fn commit_batch_cuda(
d_out: DevicePointer<Point>,
d_scalars: DevicePointer<Scalar>,
d_points: DevicePointer<PointAffineNoInfinity>,
count: usize,
batch_size: usize,
device_id: usize,
) -> c_uint;
fn build_domain_cuda(domain_size: usize, logn: usize, inverse: bool, device_id: usize) -> DevicePointer<Scalar>;
fn ntt_cuda(inout: *mut Scalar, n: usize, inverse: bool, device_id: usize) -> c_int;
fn ecntt_cuda(inout: *mut Point, n: usize, inverse: bool, device_id: usize) -> c_int;
fn ntt_batch_cuda(
inout: *mut Scalar,
arr_size: usize,
n: usize,
inverse: bool,
) -> c_int;
fn ecntt_batch_cuda(inout: *mut Point, arr_size: usize, n: usize, inverse: bool) -> c_int;
fn interpolate_scalars_cuda(
d_out: DevicePointer<Scalar>,
d_evaluations: DevicePointer<Scalar>,
d_domain: DevicePointer<Scalar>,
n: usize,
device_id: usize
) -> c_int;
fn interpolate_scalars_batch_cuda(
d_out: DevicePointer<Scalar>,
d_evaluations: DevicePointer<Scalar>,
d_domain: DevicePointer<Scalar>,
n: usize,
batch_size: usize,
device_id: usize
) -> c_int;
fn interpolate_points_cuda(
d_out: DevicePointer<Point>,
d_evaluations: DevicePointer<Point>,
d_domain: DevicePointer<Scalar>,
n: usize,
device_id: usize
) -> c_int;
fn interpolate_points_batch_cuda(
d_out: DevicePointer<Point>,
d_evaluations: DevicePointer<Point>,
d_domain: DevicePointer<Scalar>,
n: usize,
batch_size: usize,
device_id: usize
) -> c_int;
fn evaluate_scalars_cuda(
d_out: DevicePointer<Scalar>,
d_coefficients: DevicePointer<Scalar>,
d_domain: DevicePointer<Scalar>,
domain_size: usize,
n: usize,
device_id: usize
) -> c_int;
fn evaluate_scalars_batch_cuda(
d_out: DevicePointer<Scalar>,
d_coefficients: DevicePointer<Scalar>,
d_domain: DevicePointer<Scalar>,
domain_size: usize,
n: usize,
batch_size: usize,
device_id: usize
) -> c_int;
fn evaluate_points_cuda(
d_out: DevicePointer<Point>,
d_coefficients: DevicePointer<Point>,
d_domain: DevicePointer<Scalar>,
domain_size: usize,
n: usize,
device_id: usize
) -> c_int;
fn evaluate_points_batch_cuda(
d_out: DevicePointer<Point>,
d_coefficients: DevicePointer<Point>,
d_domain: DevicePointer<Scalar>,
domain_size: usize,
n: usize,
batch_size: usize,
device_id: usize
) -> c_int;
fn evaluate_scalars_on_coset_cuda(
d_out: DevicePointer<Scalar>,
d_coefficients: DevicePointer<Scalar>,
d_domain: DevicePointer<Scalar>,
domain_size: usize,
n: usize,
coset_powers: DevicePointer<Scalar>,
device_id: usize
) -> c_int;
fn evaluate_scalars_on_coset_batch_cuda(
d_out: DevicePointer<Scalar>,
d_coefficients: DevicePointer<Scalar>,
d_domain: DevicePointer<Scalar>,
domain_size: usize,
n: usize,
batch_size: usize,
coset_powers: DevicePointer<Scalar>,
device_id: usize
) -> c_int;
fn evaluate_points_on_coset_cuda(
d_out: DevicePointer<Point>,
d_coefficients: DevicePointer<Point>,
d_domain: DevicePointer<Scalar>,
domain_size: usize,
n: usize,
coset_powers: DevicePointer<Scalar>,
device_id: usize
) -> c_int;
fn evaluate_points_on_coset_batch_cuda(
d_out: DevicePointer<Point>,
d_coefficients: DevicePointer<Point>,
d_domain: DevicePointer<Scalar>,
domain_size: usize,
n: usize,
batch_size: usize,
coset_powers: DevicePointer<Scalar>,
device_id: usize
) -> c_int;
fn reverse_order_scalars_cuda(
d_arr: DevicePointer<Scalar>,
n: usize,
device_id: usize
) -> c_int;
fn reverse_order_scalars_batch_cuda(
d_arr: DevicePointer<Scalar>,
n: usize,
batch_size: usize,
device_id: usize
) -> c_int;
fn reverse_order_points_cuda(
d_arr: DevicePointer<Point>,
n: usize,
device_id: usize
) -> c_int;
fn reverse_order_points_batch_cuda(
d_arr: DevicePointer<Point>,
n: usize,
batch_size: usize,
device_id: usize
) -> c_int;
fn vec_mod_mult_point(
inout: *mut Point,
scalars: *const Scalar,
n_elements: usize,
device_id: usize,
) -> c_int;
fn vec_mod_mult_scalar(
inout: *mut Scalar,
scalars: *const Scalar,
n_elements: usize,
device_id: usize,
) -> c_int;
fn matrix_vec_mod_mult(
matrix_flattened: *const Scalar,
input: *const Scalar,
output: *mut Scalar,
n_elements: usize,
device_id: usize,
) -> c_int;
}
pub fn msm(points: &[PointAffineNoInfinity], scalars: &[Scalar], device_id: usize) -> Point {
let count = points.len();
if count != scalars.len() {
todo!("variable length")
}
let mut ret = Point::zero();
unsafe {
msm_cuda(
&mut ret as *mut _ as *mut Point,
points as *const _ as *const PointAffineNoInfinity,
scalars as *const _ as *const Scalar,
scalars.len(),
device_id,
)
};
ret
}
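// Hedged usage sketch (illustrative, not part of the original diff): an MSM over
// 2^10 random points and scalars on device 0, using only helpers from this file;
// assumes a CUDA context is already current (see `set_up_device` below).
#[allow(dead_code)]
fn msm_usage_example() -> Point {
    let points = generate_random_points(1 << 10, get_rng(None));
    let scalars = generate_random_scalars(1 << 10, get_rng(None));
    msm(&points, &scalars, 0)
}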
pub fn msm_batch(
points: &[PointAffineNoInfinity],
scalars: &[Scalar],
batch_size: usize,
device_id: usize,
) -> Vec<Point> {
let count = points.len();
if count != scalars.len() {
todo!("variable length")
}
let mut ret = vec![Point::zero(); batch_size];
unsafe {
msm_batch_cuda(
&mut ret[0] as *mut _ as *mut Point,
points as *const _ as *const PointAffineNoInfinity,
scalars as *const _ as *const Scalar,
batch_size,
count / batch_size,
device_id,
)
};
ret
}
pub fn commit(
points: &mut DeviceBuffer<PointAffineNoInfinity>,
scalars: &mut DeviceBuffer<Scalar>,
) -> DeviceBox<Point> {
let mut res = DeviceBox::new(&Point::zero()).unwrap();
unsafe {
commit_cuda(
res.as_device_ptr(),
scalars.as_device_ptr(),
points.as_device_ptr(),
scalars.len(),
0,
);
}
return res;
}
pub fn commit_batch(
points: &mut DeviceBuffer<PointAffineNoInfinity>,
scalars: &mut DeviceBuffer<Scalar>,
batch_size: usize,
) -> DeviceBuffer<Point> {
let mut res = unsafe { DeviceBuffer::uninitialized(batch_size).unwrap() };
unsafe {
commit_batch_cuda(
res.as_device_ptr(),
scalars.as_device_ptr(),
points.as_device_ptr(),
scalars.len() / batch_size,
batch_size,
0,
);
}
return res;
}
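// Hedged sketch of the device-resident flow (illustrative, not part of the
// original diff): upload inputs, commit on device 0, copy the result back;
// assumes a CUDA context is already current (see `set_up_device` below).
#[allow(dead_code)]
fn commit_usage_example(points: &[PointAffineNoInfinity], scalars: &[Scalar]) -> Point {
    let mut d_points = DeviceBuffer::from_slice(points).unwrap();
    let mut d_scalars = DeviceBuffer::from_slice(scalars).unwrap();
    let mut d_result = commit(&mut d_points, &mut d_scalars);
    let mut h_result = Point::zero();
    d_result.copy_to(&mut h_result).unwrap();
    h_result
}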
/// Compute an in-place NTT on the input data.
fn ntt_internal(values: &mut [Scalar], device_id: usize, inverse: bool) -> i32 {
let ret_code = unsafe {
ntt_cuda(
values as *mut _ as *mut Scalar,
values.len(),
inverse,
device_id,
)
};
ret_code
}
pub fn ntt(values: &mut [Scalar], device_id: usize) {
ntt_internal(values, device_id, false);
}
pub fn intt(values: &mut [Scalar], device_id: usize) {
ntt_internal(values, device_id, true);
}
/// Compute an in-place NTT on the input data.
fn ntt_internal_batch(
values: &mut [Scalar],
device_id: usize,
batch_size: usize,
inverse: bool,
) -> i32 {
unsafe {
ntt_batch_cuda(
values as *mut _ as *mut Scalar,
values.len(),
batch_size,
inverse,
)
}
}
pub fn ntt_batch(values: &mut [Scalar], batch_size: usize, device_id: usize) {
    ntt_internal_batch(values, device_id, batch_size, false);
}
pub fn intt_batch(values: &mut [Scalar], batch_size: usize, device_id: usize) {
    ntt_internal_batch(values, device_id, batch_size, true);
}
/// Compute an in-place ECNTT on the input data.
fn ecntt_internal(values: &mut [Point], inverse: bool, device_id: usize) -> i32 {
unsafe {
ecntt_cuda(
values as *mut _ as *mut Point,
values.len(),
inverse,
device_id,
)
}
}
pub fn ecntt(values: &mut [Point], device_id: usize) {
ecntt_internal(values, false, device_id);
}
/// Compute an in-place iECNTT on the input data.
pub fn iecntt(values: &mut [Point], device_id: usize) {
ecntt_internal(values, true, device_id);
}
/// Compute an in-place ECNTT on the input data.
fn ecntt_internal_batch(
values: &mut [Point],
device_id: usize,
batch_size: usize,
inverse: bool,
) -> i32 {
unsafe {
ecntt_batch_cuda(
values as *mut _ as *mut Point,
values.len(),
batch_size,
inverse,
)
}
}
pub fn ecntt_batch(values: &mut [Point], batch_size: usize, device_id: usize) {
    ecntt_internal_batch(values, device_id, batch_size, false);
}
/// Compute an in-place iECNTT on the input data.
pub fn iecntt_batch(values: &mut [Point], batch_size: usize, device_id: usize) {
    ecntt_internal_batch(values, device_id, batch_size, true);
}
pub fn build_domain(domain_size: usize, logn: usize, inverse: bool) -> DeviceBuffer<Scalar> {
unsafe {
// adopt the device pointer allocated by the CUDA side; the returned
// DeviceBuffer takes ownership and frees it on drop
DeviceBuffer::from_raw_parts(build_domain_cuda(
domain_size,
logn,
inverse,
0
), domain_size)
}
}
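// Hedged sketch (illustrative, not part of the original diff): building paired
// forward/inverse domains of size 2^logn, mirroring how `set_up_scalars` below
// calls `build_domain`; assumes a CUDA context is already current.
#[allow(dead_code)]
fn build_domain_pair(logn: usize) -> (DeviceBuffer<Scalar>, DeviceBuffer<Scalar>) {
    let d_twiddles = build_domain(1 << logn, logn, false); // forward NTT domain
    let d_inv_twiddles = build_domain(1 << logn, logn, true); // inverse NTT domain
    (d_twiddles, d_inv_twiddles)
}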
pub fn reverse_order_scalars(
d_scalars: &mut DeviceBuffer<Scalar>,
) {
unsafe { reverse_order_scalars_cuda(
d_scalars.as_device_ptr(),
d_scalars.len(),
0
); }
}
pub fn reverse_order_scalars_batch(
d_scalars: &mut DeviceBuffer<Scalar>,
batch_size: usize,
) {
unsafe { reverse_order_scalars_batch_cuda(
d_scalars.as_device_ptr(),
d_scalars.len() / batch_size,
batch_size,
0
); }
}
pub fn reverse_order_points(
d_points: &mut DeviceBuffer<Point>,
) {
unsafe { reverse_order_points_cuda(
d_points.as_device_ptr(),
d_points.len(),
0
); }
}
pub fn reverse_order_points_batch(
d_points: &mut DeviceBuffer<Point>,
batch_size: usize,
) {
unsafe { reverse_order_points_batch_cuda(
d_points.as_device_ptr(),
d_points.len() / batch_size,
batch_size,
0
); }
}
pub fn interpolate_scalars(
d_evaluations: &mut DeviceBuffer<Scalar>,
d_domain: &mut DeviceBuffer<Scalar>
) -> DeviceBuffer<Scalar> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
unsafe { interpolate_scalars_cuda(
res.as_device_ptr(),
d_evaluations.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
0
) };
return res;
}
pub fn interpolate_scalars_batch(
d_evaluations: &mut DeviceBuffer<Scalar>,
d_domain: &mut DeviceBuffer<Scalar>,
batch_size: usize,
) -> DeviceBuffer<Scalar> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
unsafe { interpolate_scalars_batch_cuda(
res.as_device_ptr(),
d_evaluations.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
batch_size,
0
) };
return res;
}
pub fn interpolate_points(
d_evaluations: &mut DeviceBuffer<Point>,
d_domain: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Point> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
unsafe { interpolate_points_cuda(
res.as_device_ptr(),
d_evaluations.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
0
) };
return res;
}
pub fn interpolate_points_batch(
d_evaluations: &mut DeviceBuffer<Point>,
d_domain: &mut DeviceBuffer<Scalar>,
batch_size: usize,
) -> DeviceBuffer<Point> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
unsafe { interpolate_points_batch_cuda(
res.as_device_ptr(),
d_evaluations.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
batch_size,
0
) };
return res;
}
pub fn evaluate_scalars(
d_coefficients: &mut DeviceBuffer<Scalar>,
d_domain: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Scalar> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
unsafe {
evaluate_scalars_cuda(
res.as_device_ptr(),
d_coefficients.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
d_coefficients.len(),
0
);
}
return res;
}
pub fn evaluate_scalars_batch(
d_coefficients: &mut DeviceBuffer<Scalar>,
d_domain: &mut DeviceBuffer<Scalar>,
batch_size: usize,
) -> DeviceBuffer<Scalar> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
unsafe {
evaluate_scalars_batch_cuda(
res.as_device_ptr(),
d_coefficients.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
d_coefficients.len() / batch_size,
batch_size,
0
);
}
return res;
}
pub fn evaluate_points(
d_coefficients: &mut DeviceBuffer<Point>,
d_domain: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Point> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
unsafe {
evaluate_points_cuda(
res.as_device_ptr(),
d_coefficients.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
d_coefficients.len(),
0
);
}
return res;
}
pub fn evaluate_points_batch(
d_coefficients: &mut DeviceBuffer<Point>,
d_domain: &mut DeviceBuffer<Scalar>,
batch_size: usize,
) -> DeviceBuffer<Point> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
unsafe {
evaluate_points_batch_cuda(
res.as_device_ptr(),
d_coefficients.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
d_coefficients.len() / batch_size,
batch_size,
0
);
}
return res;
}
pub fn evaluate_scalars_on_coset(
d_coefficients: &mut DeviceBuffer<Scalar>,
d_domain: &mut DeviceBuffer<Scalar>,
coset_powers: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Scalar> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
unsafe {
evaluate_scalars_on_coset_cuda(
res.as_device_ptr(),
d_coefficients.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
d_coefficients.len(),
coset_powers.as_device_ptr(),
0
);
}
return res;
}
pub fn evaluate_scalars_on_coset_batch(
d_coefficients: &mut DeviceBuffer<Scalar>,
d_domain: &mut DeviceBuffer<Scalar>,
batch_size: usize,
coset_powers: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Scalar> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
unsafe {
evaluate_scalars_on_coset_batch_cuda(
res.as_device_ptr(),
d_coefficients.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
d_coefficients.len() / batch_size,
batch_size,
coset_powers.as_device_ptr(),
0
);
}
return res;
}
pub fn evaluate_points_on_coset(
d_coefficients: &mut DeviceBuffer<Point>,
d_domain: &mut DeviceBuffer<Scalar>,
coset_powers: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Point> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len()).unwrap() };
unsafe {
evaluate_points_on_coset_cuda(
res.as_device_ptr(),
d_coefficients.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
d_coefficients.len(),
coset_powers.as_device_ptr(),
0
);
}
return res;
}
pub fn evaluate_points_on_coset_batch(
d_coefficients: &mut DeviceBuffer<Point>,
d_domain: &mut DeviceBuffer<Scalar>,
batch_size: usize,
coset_powers: &mut DeviceBuffer<Scalar>,
) -> DeviceBuffer<Point> {
let mut res = unsafe { DeviceBuffer::uninitialized(d_domain.len() * batch_size).unwrap() };
unsafe {
evaluate_points_on_coset_batch_cuda(
res.as_device_ptr(),
d_coefficients.as_device_ptr(),
d_domain.as_device_ptr(),
d_domain.len(),
d_coefficients.len() / batch_size,
batch_size,
coset_powers.as_device_ptr(),
0
);
}
return res;
}
pub fn multp_vec(a: &mut [Point], b: &[Scalar], device_id: usize) {
assert_eq!(a.len(), b.len());
unsafe {
vec_mod_mult_point(
a as *mut _ as *mut Point,
b as *const _ as *const Scalar,
a.len(),
device_id,
);
}
}
pub fn mult_sc_vec(a: &mut [Scalar], b: &[Scalar], device_id: usize) {
assert_eq!(a.len(), b.len());
unsafe {
vec_mod_mult_scalar(
a as *mut _ as *mut Scalar,
b as *const _ as *const Scalar,
a.len(),
device_id,
);
}
}
// Multiply a flattened matrix by a vector:
// `a` - row-major flattened square matrix;
// `b` - vector to multiply `a` by;
pub fn mult_matrix_by_vec(a: &[Scalar], b: &[Scalar], device_id: usize) -> Vec<Scalar> {
    let mut c = vec![Scalar::zero(); b.len()];
unsafe {
matrix_vec_mod_mult(
a as *const _ as *const Scalar,
b as *const _ as *const Scalar,
c.as_mut_slice() as *mut _ as *mut Scalar,
b.len(),
device_id,
);
}
c
}
pub fn clone_buffer<T: DeviceCopy>(buf: &mut DeviceBuffer<T>) -> DeviceBuffer<T> {
let mut buf_cpy = unsafe { DeviceBuffer::uninitialized(buf.len()).unwrap() };
buf_cpy.copy_from(buf).unwrap();
return buf_cpy;
}
pub fn get_rng(seed: Option<u64>) -> Box<dyn RngCore> {
let rng: Box<dyn RngCore> = match seed {
Some(seed) => Box::new(StdRng::seed_from_u64(seed)),
None => Box::new(rand::thread_rng()),
};
rng
}
fn set_up_device() {
    // Initialize CUDA and create a context on the first device.
    rustacuda::init(CudaFlags::empty()).unwrap();
    let device = Device::get_device(0).unwrap();
    let ctx = Context::create_and_push(ContextFlags::MAP_HOST | ContextFlags::SCHED_AUTO, device).unwrap();
    // Leak the context so it remains current after this helper returns;
    // dropping it here would destroy the context before any kernels run.
    std::mem::forget(ctx);
}
pub fn generate_random_points(
count: usize,
mut rng: Box<dyn RngCore>,
) -> Vec<PointAffineNoInfinity> {
(0..count)
.map(|_| Point::from_ark(G1Projective_BN254::rand(&mut rng)).to_xy_strip_z())
.collect()
}
pub fn generate_random_points_proj(count: usize, mut rng: Box<dyn RngCore>) -> Vec<Point> {
(0..count)
.map(|_| Point::from_ark(G1Projective_BN254::rand(&mut rng)))
.collect()
}
pub fn generate_random_scalars(count: usize, mut rng: Box<dyn RngCore>) -> Vec<Scalar> {
(0..count)
.map(|_| Scalar::from_ark(Fr_BN254::rand(&mut rng).into_repr()))
.collect()
}
pub fn set_up_points(test_size: usize, log_domain_size: usize, inverse: bool) -> (Vec<Point>, DeviceBuffer<Point>, DeviceBuffer<Scalar>) {
    set_up_device();
    let d_domain = build_domain(1 << log_domain_size, log_domain_size, inverse);
    let seed = Some(0); // fix the rng for reproducibility
    let vector = generate_random_points_proj(test_size, get_rng(seed));
    let d_vector = DeviceBuffer::from_slice(&vector[..]).unwrap();
    (vector, d_vector, d_domain)
}
pub fn set_up_scalars(test_size: usize, log_domain_size: usize, inverse: bool) -> (Vec<Scalar>, DeviceBuffer<Scalar>, DeviceBuffer<Scalar>) {
    set_up_device();
    let d_domain = build_domain(1 << log_domain_size, log_domain_size, inverse);
    let seed = Some(0); // fix the rng for reproducibility
    let vector = generate_random_scalars(test_size, get_rng(seed));
    let d_vector = DeviceBuffer::from_slice(&vector[..]).unwrap();
    (vector, d_vector, d_domain)
}

4
bn254/src/lib.rs Normal file

@@ -0,0 +1,4 @@
pub mod test_bn254;
pub mod basic_structs;
pub mod from_cuda;
pub mod curve_structs;

816
bn254/src/test_bn254.rs Normal file

@@ -0,0 +1,816 @@
use std::ffi::{c_int, c_uint};
use ark_std::UniformRand;
use rand::{rngs::StdRng, RngCore, SeedableRng};
use rustacuda::CudaFlags;
use rustacuda::memory::DeviceBox;
use rustacuda::prelude::{DeviceBuffer, Device, ContextFlags, Context};
use rustacuda_core::DevicePointer;
use std::mem::transmute;
pub use crate::basic_structs::scalar::ScalarTrait;
pub use crate::curve_structs::*;
use icicle_core::utils::{u32_vec_to_u64_vec, u64_vec_to_u32_vec};
use std::marker::PhantomData;
use std::convert::TryInto;
use ark_bn254::{Fq as Fq_BN254, Fr as Fr_BN254, G1Affine as G1Affine_BN254, G1Projective as G1Projective_BN254};
use ark_ec::AffineCurve;
use ark_ff::{BigInteger384, BigInteger256, PrimeField};
use rustacuda::memory::{CopyDestination, DeviceCopy};
impl Scalar {
pub fn to_biginteger254(&self) -> BigInteger256 {
BigInteger256::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap())
}
pub fn to_ark(&self) -> BigInteger256 {
BigInteger256::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap())
}
pub fn from_biginteger256(ark: BigInteger256) -> Self {
Self{ value: u64_vec_to_u32_vec(&ark.0).try_into().unwrap(), phantom : PhantomData}
}
pub fn to_biginteger256_transmute(&self) -> BigInteger256 {
unsafe { transmute(*self) }
}
pub fn from_biginteger_transmute(v: BigInteger256) -> Scalar {
Scalar{ value: unsafe{ transmute(v)}, phantom : PhantomData }
}
pub fn to_ark_transmute(&self) -> Fr_BN254 {
unsafe { std::mem::transmute(*self) }
}
pub fn from_ark_transmute(v: &Fr_BN254) -> Scalar {
unsafe { std::mem::transmute_copy(v) }
}
pub fn to_ark_mod_p(&self) -> Fr_BN254 {
Fr_BN254::new(BigInteger256::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap()))
}
pub fn to_ark_repr(&self) -> Fr_BN254 {
Fr_BN254::from_repr(BigInteger256::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap())).unwrap()
}
pub fn from_ark(v: BigInteger256) -> Scalar {
Self { value : u64_vec_to_u32_vec(&v.0).try_into().unwrap(), phantom: PhantomData}
}
}
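// Hedged round-trip sketch (illustrative, not part of the original diff):
// `from_ark`/`to_ark` only reshape limbs between u64 and u32 words, so the
// conversion must reproduce the original BigInteger256 exactly.
#[cfg(test)]
mod scalar_ark_roundtrip_example {
    use super::*;
    #[test]
    fn from_ark_to_ark_roundtrip() {
        let repr = Fr_BN254::rand(&mut ark_std::rand::thread_rng()).into_repr();
        assert_eq!(Scalar::from_ark(repr).to_ark(), repr);
    }
}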
impl Base {
pub fn to_ark(&self) -> BigInteger256 {
BigInteger256::new(u32_vec_to_u64_vec(&self.limbs()).try_into().unwrap())
}
pub fn from_ark(ark: BigInteger256) -> Self {
Self::from_limbs(&u64_vec_to_u32_vec(&ark.0))
}
}
impl Point {
pub fn to_ark(&self) -> G1Projective_BN254 {
self.to_ark_affine().into_projective()
}
pub fn to_ark_affine(&self) -> G1Affine_BN254 {
//TODO: generic conversion
use ark_ff::Field;
use std::ops::Mul;
let proj_x_field = Fq_BN254::from_le_bytes_mod_order(&self.x.to_bytes_le());
let proj_y_field = Fq_BN254::from_le_bytes_mod_order(&self.y.to_bytes_le());
let proj_z_field = Fq_BN254::from_le_bytes_mod_order(&self.z.to_bytes_le());
let inverse_z = proj_z_field.inverse().unwrap();
let aff_x = proj_x_field.mul(inverse_z);
let aff_y = proj_y_field.mul(inverse_z);
G1Affine_BN254::new(aff_x, aff_y, false)
}
pub fn from_ark(ark: G1Projective_BN254) -> Point {
    use ark_ff::Field;
    // ark's G1Projective uses Jacobian coordinates (affine x = X / Z^2, y = Y / Z^3),
    // so normalize here and store the result with z = 1
    let z_inv = ark.z.inverse().unwrap();
    let z_inv_sq = z_inv * z_inv;
    let z_inv_cube = z_inv_sq * z_inv;
    Point {
        x: Base::from_ark((ark.x * z_inv_sq).into_repr()),
        y: Base::from_ark((ark.y * z_inv_cube).into_repr()),
        z: Base::one(),
    }
}
}
impl PointAffineNoInfinity {
pub fn to_ark(&self) -> G1Affine_BN254 {
G1Affine_BN254::new(Fq_BN254::new(self.x.to_ark()), Fq_BN254::new(self.y.to_ark()), false)
}
pub fn to_ark_repr(&self) -> G1Affine_BN254 {
G1Affine_BN254::new(
Fq_BN254::from_repr(self.x.to_ark()).unwrap(),
Fq_BN254::from_repr(self.y.to_ark()).unwrap(),
false,
)
}
pub fn from_ark(p: &G1Affine_BN254) -> Self {
PointAffineNoInfinity {
x: Base::from_ark(p.x.into_repr()),
y: Base::from_ark(p.y.into_repr()),
}
}
}
impl Point {
pub fn to_affine(&self) -> PointAffineNoInfinity {
let ark_affine = self.to_ark_affine();
PointAffineNoInfinity {
x: Base::from_ark(ark_affine.x.into_repr()),
y: Base::from_ark(ark_affine.y.into_repr()),
}
}
}
#[cfg(test)]
pub(crate) mod tests_bn254 {
use std::ops::Add;
use ark_bn254::{Fr, G1Affine, G1Projective};
use ark_ec::{msm::VariableBaseMSM, AffineCurve, ProjectiveCurve};
use ark_ff::{FftField, Field, Zero, PrimeField};
use ark_std::UniformRand;
use rustacuda::prelude::{DeviceBuffer, CopyDestination};
use crate::curve_structs::{Point, Scalar, Base};
use crate::basic_structs::scalar::ScalarTrait;
use crate::from_cuda::{generate_random_points, get_rng, generate_random_scalars, msm, msm_batch, set_up_scalars, commit, commit_batch, ntt, intt, generate_random_points_proj, ecntt, iecntt, ntt_batch, ecntt_batch, iecntt_batch, intt_batch, reverse_order_scalars_batch, interpolate_scalars_batch, set_up_points, reverse_order_points, interpolate_points, reverse_order_points_batch, interpolate_points_batch, evaluate_scalars, interpolate_scalars, reverse_order_scalars, evaluate_points, build_domain, evaluate_scalars_on_coset, evaluate_points_on_coset, mult_matrix_by_vec, mult_sc_vec, multp_vec,evaluate_scalars_batch, evaluate_points_batch, evaluate_scalars_on_coset_batch, evaluate_points_on_coset_batch};
fn random_points_ark_proj(nof_elements: usize) -> Vec<G1Projective> {
let mut rng = ark_std::rand::thread_rng();
let mut points_ga: Vec<G1Projective> = Vec::new();
for _ in 0..nof_elements {
let aff = G1Projective::rand(&mut rng);
points_ga.push(aff);
}
points_ga
}
fn ecntt_arc_naive(
points: &Vec<G1Projective>,
size: usize,
inverse: bool,
) -> Vec<G1Projective> {
let mut result = vec![G1Projective::zero(); size];
let rou = if !inverse {
    Fr::get_root_of_unity(size).unwrap()
} else {
    Fr::inverse(&Fr::get_root_of_unity(size).unwrap()).unwrap()
};
for k in 0..size {
for l in 0..size {
let pow: [u64; 1] = [(l * k).try_into().unwrap()];
let mul_rou = Fr::pow(&rou, &pow);
result[k] = result[k].add(points[l].into_affine().mul(mul_rou));
}
}
if inverse {
    let multfactor = Fr::inverse(&Fr::from(size as u64)).unwrap();
    for k in 0..size {
        result[k] = result[k].into_affine().mul(multfactor);
    }
}
return result;
}
fn check_eq(points: &Vec<G1Projective>, points2: &Vec<G1Projective>) -> bool {
    points.len() == points2.len() && points.iter().zip(points2.iter()).all(|(p1, p2)| p1 == p2)
}
fn test_naive_ark_ecntt(size: usize) {
let points = random_points_ark_proj(size);
let result1: Vec<G1Projective> = ecntt_arc_naive(&points, size, false);
let result2: Vec<G1Projective> = ecntt_arc_naive(&result1, size, true);
assert!(!check_eq(&result2, &result1));
assert!(check_eq(&result2, &points));
}
#[test]
fn test_msm() {
let test_sizes = [6, 9];
for pow2 in test_sizes {
let count = 1 << pow2;
let seed = None; // set Some to provide seed
let points = generate_random_points(count, get_rng(seed));
let scalars = generate_random_scalars(count, get_rng(seed));
let msm_result = msm(&points, &scalars, 0);
let point_r_ark: Vec<_> = points.iter().map(|x| x.to_ark_repr()).collect();
let scalars_r_ark: Vec<_> = scalars.iter().map(|x| x.to_ark()).collect();
let msm_result_ark = VariableBaseMSM::multi_scalar_mul(&point_r_ark, &scalars_r_ark);
assert_eq!(msm_result.to_ark_affine(), msm_result_ark);
assert_eq!(msm_result.to_ark(), msm_result_ark);
assert_eq!(
msm_result.to_ark_affine(),
Point::from_ark(msm_result_ark).to_ark_affine()
);
}
}
#[test]
fn test_batch_msm() {
for batch_pow2 in [2, 4] {
for pow2 in [4, 6] {
let msm_size = 1 << pow2;
let batch_size = 1 << batch_pow2;
let seed = None; // set Some to provide seed
let points_batch = generate_random_points(msm_size * batch_size, get_rng(seed));
let scalars_batch = generate_random_scalars(msm_size * batch_size, get_rng(seed));
let point_r_ark: Vec<_> = points_batch.iter().map(|x| x.to_ark_repr()).collect();
let scalars_r_ark: Vec<_> = scalars_batch.iter().map(|x| x.to_ark()).collect();
let expected: Vec<_> = point_r_ark
.chunks(msm_size)
.zip(scalars_r_ark.chunks(msm_size))
.map(|p| Point::from_ark(VariableBaseMSM::multi_scalar_mul(p.0, p.1)))
.collect();
let result = msm_batch(&points_batch, &scalars_batch, batch_size, 0);
assert_eq!(result, expected);
}
}
}
#[test]
fn test_commit() {
let test_size = 1 << 8;
let seed = Some(0);
let (mut scalars, mut d_scalars, _) = set_up_scalars(test_size, 0, false);
let mut points = generate_random_points(test_size, get_rng(seed));
let mut d_points = DeviceBuffer::from_slice(&points[..]).unwrap();
let msm_result = msm(&points, &scalars, 0);
let mut d_commit_result = commit(&mut d_points, &mut d_scalars);
let mut h_commit_result = Point::zero();
d_commit_result.copy_to(&mut h_commit_result).unwrap();
assert_eq!(msm_result, h_commit_result);
assert_ne!(msm_result, Point::zero());
assert_ne!(h_commit_result, Point::zero());
}
#[test]
fn test_batch_commit() {
let batch_size = 4;
let test_size = 1 << 12;
let seed = Some(0);
let (scalars, mut d_scalars, _) = set_up_scalars(test_size * batch_size, 0, false);
let points = generate_random_points(test_size * batch_size, get_rng(seed));
let mut d_points = DeviceBuffer::from_slice(&points[..]).unwrap();
let msm_result = msm_batch(&points, &scalars, batch_size, 0);
let mut d_commit_result = commit_batch(&mut d_points, &mut d_scalars, batch_size);
let mut h_commit_result: Vec<Point> = (0..batch_size).map(|_| Point::zero()).collect();
d_commit_result.copy_to(&mut h_commit_result[..]).unwrap();
assert_eq!(msm_result, h_commit_result);
for h in h_commit_result {
assert_ne!(h, Point::zero());
}
}
#[test]
fn test_ntt() {
//NTT
let seed = None; // set to Some(value) to fix the rng
let test_size = 1 << 3;
let scalars = generate_random_scalars(test_size, get_rng(seed));
let mut ntt_result = scalars.clone();
ntt(&mut ntt_result, 0);
assert_ne!(ntt_result, scalars);
let mut intt_result = ntt_result.clone();
intt(&mut intt_result, 0);
assert_eq!(intt_result, scalars);
//ECNTT
let points_proj = generate_random_points_proj(test_size, get_rng(seed));
test_naive_ark_ecntt(test_size);
assert!(points_proj[0].to_ark().into_affine().is_on_curve());
//naive ark
let points_proj_ark = points_proj
.iter()
.map(|p| p.to_ark())
.collect::<Vec<G1Projective>>();
let ecntt_result_naive = ecntt_arc_naive(&points_proj_ark, points_proj_ark.len(), false);
let iecntt_result_naive = ecntt_arc_naive(&ecntt_result_naive, points_proj_ark.len(), true);
assert_eq!(points_proj_ark, iecntt_result_naive);
//ingo gpu
let mut ecntt_result = points_proj.to_vec();
ecntt(&mut ecntt_result, 0);
assert_ne!(ecntt_result, points_proj);
let mut iecntt_result = ecntt_result.clone();
iecntt(&mut iecntt_result, 0);
assert_eq!(
iecntt_result_naive,
points_proj
.iter()
.map(|p| p.to_ark_affine())
.collect::<Vec<G1Affine>>()
);
assert_eq!(
iecntt_result
.iter()
.map(|p| p.to_ark_affine())
.collect::<Vec<G1Affine>>(),
points_proj
.iter()
.map(|p| p.to_ark_affine())
.collect::<Vec<G1Affine>>()
);
}
#[test]
fn test_ntt_batch() {
//NTT
let seed = None; // set to Some(value) to fix the rng
let test_size = 1 << 5;
let batches = 4;
let scalars_batch: Vec<Scalar> =
generate_random_scalars(test_size * batches, get_rng(seed));
let mut scalar_vec_of_vec: Vec<Vec<Scalar>> = Vec::new();
for i in 0..batches {
scalar_vec_of_vec.push(scalars_batch[i * test_size..(i + 1) * test_size].to_vec());
}
let mut ntt_result = scalars_batch.clone();
// do batch ntt
ntt_batch(&mut ntt_result, test_size, 0);
let mut ntt_result_vec_of_vec = Vec::new();
// do ntt for every chunk
for i in 0..batches {
ntt_result_vec_of_vec.push(scalar_vec_of_vec[i].clone());
ntt(&mut ntt_result_vec_of_vec[i], 0);
}
// check that the ntt of each vec of scalars is equal to the ntt of the specific batch
for i in 0..batches {
assert_eq!(
ntt_result_vec_of_vec[i],
ntt_result[i * test_size..(i + 1) * test_size]
);
}
// check that ntt output is different from input
assert_ne!(ntt_result, scalars_batch);
let mut intt_result = ntt_result.clone();
// do batch intt
intt_batch(&mut intt_result, test_size, 0);
let mut intt_result_vec_of_vec = Vec::new();
// do intt for every chunk
for i in 0..batches {
intt_result_vec_of_vec.push(ntt_result_vec_of_vec[i].clone());
intt(&mut intt_result_vec_of_vec[i], 0);
}
// check that the intt of each vec of scalars is equal to the intt of the specific batch
for i in 0..batches {
assert_eq!(
intt_result_vec_of_vec[i],
intt_result[i * test_size..(i + 1) * test_size]
);
}
assert_eq!(intt_result, scalars_batch);
//ECNTT
let points_proj = generate_random_points_proj(test_size * batches, get_rng(seed));
let mut points_vec_of_vec: Vec<Vec<Point>> = Vec::new();
for i in 0..batches {
points_vec_of_vec.push(points_proj[i * test_size..(i + 1) * test_size].to_vec());
}
let mut ntt_result_points = points_proj.clone();
// do batch ecntt
ecntt_batch(&mut ntt_result_points, test_size, 0);
let mut ntt_result_points_vec_of_vec = Vec::new();
for i in 0..batches {
ntt_result_points_vec_of_vec.push(points_vec_of_vec[i].clone());
ecntt(&mut ntt_result_points_vec_of_vec[i], 0);
}
for i in 0..batches {
assert_eq!(
ntt_result_points_vec_of_vec[i],
ntt_result_points[i * test_size..(i + 1) * test_size]
);
}
assert_ne!(ntt_result_points, points_proj);
let mut intt_result_points = ntt_result_points.clone();
// do batch ecintt
iecntt_batch(&mut intt_result_points, test_size, 0);
let mut intt_result_points_vec_of_vec = Vec::new();
// do ecintt for every chunk
for i in 0..batches {
intt_result_points_vec_of_vec.push(ntt_result_points_vec_of_vec[i].clone());
iecntt(&mut intt_result_points_vec_of_vec[i], 0);
}
// check that the iecntt of each vec of points is equal to the iecntt of the specific batch
for i in 0..batches {
assert_eq!(
intt_result_points_vec_of_vec[i],
intt_result_points[i * test_size..(i + 1) * test_size]
);
}
assert_eq!(intt_result_points, points_proj);
}
#[test]
fn test_scalar_interpolation() {
let log_test_size = 7;
let test_size = 1 << log_test_size;
let (mut evals_mut, mut d_evals, mut d_domain) = set_up_scalars(test_size, log_test_size, true);
reverse_order_scalars(&mut d_evals);
let mut d_coeffs = interpolate_scalars(&mut d_evals, &mut d_domain);
intt(&mut evals_mut, 0);
let mut h_coeffs: Vec<Scalar> = (0..test_size).map(|_| Scalar::zero()).collect();
d_coeffs.copy_to(&mut h_coeffs[..]).unwrap();
assert_eq!(h_coeffs, evals_mut);
}
#[test]
fn test_scalar_batch_interpolation() {
let batch_size = 4;
let log_test_size = 10;
let test_size = 1 << log_test_size;
let (mut evals_mut, mut d_evals, mut d_domain) = set_up_scalars(test_size * batch_size, log_test_size, true);
reverse_order_scalars_batch(&mut d_evals, batch_size);
let mut d_coeffs = interpolate_scalars_batch(&mut d_evals, &mut d_domain, batch_size);
intt_batch(&mut evals_mut, test_size, 0);
let mut h_coeffs: Vec<Scalar> = (0..test_size * batch_size).map(|_| Scalar::zero()).collect();
d_coeffs.copy_to(&mut h_coeffs[..]).unwrap();
assert_eq!(h_coeffs, evals_mut);
}
#[test]
fn test_point_interpolation() {
let log_test_size = 6;
let test_size = 1 << log_test_size;
let (mut evals_mut, mut d_evals, mut d_domain) = set_up_points(test_size, log_test_size, true);
reverse_order_points(&mut d_evals);
let mut d_coeffs = interpolate_points(&mut d_evals, &mut d_domain);
iecntt(&mut evals_mut[..], 0);
let mut h_coeffs: Vec<Point> = (0..test_size).map(|_| Point::zero()).collect();
d_coeffs.copy_to(&mut h_coeffs[..]).unwrap();
assert_eq!(h_coeffs, *evals_mut);
for h in h_coeffs.iter() {
assert_ne!(*h, Point::zero());
}
}
#[test]
fn test_point_batch_interpolation() {
let batch_size = 4;
let log_test_size = 6;
let test_size = 1 << log_test_size;
let (mut evals_mut, mut d_evals, mut d_domain) = set_up_points(test_size * batch_size, log_test_size, true);
reverse_order_points_batch(&mut d_evals, batch_size);
let mut d_coeffs = interpolate_points_batch(&mut d_evals, &mut d_domain, batch_size);
iecntt_batch(&mut evals_mut[..], test_size, 0);
let mut h_coeffs: Vec<Point> = (0..test_size * batch_size).map(|_| Point::zero()).collect();
d_coeffs.copy_to(&mut h_coeffs[..]).unwrap();
assert_eq!(h_coeffs, *evals_mut);
for h in h_coeffs.iter() {
assert_ne!(*h, Point::zero());
}
}
#[test]
fn test_scalar_evaluation() {
let log_test_domain_size = 8;
let coeff_size = 1 << 6;
let (h_coeffs, mut d_coeffs, mut d_domain) = set_up_scalars(coeff_size, log_test_domain_size, false);
let (_, _, mut d_domain_inv) = set_up_scalars(0, log_test_domain_size, true);
let mut d_evals = evaluate_scalars(&mut d_coeffs, &mut d_domain);
let mut d_coeffs_domain = interpolate_scalars(&mut d_evals, &mut d_domain_inv);
let mut h_coeffs_domain: Vec<Scalar> = (0..1 << log_test_domain_size).map(|_| Scalar::zero()).collect();
d_coeffs_domain.copy_to(&mut h_coeffs_domain[..]).unwrap();
assert_eq!(h_coeffs, h_coeffs_domain[..coeff_size]);
for i in coeff_size..(1 << log_test_domain_size) {
assert_eq!(Scalar::zero(), h_coeffs_domain[i]);
}
}
#[test]
fn test_scalar_batch_evaluation() {
let batch_size = 6;
let log_test_domain_size = 8;
let domain_size = 1 << log_test_domain_size;
let coeff_size = 1 << 6;
let (h_coeffs, mut d_coeffs, mut d_domain) = set_up_scalars(coeff_size * batch_size, log_test_domain_size, false);
let (_, _, mut d_domain_inv) = set_up_scalars(0, log_test_domain_size, true);
let mut d_evals = evaluate_scalars_batch(&mut d_coeffs, &mut d_domain, batch_size);
let mut d_coeffs_domain = interpolate_scalars_batch(&mut d_evals, &mut d_domain_inv, batch_size);
let mut h_coeffs_domain: Vec<Scalar> = (0..domain_size * batch_size).map(|_| Scalar::zero()).collect();
d_coeffs_domain.copy_to(&mut h_coeffs_domain[..]).unwrap();
for j in 0..batch_size {
assert_eq!(h_coeffs[j * coeff_size..(j + 1) * coeff_size], h_coeffs_domain[j * domain_size..j * domain_size + coeff_size]);
for i in coeff_size..domain_size {
assert_eq!(Scalar::zero(), h_coeffs_domain[j * domain_size + i]);
}
}
}
#[test]
fn test_point_evaluation() {
let log_test_domain_size = 7;
let coeff_size = 1 << 7;
let (h_coeffs, mut d_coeffs, mut d_domain) = set_up_points(coeff_size, log_test_domain_size, false);
let (_, _, mut d_domain_inv) = set_up_points(0, log_test_domain_size, true);
let mut d_evals = evaluate_points(&mut d_coeffs, &mut d_domain);
let mut d_coeffs_domain = interpolate_points(&mut d_evals, &mut d_domain_inv);
let mut h_coeffs_domain: Vec<Point> = (0..1 << log_test_domain_size).map(|_| Point::zero()).collect();
d_coeffs_domain.copy_to(&mut h_coeffs_domain[..]).unwrap();
assert_eq!(h_coeffs[..], h_coeffs_domain[..coeff_size]);
for i in coeff_size..(1 << log_test_domain_size) {
assert_eq!(Point::zero(), h_coeffs_domain[i]);
}
for i in 0..coeff_size {
assert_ne!(h_coeffs_domain[i], Point::zero());
}
}
#[test]
fn test_point_batch_evaluation() {
let batch_size = 4;
let log_test_domain_size = 6;
let domain_size = 1 << log_test_domain_size;
let coeff_size = 1 << 5;
let (h_coeffs, mut d_coeffs, mut d_domain) = set_up_points(coeff_size * batch_size, log_test_domain_size, false);
let (_, _, mut d_domain_inv) = set_up_points(0, log_test_domain_size, true);
let mut d_evals = evaluate_points_batch(&mut d_coeffs, &mut d_domain, batch_size);
let mut d_coeffs_domain = interpolate_points_batch(&mut d_evals, &mut d_domain_inv, batch_size);
let mut h_coeffs_domain: Vec<Point> = (0..domain_size * batch_size).map(|_| Point::zero()).collect();
d_coeffs_domain.copy_to(&mut h_coeffs_domain[..]).unwrap();
for j in 0..batch_size {
assert_eq!(h_coeffs[j * coeff_size..(j + 1) * coeff_size], h_coeffs_domain[j * domain_size..(j * domain_size + coeff_size)]);
for i in coeff_size..domain_size {
assert_eq!(Point::zero(), h_coeffs_domain[j * domain_size + i]);
}
for i in j * domain_size..(j * domain_size + coeff_size) {
assert_ne!(h_coeffs_domain[i], Point::zero());
}
}
}
#[test]
fn test_scalar_evaluation_on_trivial_coset() {
// checks that the evaluations on the subgroup are the same as on the coset generated by 1
let log_test_domain_size = 8;
let coeff_size = 1 << 6;
let (_, mut d_coeffs, mut d_domain) = set_up_scalars(coeff_size, log_test_domain_size, false);
let (_, _, mut d_domain_inv) = set_up_scalars(coeff_size, log_test_domain_size, true);
let mut d_trivial_coset_powers = build_domain(1 << log_test_domain_size, 0, false);
let mut d_evals = evaluate_scalars(&mut d_coeffs, &mut d_domain);
let mut h_coeffs: Vec<Scalar> = (0..1 << log_test_domain_size).map(|_| Scalar::zero()).collect();
d_evals.copy_to(&mut h_coeffs[..]).unwrap();
let mut d_evals_coset = evaluate_scalars_on_coset(&mut d_coeffs, &mut d_domain, &mut d_trivial_coset_powers);
let mut h_evals_coset: Vec<Scalar> = (0..1 << log_test_domain_size).map(|_| Scalar::zero()).collect();
d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();
assert_eq!(h_coeffs, h_evals_coset);
}
#[test]
fn test_scalar_evaluation_on_coset() {
// checks that evaluating a polynomial on a subgroup and its coset is the same as evaluating on a 2x larger subgroup
let log_test_size = 8;
let test_size = 1 << log_test_size;
let (_, mut d_coeffs, mut d_domain) = set_up_scalars(test_size, log_test_size, false);
let (_, _, mut d_large_domain) = set_up_scalars(0, log_test_size + 1, false);
let mut d_coset_powers = build_domain(test_size, log_test_size + 1, false);
let mut d_evals_large = evaluate_scalars(&mut d_coeffs, &mut d_large_domain);
let mut h_evals_large: Vec<Scalar> = (0..2 * test_size).map(|_| Scalar::zero()).collect();
d_evals_large.copy_to(&mut h_evals_large[..]).unwrap();
let mut d_evals = evaluate_scalars(&mut d_coeffs, &mut d_domain);
let mut h_evals: Vec<Scalar> = (0..test_size).map(|_| Scalar::zero()).collect();
d_evals.copy_to(&mut h_evals[..]).unwrap();
let mut d_evals_coset = evaluate_scalars_on_coset(&mut d_coeffs, &mut d_domain, &mut d_coset_powers);
let mut h_evals_coset: Vec<Scalar> = (0..test_size).map(|_| Scalar::zero()).collect();
d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();
assert_eq!(h_evals[..], h_evals_large[..test_size]);
assert_eq!(h_evals_coset[..], h_evals_large[test_size..2 * test_size]);
}
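The slicing in the last two assertions follows from a standard identity. If \omega is a primitive 2n-th root of unity and H = \langle \omega^2 \rangle is the order-n subgroup, then for a polynomial f of degree below n,

f(\omega^{2i}) = f\big((\omega^2)^i\big) \quad \text{and} \quad f(\omega^{2i+1}) = f\big(\omega \cdot (\omega^2)^i\big),

so evaluating on the size-2n domain yields exactly the evaluations on H together with those on the coset \omega H. The asserts show this build returns them grouped subgroup-first and coset-second (as a bit-reversed ordering of the doubled domain would), rather than interleaved.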
#[test]
fn test_scalar_batch_evaluation_on_coset() {
// checks that evaluating a polynomial on a subgroup and its coset is the same as evaluating on a 2x larger subgroup
let batch_size = 4;
let log_test_size = 6;
let test_size = 1 << log_test_size;
let (_, mut d_coeffs, mut d_domain) = set_up_scalars(test_size * batch_size, log_test_size, false);
let (_, _, mut d_large_domain) = set_up_scalars(0, log_test_size + 1, false);
let mut d_coset_powers = build_domain(test_size, log_test_size + 1, false);
let mut d_evals_large = evaluate_scalars_batch(&mut d_coeffs, &mut d_large_domain, batch_size);
let mut h_evals_large: Vec<Scalar> = (0..2 * test_size * batch_size).map(|_| Scalar::zero()).collect();
d_evals_large.copy_to(&mut h_evals_large[..]).unwrap();
let mut d_evals = evaluate_scalars_batch(&mut d_coeffs, &mut d_domain, batch_size);
let mut h_evals: Vec<Scalar> = (0..test_size * batch_size).map(|_| Scalar::zero()).collect();
d_evals.copy_to(&mut h_evals[..]).unwrap();
let mut d_evals_coset = evaluate_scalars_on_coset_batch(&mut d_coeffs, &mut d_domain, batch_size, &mut d_coset_powers);
let mut h_evals_coset: Vec<Scalar> = (0..test_size * batch_size).map(|_| Scalar::zero()).collect();
d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();
for i in 0..batch_size {
assert_eq!(h_evals_large[2 * i * test_size..(2 * i + 1) * test_size], h_evals[i * test_size..(i + 1) * test_size]);
assert_eq!(h_evals_large[(2 * i + 1) * test_size..(2 * i + 2) * test_size], h_evals_coset[i * test_size..(i + 1) * test_size]);
}
}
#[test]
fn test_point_evaluation_on_coset() {
// checks that evaluating a polynomial on a subgroup and its coset is the same as evaluating on a 2x larger subgroup
let log_test_size = 8;
let test_size = 1 << log_test_size;
let (_, mut d_coeffs, mut d_domain) = set_up_points(test_size, log_test_size, false);
let (_, _, mut d_large_domain) = set_up_points(0, log_test_size + 1, false);
let mut d_coset_powers = build_domain(test_size, log_test_size + 1, false);
let mut d_evals_large = evaluate_points(&mut d_coeffs, &mut d_large_domain);
let mut h_evals_large: Vec<Point> = (0..2 * test_size).map(|_| Point::zero()).collect();
d_evals_large.copy_to(&mut h_evals_large[..]).unwrap();
let mut d_evals = evaluate_points(&mut d_coeffs, &mut d_domain);
let mut h_evals: Vec<Point> = (0..test_size).map(|_| Point::zero()).collect();
d_evals.copy_to(&mut h_evals[..]).unwrap();
let mut d_evals_coset = evaluate_points_on_coset(&mut d_coeffs, &mut d_domain, &mut d_coset_powers);
let mut h_evals_coset: Vec<Point> = (0..test_size).map(|_| Point::zero()).collect();
d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();
assert_eq!(h_evals[..], h_evals_large[..test_size]);
assert_eq!(h_evals_coset[..], h_evals_large[test_size..2 * test_size]);
for i in 0..test_size {
assert_ne!(h_evals[i], Point::zero());
assert_ne!(h_evals_coset[i], Point::zero());
assert_ne!(h_evals_large[2 * i], Point::zero());
assert_ne!(h_evals_large[2 * i + 1], Point::zero());
}
}
#[test]
fn test_point_batch_evaluation_on_coset() {
// checks that evaluating a polynomial on a subgroup and its coset is the same as evaluating on a 2x larger subgroup
let batch_size = 2;
let log_test_size = 6;
let test_size = 1 << log_test_size;
let (_, mut d_coeffs, mut d_domain) = set_up_points(test_size * batch_size, log_test_size, false);
let (_, _, mut d_large_domain) = set_up_points(0, log_test_size + 1, false);
let mut d_coset_powers = build_domain(test_size, log_test_size + 1, false);
let mut d_evals_large = evaluate_points_batch(&mut d_coeffs, &mut d_large_domain, batch_size);
let mut h_evals_large: Vec<Point> = (0..2 * test_size * batch_size).map(|_| Point::zero()).collect();
d_evals_large.copy_to(&mut h_evals_large[..]).unwrap();
let mut d_evals = evaluate_points_batch(&mut d_coeffs, &mut d_domain, batch_size);
let mut h_evals: Vec<Point> = (0..test_size * batch_size).map(|_| Point::zero()).collect();
d_evals.copy_to(&mut h_evals[..]).unwrap();
let mut d_evals_coset = evaluate_points_on_coset_batch(&mut d_coeffs, &mut d_domain, batch_size, &mut d_coset_powers);
let mut h_evals_coset: Vec<Point> = (0..test_size * batch_size).map(|_| Point::zero()).collect();
d_evals_coset.copy_to(&mut h_evals_coset[..]).unwrap();
for i in 0..batch_size {
assert_eq!(h_evals_large[2 * i * test_size..(2 * i + 1) * test_size], h_evals[i * test_size..(i + 1) * test_size]);
assert_eq!(h_evals_large[(2 * i + 1) * test_size..(2 * i + 2) * test_size], h_evals_coset[i * test_size..(i + 1) * test_size]);
}
for i in 0..test_size * batch_size {
assert_ne!(h_evals[i], Point::zero());
assert_ne!(h_evals_coset[i], Point::zero());
assert_ne!(h_evals_large[2 * i], Point::zero());
assert_ne!(h_evals_large[2 * i + 1], Point::zero());
}
}
// testing matrix multiplication by comparing the result of FFT with the naive multiplication by the DFT matrix
#[test]
fn test_matrix_multiplication() {
let seed = None; // set to Some(value) to fix the rng seed
let test_size = 1 << 5;
let rou = Fr::get_root_of_unity(test_size).unwrap();
let matrix_flattened: Vec<Scalar> = (0..test_size).map(
|row_num| { (0..test_size).map(
|col_num| {
let pow: [u64; 1] = [(row_num * col_num).try_into().unwrap()];
Scalar::from_ark(Fr::pow(&rou, &pow).into_repr())
}).collect::<Vec<Scalar>>()
}).flatten().collect::<Vec<_>>();
let vector: Vec<Scalar> = generate_random_scalars(test_size, get_rng(seed));
let result = mult_matrix_by_vec(&matrix_flattened, &vector, 0);
let mut ntt_result = vector.clone();
ntt(&mut ntt_result, 0);
// we don't use the same roots of unity as arkworks, so the results are permutations
// of one another and the only guaranteed fixed scalars are the following ones:
assert_eq!(result[0], ntt_result[0]);
assert_eq!(result[test_size >> 1], ntt_result[test_size >> 1]);
}
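Why exactly indices 0 and test_size/2? Row 0 of the DFT matrix is all ones, and row n/2 is (1, -1, 1, -1, \dots) because \omega^{n/2} = -1 for every primitive n-th root \omega, so

\mathrm{NTT}(a)_0 = \sum_i a_i, \qquad \mathrm{NTT}(a)_{n/2} = \sum_i (-1)^i a_i,

independently of which primitive root is chosen; every other output index moves under the permutation induced by switching roots.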
#[test]
#[allow(non_snake_case)]
fn test_vec_scalar_mul() {
let mut inout = [Scalar::one(), Scalar::one(), Scalar::zero()];
let expected = [Scalar::one(), Scalar::zero(), Scalar::zero()];
mult_sc_vec(&mut inout, &expected, 0);
assert_eq!(inout, expected);
}
#[test]
#[allow(non_snake_case)]
fn test_vec_point_mul() {
let dummy_one = Point {
x: Base::one(),
y: Base::one(),
z: Base::one(),
};
let mut inout = [dummy_one, dummy_one, Point::zero()];
let scalars = [Scalar::one(), Scalar::zero(), Scalar::zero()];
let expected = [dummy_one, Point::zero(), Point::zero()];
multp_vec(&mut inout, &scalars, 0);
assert_eq!(inout, expected);
}
}

View File

@@ -1,20 +1,13 @@
{
"curve_name" : "bls12_377",
"modulus_p" : 8444461749428370424248824938781546531375899335154063827935233455917409239041,
"modolus_p" : 8444461749428370424248824938781546531375899335154063827935233455917409239041,
"bit_count_p" : 253,
"limb_p" : 8,
"ntt_size" : 32,
"modulus_q" : 258664426012969094010652733694893533536393512754914660539884262666720468348340822774968888139573360124440321458177,
"modolus_q" : 258664426012969094010652733694893533536393512754914660539884262666720468348340822774968888139573360124440321458177,
"bit_count_q" : 377,
"limb_q" : 12,
"root_of_unity" : 5928890464389279575069867463136436689218492512582288454256978381122364252082,
"weierstrass_b" : 1,
"weierstrass_b_g2_re" : 0,
"weierstrass_b_g2_im" : 155198655607781456406391640216936120121836107652948796323930557600032281009004493664981332883744016074664192874906,
"g1_gen_x" : 81937999373150964239938255573465948239988671502647976594219695644855304257327692006745978603320413799295628339695,
"g1_gen_y" : 241266749859715473739788878240585681733927191168601896383759122102112907357779751001206799952863815012735208165030,
"g2_gen_x_re" : 233578398248691099356572568220835526895379068987715365179118596935057653620464273615301663571204657964920925606294,
"g2_gen_x_im" : 140913150380207355837477652521042157274541796891053068589147167627541651775299824604154852141315666357241556069118,
"g2_gen_y_re" : 63160294768292073209381361943935198908131692476676907196754037919244929611450776219210369229519898517858833747423,
"g2_gen_y_im" : 149157405641012693445398062341192467754805999074082136895788947234480009303640899064710353187729182149407503257491
"gen_x" : 81937999373150964239938255573465948239988671502647976594219695644855304257327692006745978603320413799295628339695,
"gen_y" : 241266749859715473739788878240585681733927191168601896383759122102112907357779751001206799952863815012735208165030
}

View File

@@ -1,20 +1,13 @@
{
"curve_name" : "bls12_381",
"modulus_p" : 52435875175126190479447740508185965837690552500527637822603658699938581184513,
"modolus_p" : 52435875175126190479447740508185965837690552500527637822603658699938581184513,
"bit_count_p" : 255,
"limb_p" : 8,
"ntt_size" : 32,
"modulus_q" : 4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787,
"modolus_q" : 4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787,
"bit_count_q" : 381,
"limb_q" : 12,
"root_of_unity" : 937917089079007706106976984802249742464848817460758522850752807661925904159,
"weierstrass_b" : 4,
"weierstrass_b_g2_re":4,
"weierstrass_b_g2_im":4,
"g1_gen_x" : 3685416753713387016781088315183077757961620795782546409894578378688607592378376318836054947676345821548104185464507,
"g1_gen_y" : 1339506544944476473020471379941921221584933875938349620426543736416511423956333506472724655353366534992391756441569,
"g2_gen_x_re" : 352701069587466618187139116011060144890029952792775240219908644239793785735715026873347600343865175952761926303160,
"g2_gen_x_im" : 3059144344244213709971259814753781636986470325476647558659373206291635324768958432433509563104347017837885763365758,
"g2_gen_y_re" : 1985150602287291935568054521177171638300868978215655730859378665066344726373823718423869104263333984641494340347905,
"g2_gen_y_im" : 927553665492332455747201965776037880757740193453592970025027978793976877002675564980949289727957565575433344219582
"gen_x" : 3685416753713387016781088315183077757961620795782546409894578378688607592378376318836054947676345821548104185464507,
"gen_y" : 1339506544944476473020471379941921221584933875938349620426543736416511423956333506472724655353366534992391756441569
}

View File

@@ -1,20 +1,13 @@
{
"curve_name" : "bn254",
"modulus_p" : 21888242871839275222246405745257275088548364400416034343698204186575808495617,
"modolus_p" : 21888242871839275222246405745257275088548364400416034343698204186575808495617,
"bit_count_p" : 254,
"limb_p" : 8,
"ntt_size" : 20,
"modulus_q" : 21888242871839275222246405745257275088696311157297823662689037894645226208583,
"ntt_size" : 16,
"modolus_q" : 21888242871839275222246405745257275088696311157297823662689037894645226208583,
"bit_count_q" : 254,
"limb_q" : 8,
"root_of_unity" : 19032961837237948602743626455740240236231119053033140765040043513661803148152,
"weierstrass_b" : 3,
"weierstrass_b_g2_re" : 19485874751759354771024239261021720505790618469301721065564631296452457478373,
"weierstrass_b_g2_im" : 266929791119991161246907387137283842545076965332900288569378510910307636690,
"g1_gen_x" : 1,
"g1_gen_y" : 2,
"g2_gen_x_re" : 10857046999023057135944570762232829481370756359578518086990519993285655852781,
"g2_gen_x_im" : 11559732032986387107991004021392285783925812861821192530917403151452391805634,
"g2_gen_y_re" : 8495653923123431417604973247489272438418190587263600148770280649306958101930,
"g2_gen_y_im" : 4082367875863433681332203403145435568316851327593401208105741076214120093531
"gen_x" : 1,
"gen_y" : 2
}
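One consistency check worth stating for all three parameter files: the limb counts are simply the sizes of the moduli in 32-bit words,

\text{limb} = \lceil \text{bit\_count} / 32 \rceil,

which gives 8 limbs (256 bits) for the 253-255-bit scalar fields and bn254's 254-bit base field, and 12 limbs (384 bits) for the 377- and 381-bit base fields of the BLS curves.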

View File

@@ -1,12 +1,30 @@
import json
import math
import os
from string import Template
from sympy.ntheory import isprime, primitive_root
import subprocess
import random
import sys
data = None
with open(sys.argv[1]) as json_file:
data = json.load(json_file)
def to_hex(val: int, length):
x = hex(val)[2:]
curve_name = data["curve_name"]
modolus_p = data["modolus_p"]
bit_count_p = data["bit_count_p"]
limb_p = data["limb_p"]
ntt_size = data["ntt_size"]
modolus_q = data["modolus_q"]
bit_count_q = data["bit_count_q"]
limb_q = data["limb_q"]
weierstrass_b = data["weierstrass_b"]
gen_x = data["gen_x"]
gen_y = data["gen_y"]
def to_hex(val, length):
x = str(hex(val))[2:]
if len(x) % 8 != 0:
x = "0" * (8-len(x) % 8) + x
if len(x) != length:
@@ -15,260 +33,133 @@ def to_hex(val: int, length):
chunks = [x[i:i+n] for i in range(0, len(x), n)][::-1]
s = ""
for c in chunks:
s += f'0x{c}, '
return s[:-2]
s += "0x" + c + ", "
return s
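Both versions of to_hex perform the same transformation: left-pad the hex string of the value to a whole number of 32-bit words, split it into 8-hex-digit chunks, and reverse the chunks so the least significant limb comes first. The same limb extraction done arithmetically, as a Rust sketch on a u128 stand-in (the real moduli need a bigint type, so this is illustrative only):

fn to_limbs_le(mut value: u128, limbs: usize) -> Vec<u32> {
    // least-significant 32-bit limb first, matching the reversed 8-hex-digit chunks
    let mut out = Vec::with_capacity(limbs);
    for _ in 0..limbs {
        out.push((value & 0xffff_ffff) as u32);
        value >>= 32;
    }
    out
}

For example, to_limbs_le(1, 8) yields [1, 0, 0, 0, 0, 0, 0, 0], i.e. the "one" constant the script emits as 0x00000001, 0x00000000, ....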
def compute_values(modulus, modulus_bit_count, limbs):
limb_size = 8*limbs
modulus_ = to_hex(modulus,limb_size)
modulus_2 = to_hex(modulus*2,limb_size)
modulus_4 = to_hex(modulus*4,limb_size)
modulus_wide = to_hex(modulus,limb_size*2)
modulus_squared = to_hex(modulus*modulus,limb_size)
modulus_squared_2 = to_hex(modulus*modulus*2,limb_size)
modulus_squared_4 = to_hex(modulus*modulus*4,limb_size)
m_raw = int(math.floor(int(pow(2,2*modulus_bit_count) // modulus)))
m = to_hex(m_raw,limb_size)
one = to_hex(1,limb_size)
zero = to_hex(0,limb_size)
def get_root_of_unity(order: int) -> int:
assert (modolus_p - 1) % order == 0
return pow(5, (modolus_p - 1) // order, modolus_p)
return (
modulus_,
modulus_2,
modulus_4,
modulus_wide,
modulus_squared,
modulus_squared_2,
modulus_squared_4,
m,
one,
zero
)
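The constant m computed here is the Barrett reduction constant: with k the modulus bit count,

m = \left\lfloor \frac{2^{2k}}{p} \right\rfloor,

so a product x < p^2 can be reduced without division via q = \lfloor x \cdot m / 2^{2k} \rfloor and r = x - q \cdot p, which leaves r < 3p and hence at most two conditional subtractions; that is presumably also why the 2p, 4p, and p^2 tables are precomputed alongside it.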
def create_field_parameters_struct(modulus, modulus_bits_count,limbs,ntt,size,name):
s = " struct "+name+"{\n"
s += " static constexpr unsigned limbs_count = " + str(limbs)+";\n"
s += " static constexpr storage<limbs_count> modulus = {"+to_hex(modulus,8*limbs)[:-2]+"};\n"
s += " static constexpr storage<limbs_count> modulus_2 = {"+to_hex(modulus*2,8*limbs)[:-2]+"};\n"
s += " static constexpr storage<limbs_count> modulus_4 = {"+to_hex(modulus*4,8*limbs)[:-2]+"};\n"
s += " static constexpr storage<2*limbs_count> modulus_wide = {"+to_hex(modulus,8*limbs*2)[:-2]+"};\n"
s += " static constexpr storage<2*limbs_count> modulus_sqared = {"+to_hex(modulus*modulus,8*limbs)[:-2]+"};\n"
s += " static constexpr storage<2*limbs_count> modulus_sqared_2 = {"+to_hex(modulus*modulus*2,8*limbs)[:-2]+"};\n"
s += " static constexpr storage<2*limbs_count> modulus_sqared_4 = {"+to_hex(modulus*modulus*2*2,8*limbs)[:-2]+"};\n"
s += " static constexpr unsigned modulus_bits_count = "+str(modulus_bits_count)+";\n"
m = int(math.floor(int(pow(2,2*modulus_bits_count) // modulus)))
s += " static constexpr storage<limbs_count> m = {"+ to_hex(m,8*limbs)[:-2] +"};\n"
s += " static constexpr storage<limbs_count> one = {"+ to_hex(1,8*limbs)[:-2] +"};\n"
s += " static constexpr storage<limbs_count> zero = {"+ to_hex(0,8*limbs)[:-2] +"};\n"
def get_fq_params(modulus, modulus_bit_count, limbs, g1_gen_x, g1_gen_y, g2_gen_x_re, g2_gen_x_im, g2_gen_y_re, g2_gen_y_im):
(
modulus,
modulus_2,
modulus_4,
modulus_wide,
modulus_squared,
modulus_squared_2,
modulus_squared_4,
m,
one,
zero
) = compute_values(modulus, modulus_bit_count, limbs)
limb_size = 8*limbs
return {
'fq_modulus': modulus,
'fq_modulus_2': modulus_2,
'fq_modulus_4': modulus_4,
'fq_modulus_wide': modulus_wide,
'fq_modulus_squared': modulus_squared,
'fq_modulus_squared_2': modulus_squared_2,
'fq_modulus_squared_4': modulus_squared_4,
'fq_m': m,
'fq_one': one,
'fq_zero': zero,
'fq_gen_x': to_hex(g1_gen_x, limb_size),
'fq_gen_y': to_hex(g1_gen_y, limb_size),
'fq_gen_x_re': to_hex(g2_gen_x_re, limb_size),
'fq_gen_x_im': to_hex(g2_gen_x_im, limb_size),
'fq_gen_y_re': to_hex(g2_gen_y_re, limb_size),
'fq_gen_y_im': to_hex(g2_gen_y_im, limb_size)
}
def get_fp_params(modulus, modulus_bit_count, limbs, root_of_unity, size=0):
(
modulus_,
modulus_2,
modulus_4,
modulus_wide,
modulus_squared,
modulus_squared_2,
modulus_squared_4,
m,
one,
zero
) = compute_values(modulus, modulus_bit_count, limbs)
limb_size = 8*limbs
if size > 0:
omega = ''
omega_inv = ''
inv = ''
omegas = []
omegas_inv = []
if ntt:
for k in range(size):
if k == 0:
om = root_of_unity
else:
om = pow(om, 2, modulus)
omegas.append(om)
omegas_inv.append(pow(om, -1, modulus))
omegas.reverse()
omegas_inv.reverse()
omega = get_root_of_unity(int(pow(2,k+1)))
s += " static constexpr storage<limbs_count> omega"+str(k+1)+"= {"+ to_hex(omega,8*limbs)[:-2]+"};\n"
for k in range(size):
omega += "\n {"+ to_hex(omegas[k],limb_size)+"}," if k>0 else " {"+ to_hex(omegas[k],limb_size)+"},"
omega_inv += "\n {"+ to_hex(omegas_inv[k],limb_size)+"}," if k>0 else " {"+ to_hex(omegas_inv[k],limb_size)+"},"
inv += "\n {"+ to_hex(pow(int(pow(2,k+1)), -1, modulus),limb_size)+"}," if k>0 else " {"+ to_hex(pow(int(pow(2,k+1)), -1, modulus),limb_size)+"},"
return {
'fp_modulus': modulus_,
'fp_modulus_2': modulus_2,
'fp_modulus_4': modulus_4,
'fp_modulus_wide': modulus_wide,
'fp_modulus_squared': modulus_squared,
'fp_modulus_squared_2': modulus_squared_2,
'fp_modulus_squared_4': modulus_squared_4,
'fp_m': m,
'fp_one': one,
'fp_zero': zero,
'omega': omega[:-1],
'omega_inv': omega_inv[:-1],
'inv': inv[:-1],
}
omega = get_root_of_unity(int(pow(2,k+1)))
s += " static constexpr storage<limbs_count> omega_inv"+str(k+1)+"= {"+ to_hex(pow(omega, -1, modulus),8*limbs)[:-2]+"};\n"
for k in range(size):
s += " static constexpr storage<limbs_count> inv"+str(k+1)+"= {"+ to_hex(pow(int(pow(2,k+1)), -1, modulus),8*limbs)[:-2]+"};\n"
s+=" };\n"
return s
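The omega-table logic above (start from the maximal two-adic root of unity, square repeatedly, then reverse so index k holds a root of order 2^(k+1)) can be checked in miniature over a small NTT-friendly field. A self-contained Rust sketch using the Fermat prime p = 257, whose multiplicative group has order 256 = 2^8 with primitive root 3 (the toy prime and generator are assumptions for illustration, not the script's curve fields):

fn pow_mod(mut b: u64, mut e: u64, p: u64) -> u64 {
    let mut acc = 1u64;
    b %= p;
    while e > 0 {
        if e & 1 == 1 { acc = acc * b % p; }
        b = b * b % p;
        e >>= 1;
    }
    acc
}

fn main() {
    let p: u64 = 257; // 2^8 + 1, so the multiplicative group has order exactly 2^8
    let size = 8;
    let mut om: u64 = 3; // order-2^8 root of unity, since 3 generates the group
    let (mut omegas, mut omegas_inv) = (Vec::new(), Vec::new());
    for k in 0..size {
        if k > 0 { om = om * om % p; } // squaring halves the order
        omegas.push(om);
        omegas_inv.push(pow_mod(om, p - 2, p)); // inverse via Fermat's little theorem
    }
    omegas.reverse(); // omegas[k] now has order 2^(k+1), matching the script
    omegas_inv.reverse();
    for (k, w) in omegas.iter().enumerate() {
        assert_eq!(pow_mod(*w, 1u64 << (k + 1), p), 1);
        assert_ne!(pow_mod(*w, 1u64 << k, p), 1); // primitive, not a lower-order root
    }
}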
def create_gen():
s = " struct group_generator {\n"
s += " static constexpr storage<fq_config::limbs_count> generator_x = {"+to_hex(gen_x,8*limb_q)[:-2]+ "};\n"
s += " static constexpr storage<fq_config::limbs_count> generator_y = {"+to_hex(gen_y,8*limb_q)[:-2]+ "};\n"
s+=" };\n"
return s
def get_weier_params(weierstrass_b, weierstrass_b_g2_re, weierstrass_b_g2_im, size):
return {
'weier_b': to_hex(weierstrass_b, size),
'weier_b_g2_re': to_hex(weierstrass_b_g2_re, size),
'weier_b_g2_im': to_hex(weierstrass_b_g2_im, size),
}
def get_config_file_content(modolus_p, bit_count_p, limb_p, ntt_size, modolus_q, bit_count_q, limb_q, weierstrass_b):
file_content = ""
file_content += "#pragma once\n#include \"../../utils/storage.cuh\"\n"
file_content += "namespace PARAMS_"+curve_name.upper()+"{\n"
file_content += create_field_parameters_struct(modolus_p,bit_count_p,limb_p,True,ntt_size,"fp_config")
file_content += create_field_parameters_struct(modolus_q,bit_count_q,limb_q,False,0,"fq_config")
file_content += " static constexpr unsigned weierstrass_b = " + str(weierstrass_b)+ ";\n"
file_content += create_gen()
file_content+="}\n"
return file_content
def get_params(config):
global ntt_size
curve_name = config["curve_name"]
modulus_p = config["modulus_p"]
bit_count_p = config["bit_count_p"]
limb_p = config["limb_p"]
ntt_size = config["ntt_size"]
modulus_q = config["modulus_q"]
bit_count_q = config["bit_count_q"]
limb_q = config["limb_q"]
root_of_unity = config["root_of_unity"]
if root_of_unity == modulus_p:
sys.exit("Invalid root_of_unity value; please update in curve parameters")
weierstrass_b = config["weierstrass_b"]
weierstrass_b_g2_re = config["weierstrass_b_g2_re"]
weierstrass_b_g2_im = config["weierstrass_b_g2_im"]
g1_gen_x = config["g1_gen_x"]
g1_gen_y = config["g1_gen_y"]
g2_generator_x_re = config["g2_gen_x_re"]
g2_generator_x_im = config["g2_gen_x_im"]
g2_generator_y_re = config["g2_gen_y_re"]
g2_generator_y_im = config["g2_gen_y_im"]
params = {
'curve_name_U': curve_name.upper(),
'fp_num_limbs': limb_p,
'fq_num_limbs': limb_q,
'fp_modulus_bit_count': bit_count_p,
'fq_modulus_bit_count': bit_count_q,
'num_omegas': ntt_size
}
fp_params = get_fp_params(modulus_p, bit_count_p, limb_p, root_of_unity, ntt_size)
fq_params = get_fq_params(modulus_q, bit_count_q, limb_q, g1_gen_x, g1_gen_y, g2_generator_x_re, g2_generator_x_im, g2_generator_y_re, g2_generator_y_im)
weier_params = get_weier_params(weierstrass_b, weierstrass_b_g2_re, weierstrass_b_g2_im, 8*limb_q)
return {
**params,
**fp_params,
**fq_params,
**weier_params
}
config = None
with open(sys.argv[1]) as json_file:
config = json.load(json_file)
curve_name_lower = config["curve_name"].lower()
curve_name_upper = config["curve_name"].upper()
limb_q = config["limb_q"]
limb_p = config["limb_p"]
# Create Cuda interface
newpath = f'./icicle/curves/{curve_name_lower}'
newpath = "./icicle-cuda/curves/"+curve_name
if not os.path.exists(newpath):
os.makedirs(newpath)
with open("./icicle/curves/curve_template/params.cuh.tmpl", "r") as params_file:
params_file_template = Template(params_file.read())
params = get_params(config)
params_content = params_file_template.safe_substitute(params)
with open(f'./icicle/curves/{curve_name_lower}/params.cuh', 'w') as f:
f.write(params_content)
fc = get_config_file_content(modolus_p, bit_count_p, limb_p, ntt_size, modolus_q, bit_count_q, limb_q, weierstrass_b)
text_file = open("./icicle-cuda/curves/"+curve_name+"/params.cuh", "w")
n = text_file.write(fc)
text_file.close()
with open("./icicle/curves/curve_template/lde.cu.tmpl", "r") as lde_file:
template_content = Template(lde_file.read())
lde_content = template_content.safe_substitute(
CURVE_NAME_U=curve_name_upper,
CURVE_NAME_L=curve_name_lower
)
with open(f'./icicle/curves/{curve_name_lower}/lde.cu', 'w') as f:
f.write(lde_content)
with open("./icicle-cuda/curves/curve_template/lde.cu", "r") as lde_file:
content = lde_file.read()
content = content.replace("CURVE_NAME_U",curve_name.upper())
content = content.replace("CURVE_NAME_L",curve_name.lower())
text_file = open("./icicle-cuda/curves/"+curve_name+"/lde.cu", "w")
n = text_file.write(content)
text_file.close()
with open("./icicle/curves/curve_template/msm.cu.tmpl", "r") as msm_file:
template_content = Template(msm_file.read())
msm_content = template_content.safe_substitute(
CURVE_NAME_U=curve_name_upper,
CURVE_NAME_L=curve_name_lower
)
with open(f'./icicle/curves/{curve_name_lower}/msm.cu', 'w') as f:
f.write(msm_content)
with open("./icicle-cuda/curves/curve_template/msm.cu", "r") as msm_file:
content = msm_file.read()
content = content.replace("CURVE_NAME_U",curve_name.upper())
content = content.replace("CURVE_NAME_L",curve_name.lower())
text_file = open("./icicle-cuda/curves/"+curve_name+"/msm.cu", "w")
n = text_file.write(content)
text_file.close()
with open("./icicle/curves/curve_template/ve_mod_mult.cu.tmpl", "r") as ve_mod_mult_file:
template_content = Template(ve_mod_mult_file.read())
ve_mod_mult_content = template_content.safe_substitute(
CURVE_NAME_U=curve_name_upper,
CURVE_NAME_L=curve_name_lower
)
with open(f'./icicle/curves/{curve_name_lower}/ve_mod_mult.cu', 'w') as f:
f.write(ve_mod_mult_content)
with open("./icicle-cuda/curves/curve_template/ve_mod_mult.cu", "r") as ve_mod_mult_file:
content = ve_mod_mult_file.read()
content = content.replace("CURVE_NAME_U",curve_name.upper())
content = content.replace("CURVE_NAME_L",curve_name.lower())
text_file = open("./icicle-cuda/curves/"+curve_name+"/ve_mod_mult.cu", "w")
n = text_file.write(content)
text_file.close()
with open(f'./icicle/curves/curve_template/curve_config.cuh.tmpl', 'r') as cc:
template_content = Template(cc.read())
cc_content = template_content.safe_substitute(
CURVE_NAME_U=curve_name_upper,
)
with open(f'./icicle/curves/{curve_name_lower}/curve_config.cuh', 'w') as f:
f.write(cc_content)
namespace = '#include "params.cuh"\n'+'''namespace CURVE_NAME_U {
typedef Field<PARAMS_CURVE_NAME_U::fp_config> scalar_field_t;\
typedef scalar_field_t scalar_t;\
typedef Field<PARAMS_CURVE_NAME_U::fq_config> point_field_t;
typedef Projective<point_field_t, scalar_field_t, PARAMS_CURVE_NAME_U::group_generator, PARAMS_CURVE_NAME_U::weierstrass_b> projective_t;
typedef Affine<point_field_t> affine_t;
}'''
with open('./icicle-cuda/curves/'+curve_name+'/curve_config.cuh', 'w') as f:
f.write(namespace.replace("CURVE_NAME_U",curve_name.upper()))
eq = '''
#include <cuda.h>\n
#include "curve_config.cuh"\n
#include "../../primitives/projective.cuh"\n
extern "C" bool eq_CURVE_NAME_L(CURVE_NAME_U::projective_t *point1, CURVE_NAME_U::projective_t *point2)
{
return (*point1 == *point2);
}'''
with open(f'./icicle/curves/curve_template/projective.cu.tmpl', 'r') as proj:
template_content = Template(proj.read())
proj_content = template_content.safe_substitute(
CURVE_NAME_U=curve_name_upper,
CURVE_NAME_L=curve_name_lower
)
with open(f'./icicle/curves/{curve_name_lower}/projective.cu', 'w') as f:
f.write(proj_content)
with open('./icicle-cuda/curves/'+curve_name+'/projective.cu', 'w') as f:
f.write(eq.replace("CURVE_NAME_U",curve_name.upper()).replace("CURVE_NAME_L",curve_name.lower()))
supported_operations = '''
#include "projective.cu"
#include "lde.cu"
#include "msm.cu"
#include "ve_mod_mult.cu"
'''
with open(f'./icicle/curves/curve_template/supported_operations.cu.tmpl', 'r') as supp_ops:
template_content = Template(supp_ops.read())
supp_ops_content = template_content.safe_substitute()
with open(f'./icicle/curves/{curve_name_lower}/supported_operations.cu', 'w') as f:
f.write(supp_ops_content)
with open('./icicle/curves/index.cu', 'r+') as f:
index_text = f.read()
if index_text.find(curve_name_lower) == -1:
f.write(f'\n#include "{curve_name_lower}/supported_operations.cu"')
with open('./icicle-cuda/curves/'+curve_name+'/supported_operations.cu', 'w') as f:
f.write(supported_operations.replace("CURVE_NAME_U",curve_name.upper()).replace("CURVE_NAME_L",curve_name.lower()))
with open('./icicle-cuda/curves/index.cu', 'a') as f:
f.write('\n#include "'+curve_name.lower()+'/supported_operations.cu"')
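Note the behavioral difference in this hunk: the old code re-reads index.cu and appends the include only when the curve is absent, so regenerating a curve is idempotent, while the new code appends unconditionally. The guard pattern, sketched in Rust for comparison (the path handling and function name are illustrative):

use std::fs::{read_to_string, OpenOptions};
use std::io::Write;

fn register_curve(index_path: &str, curve: &str) -> std::io::Result<()> {
    // append the include only if this curve is not registered yet
    let text = read_to_string(index_path)?;
    if !text.contains(curve) {
        let mut f = OpenOptions::new().append(true).open(index_path)?;
        writeln!(f, "\n#include \"{curve}/supported_operations.cu\"")?;
    }
    Ok(())
}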
@@ -277,40 +168,36 @@ with open('./icicle/curves/index.cu', 'r+') as f:
if limb_p == limb_q:
with open("./src/curve_templates/curve_same_limbs.rs", "r") as curve_file:
content = curve_file.read()
content = content.replace("CURVE_NAME_U",curve_name_upper)
content = content.replace("CURVE_NAME_L",curve_name_lower)
content = content.replace("CURVE_NAME_U",curve_name.upper())
content = content.replace("CURVE_NAME_L",curve_name.lower())
content = content.replace("_limbs_p",str(limb_p * 8 * 4))
content = content.replace("limbs_p",str(limb_p))
text_file = open("./src/curves/"+curve_name_lower+".rs", "w")
text_file = open("./src/curves/"+curve_name+".rs", "w")
n = text_file.write(content)
text_file.close()
else:
with open("./src/curve_templates/curve_different_limbs.rs", "r") as curve_file:
content = curve_file.read()
content = content.replace("CURVE_NAME_U",curve_name_upper)
content = content.replace("CURVE_NAME_L",curve_name_lower)
content = content.replace("CURVE_NAME_U",curve_name.upper())
content = content.replace("CURVE_NAME_L",curve_name.lower())
content = content.replace("_limbs_p",str(limb_p * 8 * 4))
content = content.replace("limbs_p",str(limb_p))
content = content.replace("_limbs_q",str(limb_q * 8 * 4))
content = content.replace("limbs_q",str(limb_q))
text_file = open("./src/curves/"+curve_name_lower+".rs", "w")
text_file = open("./src/curves/"+curve_name+".rs", "w")
n = text_file.write(content)
text_file.close()
with open("./src/curve_templates/test.rs", "r") as test_file:
content = test_file.read()
content = content.replace("CURVE_NAME_U",curve_name_upper)
content = content.replace("CURVE_NAME_L",curve_name_lower)
text_file = open("./src/test_"+curve_name_lower+".rs", "w")
content = content.replace("CURVE_NAME_U",curve_name.upper())
content = content.replace("CURVE_NAME_L",curve_name.lower())
text_file = open("./src/test_"+curve_name+".rs", "w")
n = text_file.write(content)
text_file.close()
with open('./src/curves/mod.rs', 'r+') as f:
mod_text = f.read()
if mod_text.find(curve_name_lower) == -1:
f.write('\npub mod ' + curve_name_lower + ';')
with open('./src/curves/mod.rs', 'a') as f:
f.write('\n pub mod ' + curve_name + ';')
with open('./src/lib.rs', 'r+') as f:
lib_text = f.read()
if lib_text.find(curve_name_lower) == -1:
f.write('\npub mod ' + curve_name_lower + ';')
with open('./src/lib.rs', 'a') as f:
f.write('\npub mod ' + curve_name + ';')

View File

@@ -1,156 +0,0 @@
use std::time::Instant;
use icicle_utils::{curves::bls12_381::ScalarField_BLS12_381, test_bls12_381::*};
use rustacuda::prelude::DeviceBuffer;
const LOG_NTT_SIZES: [usize; 3] = [20, 10, 9];
const BATCH_SIZES: [usize; 3] = [1, 1 << 9, 1 << 10];
const MAX_POINTS_LOG2: usize = 18;
const MAX_SCALARS_LOG2: usize = 26;
fn bench_lde() {
for log_ntt_size in LOG_NTT_SIZES {
for batch_size in BATCH_SIZES {
let ntt_size = 1 << log_ntt_size;
fn ntt_scalars_batch_bls12_381(
d_inout: &mut DeviceBuffer<ScalarField_BLS12_381>,
d_twiddles: &mut DeviceBuffer<ScalarField_BLS12_381>,
batch_size: usize,
) -> i32 {
ntt_inplace_batch_bls12_381(d_inout, d_twiddles, batch_size, false, 0);
0
}
fn intt_scalars_batch_bls12_381(
d_inout: &mut DeviceBuffer<ScalarField_BLS12_381>,
d_twiddles: &mut DeviceBuffer<ScalarField_BLS12_381>,
batch_size: usize,
) -> i32 {
ntt_inplace_batch_bls12_381(d_inout, d_twiddles, batch_size, true, 0);
0
}
// copy
bench_ntt_template(
MAX_SCALARS_LOG2,
ntt_size,
batch_size,
log_ntt_size,
set_up_scalars_bls12_381,
evaluate_scalars_batch_bls12_381,
"NTT",
false,
100,
);
bench_ntt_template(
MAX_SCALARS_LOG2,
ntt_size,
batch_size,
log_ntt_size,
set_up_scalars_bls12_381,
interpolate_scalars_batch_bls12_381,
"iNTT",
true,
100,
);
bench_ntt_template(
MAX_POINTS_LOG2,
ntt_size,
batch_size,
log_ntt_size,
set_up_points_bls12_381,
evaluate_points_batch_bls12_381,
"EC NTT",
false,
20,
);
bench_ntt_template(
MAX_POINTS_LOG2,
ntt_size,
batch_size,
log_ntt_size,
set_up_points_bls12_381,
interpolate_points_batch_bls12_381,
"EC iNTT",
true,
20,
);
// inplace
bench_ntt_template(
MAX_SCALARS_LOG2,
ntt_size,
batch_size,
log_ntt_size,
set_up_scalars_bls12_381,
ntt_scalars_batch_bls12_381,
"NTT inplace",
false,
100,
);
bench_ntt_template(
MAX_SCALARS_LOG2,
ntt_size,
batch_size,
log_ntt_size,
set_up_scalars_bls12_381,
intt_scalars_batch_bls12_381,
"iNTT inplace",
true,
100,
);
}
}
}
fn bench_ntt_template<E, S, R>(
log_max_size: usize,
ntt_size: usize,
batch_size: usize,
log_ntt_size: usize,
set_data: fn(test_size: usize, log_domain_size: usize, inverse: bool) -> (Vec<E>, DeviceBuffer<E>, DeviceBuffer<S>),
bench_fn: fn(d_evaluations: &mut DeviceBuffer<E>, d_domain: &mut DeviceBuffer<S>, batch_size: usize) -> R,
id: &str,
inverse: bool,
samples: usize,
) -> Option<(Vec<E>, R)> {
let count = ntt_size * batch_size;
let bench_id = format!("{} of size 2^{} in batch {}", id, log_ntt_size, batch_size);
if count > 1 << log_max_size {
println!("Bench size exceeded: {}", bench_id);
return None;
}
println!("{}", bench_id);
let (input, mut d_evals, mut d_domain) = set_data(ntt_size * batch_size, log_ntt_size, inverse);
let first = bench_fn(&mut d_evals, &mut d_domain, batch_size);
let start = Instant::now();
for _ in 0..samples {
bench_fn(&mut d_evals, &mut d_domain, batch_size);
}
let elapsed = start.elapsed();
println!(
"{} {:0?} us x {} = {:?}",
bench_id,
elapsed.as_micros() as f32 / (samples as f32),
samples,
elapsed
);
Some((input, first))
}
fn main() {
bench_lde();
}
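Stripped of the device buffers, bench_ntt_template reduces to the classic warm-up-then-average harness: one untimed call to absorb one-off costs, then a timed loop divided by the sample count. A generic sketch of that skeleton (not part of the deleted file):

use std::time::Instant;

fn time_avg_us<F: FnMut()>(label: &str, samples: usize, mut f: F) {
    f(); // warm-up: the first call pays one-off setup costs
    let start = Instant::now();
    for _ in 0..samples {
        f();
    }
    let elapsed = start.elapsed();
    println!("{label} {:.1} us x {samples} = {elapsed:?}", elapsed.as_micros() as f32 / samples as f32);
}

Note that for GPU kernels the timed closure must synchronize the device (or the stream) before returning, otherwise the loop only measures launch overhead.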

23
go.mod
View File

@@ -1,23 +0,0 @@
module github.com/ingonyama-zk/icicle
go 1.20
require github.com/consensys/gnark-crypto v0.11.0
require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/jfeliu007/goplantuml v1.6.2 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/spf13/afero v1.8.2 // indirect
golang.org/x/text v0.3.7 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
require (
github.com/bits-and-blooms/bitset v1.5.0 // indirect
github.com/consensys/bavard v0.1.13
github.com/mmcloughlin/addchain v0.4.0 // indirect
github.com/stretchr/testify v1.8.3
golang.org/x/sys v0.2.0 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
)

459
go.sum
View File

@@ -1,459 +0,0 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/bits-and-blooms/bitset v1.5.0 h1:NpE8frKRLGHIcEzkR+gZhiioW1+WbYV6fKwD6ZIpQT8=
github.com/bits-and-blooms/bitset v1.5.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
github.com/consensys/gnark-crypto v0.11.0 h1:QqzHQlwEqlQr5jfWblGDkwlKHpT+4QodYqqExkAtyks=
github.com/consensys/gnark-crypto v0.11.0/go.mod h1:Iq/P3HHl0ElSjsg2E1gsMwhAyxnxoKK5nVyZKd+/KhU=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/jfeliu007/goplantuml v1.6.2 h1:WggxwLIBLF8P9OU/G/H2+IeHpEx5I7QK780jb78twEM=
github.com/jfeliu007/goplantuml v1.6.2/go.mod h1:GnvyYGyIXD68akNFe2FBlNBypwfbpeNmVUQ4ZxJw8iI=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo=
github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=

View File

@@ -1,29 +0,0 @@
CUDA_ROOT_DIR = /usr/local/cuda
NVCC = $(CUDA_ROOT_DIR)/bin/nvcc
CFLAGS = -Xcompiler -fPIC -std=c++17
LDFLAGS = -shared
FEATURES = -DG2_DEFINED
TARGET_BN254 = libbn254.so
TARGET_BLS12_381 = libbls12_381.so
TARGET_BLS12_377 = libbls12_377.so
VPATH = ../icicle/curves/bn254:../icicle/curves/bls12_377:../icicle/curves/bls12_381
SRCS_BN254 = lde.cu msm.cu projective.cu ve_mod_mult.cu
SRCS_BLS12_381 = lde.cu msm.cu projective.cu ve_mod_mult.cu poseidon.cu
SRCS_BLS12_377 = lde.cu msm.cu projective.cu ve_mod_mult.cu
all: $(TARGET_BN254) $(TARGET_BLS12_381) $(TARGET_BLS12_377)
$(TARGET_BN254):
	$(NVCC) $(FEATURES) $(CFLAGS) $(LDFLAGS) $(addprefix ../icicle/curves/bn254/, $(SRCS_BN254)) -o $@
$(TARGET_BLS12_381):
	$(NVCC) $(FEATURES) $(CFLAGS) $(LDFLAGS) $(addprefix ../icicle/curves/bls12_381/, $(SRCS_BLS12_381)) -o $@
$(TARGET_BLS12_377):
	$(NVCC) $(FEATURES) $(CFLAGS) $(LDFLAGS) $(addprefix ../icicle/curves/bls12_377/, $(SRCS_BLS12_377)) -o $@
clean:
	rm -f $(TARGET_BN254) $(TARGET_BLS12_381) $(TARGET_BLS12_377)

View File

@@ -1,49 +0,0 @@
# ICICLE CUDA to Golang Binding Guide
This guide provides instructions on how to compile CUDA code using the provided Makefile, and then how to use the resulting shared libraries to bind Golang to ICICLE's CUDA code.
## Prerequisites
To compile the CUDA files, you will need:
- CUDA toolkit installed. The Makefile assumes CUDA is installed in `/usr/local/cuda`. If CUDA is installed in a different location, please adjust the `CUDA_ROOT_DIR` variable accordingly.
- A compatible GPU and corresponding driver installed on your machine.
## Structure of the Makefile
The Makefile is designed to compile CUDA files for three curves: BN254, BLS12_381, and BLS12_377. The source files are located in the `icicle/curves/` directory.
## Compiling CUDA Code
1. Navigate to the directory containing the Makefile in your terminal.
2. To compile all curve libraries, use the `make all` command. This will create three shared libraries: `libbn254.so`, `libbls12_381.so`, and `libbls12_377.so`.
3. If you want to compile a specific curve, you can do so by specifying the target. For example, to compile only the BN254 curve, use `make libbn254.so`. Replace `libbn254.so` with `libbls12_381.so` or `libbls12_377.so` to compile those curves instead.
The resulting `.so` files are the compiled shared libraries for each curve.
## Golang Binding
The shared libraries produced from the CUDA code compilation are used to bind Golang to ICICLE's CUDA code.
1. These shared libraries (`libbn254.so`, `libbls12_381.so`, `libbls12_377.so`) can be linked into your Go project to leverage the GPU-accelerated functionality provided by ICICLE.
2. In your Go project, you can use `cgo` to link these shared libraries. Here's a basic example of how to do that:
```go
/*
#cgo LDFLAGS: -L/path/to/shared/libs -lbn254 -lbls12_381 -lbls12_377
#include "icicle.h" // make sure you use the correct header file(s)
*/
import "C"
func main() {
// Now you can call the C functions from the ICICLE libraries.
// Note that C function calls are prefixed with 'C.' in Go code.
}
```
Replace `/path/to/shared/libs` with the actual path where the shared libraries are located on your system.
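For a more concrete starting point, here is a minimal sketch of wrapping a single exported function. It assumes `libbls12_377.so` was built with the Makefile above, that `projective.h` from this repository is on the include path, and that scalars are laid out as 8 little-endian 32-bit limbs, as in the generated Go bindings:
```go
package main

/*
#cgo CFLAGS: -I./include/
#cgo LDFLAGS: -L/path/to/shared/libs -lbls12_377
#include "projective.h"
*/
import "C"

import (
	"fmt"
	"unsafe"
)

// G1ScalarField mirrors the limb layout the CUDA library expects.
type G1ScalarField struct {
	S [8]uint32
}

func main() {
	var s G1ScalarField
	// random_scalar_bls12_377 is declared in projective.h and fills `s` in place.
	C.random_scalar_bls12_377((*C.BLS12_377_scalar_t)(unsafe.Pointer(&s)))
	fmt.Println("random scalar limbs:", s.S)
}
```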
## Cleaning up
If you want to remove the compiled files, you can use the `make clean` command. This will remove the `libbn254.so`, `libbls12_381.so`, and `libbls12_377.so` files.

View File

@@ -1,324 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
package bls12377
import (
"unsafe"
"encoding/binary"
)
// #cgo CFLAGS: -I./include/
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbls12_377
// #include "projective.h"
// #include "ve_mod_mult.h"
import "C"
const SCALAR_SIZE = 8
const BASE_SIZE = 12
type G1ScalarField struct {
S [SCALAR_SIZE]uint32
}
type G1BaseField struct {
S [BASE_SIZE]uint32
}
/*
* BaseField Constrctors
*/
func (f *G1BaseField) SetZero() *G1BaseField {
var S [BASE_SIZE]uint32
f.S = S
return f
}
func (f *G1BaseField) SetOne() *G1BaseField {
var S [BASE_SIZE]uint32
S[0] = 1
f.S = S
return f
}
func (p *G1ProjectivePoint) FromAffine(affine *G1PointAffine) *G1ProjectivePoint {
out := (*C.BLS12_377_projective_t)(unsafe.Pointer(p))
in := (*C.BLS12_377_affine_t)(unsafe.Pointer(affine))
C.projective_from_affine_bls12_377(out, in)
return p
}
func (f *G1BaseField) FromLimbs(limbs [BASE_SIZE]uint32) *G1BaseField {
copy(f.S[:], limbs[:])
return f
}
/*
* BaseField methods
*/
func (f *G1BaseField) Limbs() [BASE_SIZE]uint32 {
return f.S
}
func (f *G1BaseField) ToBytesLe() []byte {
bytes := make([]byte, len(f.S)*4)
for i, v := range f.S {
binary.LittleEndian.PutUint32(bytes[i*4:], v)
}
return bytes
}
/*
* ScalarField Constructors
*/
func (p *G1ScalarField) Random() *G1ScalarField {
outC := (*C.BLS12_377_scalar_t)(unsafe.Pointer(p))
C.random_scalar_bls12_377(outC)
return p
}
func (f *G1ScalarField) SetZero() *G1ScalarField {
var S [SCALAR_SIZE]uint32
f.S = S
return f
}
func (f *G1ScalarField) SetOne() *G1ScalarField {
var S [SCALAR_SIZE]uint32
S[0] = 1
f.S = S
return f
}
func (a *G1ScalarField) Eq(b *G1ScalarField) bool {
for i, v := range a.S {
if b.S[i] != v {
return false
}
}
return true
}
/*
* ScalarField methods
*/
func (f *G1ScalarField) Limbs() [SCALAR_SIZE]uint32 {
return f.S
}
func (f *G1ScalarField) ToBytesLe() []byte {
bytes := make([]byte, len(f.S)*4)
for i, v := range f.S {
binary.LittleEndian.PutUint32(bytes[i*4:], v)
}
return bytes
}
/*
* PointBLS12_377
*/
type G1ProjectivePoint struct {
X, Y, Z G1BaseField
}
func (f *G1ProjectivePoint) SetZero() *G1ProjectivePoint {
var yOne G1BaseField
yOne.SetOne()
var xZero G1BaseField
xZero.SetZero()
var zZero G1BaseField
zZero.SetZero()
f.X = xZero
f.Y = yOne
f.Z = zZero
return f
}
func (p *G1ProjectivePoint) Eq(pCompare *G1ProjectivePoint) bool {
// Cast *G1ProjectivePoint to *C.BLS12_377_projective_t
// The unsafe.Pointer cast is necessary because Go doesn't allow direct casts
// between different pointer types.
// It's your responsibility to ensure that the types are compatible.
pC := (*C.BLS12_377_projective_t)(unsafe.Pointer(p))
pCompareC := (*C.BLS12_377_projective_t)(unsafe.Pointer(pCompare))
// Call the C function
// The C function doesn't keep any references to the data,
// so it's fine if the Go garbage collector moves or deletes the data later.
return bool(C.eq_bls12_377(pC, pCompareC))
}
func (p *G1ProjectivePoint) IsOnCurve() bool {
point := (*C.BLS12_377_projective_t)(unsafe.Pointer(p))
res := C.projective_is_on_curve_bls12_377(point)
return bool(res)
}
func (p *G1ProjectivePoint) Random() *G1ProjectivePoint {
outC := (*C.BLS12_377_projective_t)(unsafe.Pointer(p))
C.random_projective_bls12_377(outC)
return p
}
func (p *G1ProjectivePoint) StripZ() *G1PointAffine {
return &G1PointAffine{
X: p.X,
Y: p.Y,
}
}
func (p *G1ProjectivePoint) FromLimbs(x, y, z *[]uint32) *G1ProjectivePoint {
var _x G1BaseField
var _y G1BaseField
var _z G1BaseField
_x.FromLimbs(GetFixedLimbs(x))
_y.FromLimbs(GetFixedLimbs(y))
_z.FromLimbs(GetFixedLimbs(z))
p.X = _x
p.Y = _y
p.Z = _z
return p
}
/*
* PointAffineNoInfinityBLS12_377
*/
type G1PointAffine struct {
X, Y G1BaseField
}
func (p *G1PointAffine) FromProjective(projective *G1ProjectivePoint) *G1PointAffine {
in := (*C.BLS12_377_projective_t)(unsafe.Pointer(projective))
out := (*C.BLS12_377_affine_t)(unsafe.Pointer(p))
C.projective_to_affine_bls12_377(out, in)
return p
}
func (p *G1PointAffine) ToProjective() *G1ProjectivePoint {
var Z G1BaseField
Z.SetOne()
return &G1ProjectivePoint{
X: p.X,
Y: p.Y,
Z: Z,
}
}
func (p *G1PointAffine) FromLimbs(X, Y *[]uint32) *G1PointAffine {
var _x G1BaseField
var _y G1BaseField
_x.FromLimbs(GetFixedLimbs(X))
_y.FromLimbs(GetFixedLimbs(Y))
// Store the parsed limbs on the receiver; previously they were computed and then dropped.
p.X = _x
p.Y = _y
return p
}
/*
* Multiplication
*/
func MultiplyVec(a []G1ProjectivePoint, b []G1ScalarField, deviceID int) {
if len(a) != len(b) {
panic("a and b have different lengths")
}
pointsC := (*C.BLS12_377_projective_t)(unsafe.Pointer(&a[0]))
scalarsC := (*C.BLS12_377_scalar_t)(unsafe.Pointer(&b[0]))
deviceIdC := C.size_t(deviceID)
nElementsC := C.size_t(len(a))
C.vec_mod_mult_point_bls12_377(pointsC, scalarsC, nElementsC, deviceIdC)
}
func MultiplyScalar(a []G1ScalarField, b []G1ScalarField, deviceID int) {
if len(a) != len(b) {
panic("a and b have different lengths")
}
aC := (*C.BLS12_377_scalar_t)(unsafe.Pointer(&a[0]))
bC := (*C.BLS12_377_scalar_t)(unsafe.Pointer(&b[0]))
deviceIdC := C.size_t(deviceID)
nElementsC := C.size_t(len(a))
C.vec_mod_mult_scalar_bls12_377(aC, bC, nElementsC, deviceIdC)
}
// MultiplyMatrix multiplies a flattened matrix by a vector:
//
// `a` - flattened matrix (assumed row-major);
// `b` - vector to multiply `a` by;
//
// and returns the resulting vector.
func MultiplyMatrix(a []G1ScalarField, b []G1ScalarField, deviceID int) []G1ScalarField {
c := make([]G1ScalarField, len(b))
for i := range c {
var p G1ScalarField
p.SetZero()
c[i] = p
}
aC := (*C.BLS12_377_scalar_t)(unsafe.Pointer(&a[0]))
bC := (*C.BLS12_377_scalar_t)(unsafe.Pointer(&b[0]))
cC := (*C.BLS12_377_scalar_t)(unsafe.Pointer(&c[0]))
deviceIdC := C.size_t(deviceID)
nElementsC := C.size_t(len(a))
C.matrix_vec_mod_mult_bls12_377(aC, bC, cC, nElementsC, deviceIdC)
// Previously `c` was computed and then discarded; return it so callers can read the product.
return c
}
/*
* Utils
*/
func GetFixedLimbs(slice *[]uint32) [BASE_SIZE]uint32 {
if len(*slice) <= BASE_SIZE {
limbs := [BASE_SIZE]uint32{}
copy(limbs[:len(*slice)], *slice)
return limbs
}
panic("slice has too many elements")
}
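A minimal usage sketch for the matrix helper above, with `MultiplyMatrix` returning its result as patched here. The import path is hypothetical, and the sketch assumes row-major flattening and that the kernel treats the raw limb encoding of 1 as the field's one:
```go
package main

import (
	"fmt"

	bls12377 "github.com/ingonyama-zk/icicle/goicicle/curves/bls12377" // hypothetical import path
)

func main() {
	var one, zero bls12377.G1ScalarField
	one.SetOne()
	zero.SetZero()

	// 2x2 identity matrix, flattened row-major: [1 0; 0 1].
	matrix := []bls12377.G1ScalarField{one, zero, zero, one}

	var s bls12377.G1ScalarField
	s.Random()
	vector := []bls12377.G1ScalarField{s, s}

	// Under the assumptions above, multiplying by the identity returns the input vector.
	product := bls12377.MultiplyMatrix(matrix, vector, 0)
	fmt.Println("product limbs:", product[0].Limbs(), product[1].Limbs())
}
```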

View File

@@ -1,198 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
package bls12377
import (
"encoding/binary"
"testing"
"github.com/stretchr/testify/assert"
)
func TestNewFieldBLS12_377One(t *testing.T) {
var oneField G1BaseField
oneField.SetOne()
rawOneField := [12]uint32{0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} // G1BaseField has BASE_SIZE = 12 limbs
assert.Equal(t, oneField.S, rawOneField)
}
func TestNewFieldBLS12_377Zero(t *testing.T) {
var zeroField G1BaseField
zeroField.SetZero()
rawZeroField := [12]uint32{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
assert.Equal(t, zeroField.S, rawZeroField)
}
func TestFieldBLS12_377ToBytesLe(t *testing.T) {
var p G1ProjectivePoint
p.Random()
expected := make([]byte, len(p.X.S)*4) // each uint32 takes 4 bytes
for i, v := range p.X.S {
binary.LittleEndian.PutUint32(expected[i*4:], v)
}
assert.Equal(t, p.X.ToBytesLe(), expected)
assert.Equal(t, len(p.X.ToBytesLe()), 48) // 12 base-field limbs * 4 bytes each
}
func TestNewPointBLS12_377Zero(t *testing.T) {
var pointZero G1ProjectivePoint
pointZero.SetZero()
var baseOne G1BaseField
baseOne.SetOne()
var zeroSanity G1BaseField
zeroSanity.SetZero()
assert.Equal(t, pointZero.X, zeroSanity)
assert.Equal(t, pointZero.Y, baseOne)
assert.Equal(t, pointZero.Z, zeroSanity)
}
func TestFromProjectiveToAffine(t *testing.T) {
var projective G1ProjectivePoint
var affine G1PointAffine
projective.Random()
affine.FromProjective(&projective)
var projective2 G1ProjectivePoint
projective2.FromAffine(&affine)
assert.True(t, projective.IsOnCurve())
assert.True(t, projective2.IsOnCurve())
assert.True(t, projective.Eq(&projective2))
}
func TestBLS12_377Eq(t *testing.T) {
var p1 G1ProjectivePoint
p1.Random()
var p2 G1ProjectivePoint
p2.Random()
assert.Equal(t, p1.Eq(&p1), true)
assert.Equal(t, p1.Eq(&p2), false)
}
func TestBLS12_377StripZ(t *testing.T) {
var p1 G1ProjectivePoint
p1.Random()
p2ZLess := p1.StripZ()
assert.IsType(t, G1PointAffine{}, *p2ZLess)
assert.Equal(t, p1.X, p2ZLess.X)
assert.Equal(t, p1.Y, p2ZLess.Y)
}
func TestPointBLS12_377fromLimbs(t *testing.T) {
var p G1ProjectivePoint
p.Random()
x := p.X.Limbs()
y := p.Y.Limbs()
z := p.Z.Limbs()
xSlice := x[:]
ySlice := y[:]
zSlice := z[:]
var pFromLimbs G1ProjectivePoint
pFromLimbs.FromLimbs(&xSlice, &ySlice, &zSlice)
assert.Equal(t, pFromLimbs, p)
}
func TestNewPointAffineNoInfinityBLS12_377Zero(t *testing.T) {
var zeroP G1PointAffine
var zeroSanity G1BaseField
zeroSanity.SetZero()
assert.Equal(t, zeroP.X, zeroSanity)
assert.Equal(t, zeroP.Y, zeroSanity)
}
func TestPointAffineNoInfinityBLS12_377FromLimbs(t *testing.T) {
// Initialize your test values
x := [8]uint32{1, 2, 3, 4, 5, 6, 7, 8}
y := [8]uint32{9, 10, 11, 12, 13, 14, 15, 16}
xSlice := x[:]
ySlice := y[:]
// Execute your function
var result G1PointAffine
result.FromLimbs(&xSlice, &ySlice)
var xBase G1BaseField
var yBase G1BaseField
xBase.FromLimbs(GetFixedLimbs(&xSlice)) // FromLimbs takes [BASE_SIZE]uint32, so pad the 8-limb input
yBase.FromLimbs(GetFixedLimbs(&ySlice))
// Define your expected result
expected := G1PointAffine{
X: xBase,
Y: yBase,
}
// Test if result is as expected
assert.Equal(t, result, expected)
}
func TestGetFixedLimbs(t *testing.T) {
t.Run("case of valid input of length less than 8", func(t *testing.T) {
slice := []uint32{1, 2, 3, 4, 5, 6, 7}
expected := [8]uint32{1, 2, 3, 4, 5, 6, 7, 0}
result := GetFixedLimbs(&slice)
assert.Equal(t, result, expected)
})
t.Run("case of valid input of length 8", func(t *testing.T) {
slice := []uint32{1, 2, 3, 4, 5, 6, 7, 8}
expected := [8]uint32{1, 2, 3, 4, 5, 6, 7, 8}
result := GetFixedLimbs(&slice)
assert.Equal(t, result, expected)
})
t.Run("case of empty input", func(t *testing.T) {
slice := []uint32{}
expected := [8]uint32{0, 0, 0, 0, 0, 0, 0, 0}
result := GetFixedLimbs(&slice)
assert.Equal(t, result, expected)
})
t.Run("case of input length greater than 8", func(t *testing.T) {
slice := []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9}
defer func() {
if r := recover(); r == nil {
t.Errorf("the code did not panic")
}
}()
GetFixedLimbs(&slice)
})
}

View File

@@ -1,112 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
package bls12377
import (
"encoding/binary"
"unsafe"
)
// #cgo CFLAGS: -I./include/
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbls12_377
// #include "projective.h"
// #include "ve_mod_mult.h"
import "C"
// G2 extension field
type G2Element [6]uint64
type ExtentionField struct {
A0, A1 G2Element
}
type G2PointAffine struct {
X, Y ExtentionField
}
type G2Point struct {
X, Y, Z ExtentionField
}
func (p *G2Point) Random() *G2Point {
outC := (*C.BLS12_377_g2_projective_t)(unsafe.Pointer(p))
C.random_g2_projective_bls12_377(outC)
return p
}
func (p *G2Point) FromAffine(affine *G2PointAffine) *G2Point {
out := (*C.BLS12_377_g2_projective_t)(unsafe.Pointer(p))
in := (*C.BLS12_377_g2_affine_t)(unsafe.Pointer(affine))
C.g2_projective_from_affine_bls12_377(out, in)
return p
}
func (p *G2Point) Eq(pCompare *G2Point) bool {
// Cast *G2Point to *C.BLS12_377_g2_projective_t
// The unsafe.Pointer cast is necessary because Go doesn't allow direct casts
// between different pointer types.
// It's your responsibility to ensure that the types are compatible.
pC := (*C.BLS12_377_g2_projective_t)(unsafe.Pointer(p))
pCompareC := (*C.BLS12_377_g2_projective_t)(unsafe.Pointer(pCompare))
// Call the C function
// The C function doesn't keep any references to the data,
// so it's fine if the Go garbage collector moves or deletes the data later.
return bool(C.eq_g2_bls12_377(pC, pCompareC))
}
func (f *G2Element) ToBytesLe() []byte {
var bytes []byte
for _, val := range f {
buf := make([]byte, 8) // 8 bytes because uint64 is 64-bit
binary.LittleEndian.PutUint64(buf, val)
bytes = append(bytes, buf...)
}
return bytes
}
func (p *G2PointAffine) ToProjective() G2Point {
return G2Point{
X: p.X,
Y: p.Y,
Z: ExtentionField{
A0: G2Element{1, 0, 0, 0, 0, 0},
A1: G2Element{0, 0, 0, 0, 0, 0},
},
}
}
func (p *G2PointAffine) FromProjective(projective *G2Point) *G2PointAffine {
out := (*C.BLS12_377_g2_affine_t)(unsafe.Pointer(p))
in := (*C.BLS12_377_g2_projective_t)(unsafe.Pointer(projective))
C.g2_projective_to_affine_bls12_377(out, in)
return p
}
func (p *G2Point) IsOnCurve() bool {
// Cast the Go struct to the corresponding C type and let the CUDA library check the curve equation
point := (*C.BLS12_377_g2_projective_t)(unsafe.Pointer(p))
res := C.g2_projective_is_on_curve_bls12_377(point)
return bool(res)
}

View File

@@ -1,75 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
package bls12377
import (
"github.com/stretchr/testify/assert"
"testing"
)
func TestG2Eqg2(t *testing.T) {
var point G2Point
point.Random()
assert.True(t, point.Eq(&point))
}
func TestG2FromProjectiveToAffine(t *testing.T) {
var projective G2Point
var affine G2PointAffine
projective.Random()
affine.FromProjective(&projective)
var projective2 G2Point
projective2.FromAffine(&affine)
assert.True(t, projective.IsOnCurve())
assert.True(t, projective2.IsOnCurve())
assert.True(t, projective.Eq(&projective2))
}
func TestG2Eqg2NotEqual(t *testing.T) {
var point G2Point
point.Random()
var point2 G2Point
point2.Random()
assert.False(t, point.Eq(&point2))
}
func TestG2ToBytes(t *testing.T) {
element := G2Element{0x6546098ea84b6298, 0x4a384533d1f68aca, 0xaa0666972d771336, 0x1569e4a34321993}
bytes := element.ToBytesLe()
// G2Element has 6 uint64 limbs; the two unset limbs serialize as 16 trailing zero bytes.
assert.Equal(t, bytes, []byte{0x98, 0x62, 0x4b, 0xa8, 0x8e, 0x9, 0x46, 0x65, 0xca, 0x8a, 0xf6, 0xd1, 0x33, 0x45, 0x38, 0x4a, 0x36, 0x13, 0x77, 0x2d, 0x97, 0x66, 0x6, 0xaa, 0x93, 0x19, 0x32, 0x34, 0x4a, 0x9e, 0x56, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0})
}
func TestG2ShouldConvertToProjective(t *testing.T) {
var pointProjective G2Point
var pointAffine G2PointAffine
pointProjective.Random()
pointAffine.FromProjective(&pointProjective)
proj := pointAffine.ToProjective()
assert.True(t, proj.IsOnCurve())
assert.True(t, pointProjective.Eq(&proj))
}

View File

@@ -1,98 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdbool.h>
// msm.h
#ifndef _BLS12_377_MSM_H
#define _BLS12_377_MSM_H
#ifdef __cplusplus
extern "C" {
#endif
// Incomplete (opaque) forward declarations of the BLS12_377 point and scalar structs
typedef struct BLS12_377_projective_t BLS12_377_projective_t;
typedef struct BLS12_377_g2_projective_t BLS12_377_g2_projective_t;
typedef struct BLS12_377_affine_t BLS12_377_affine_t;
typedef struct BLS12_377_g2_affine_t BLS12_377_g2_affine_t;
typedef struct BLS12_377_scalar_t BLS12_377_scalar_t;
typedef cudaStream_t CudaStream_t;
int msm_cuda_bls12_377(
BLS12_377_projective_t* out, BLS12_377_affine_t* points, BLS12_377_scalar_t* scalars, size_t count, size_t device_id);
int msm_batch_cuda_bls12_377(
BLS12_377_projective_t* out,
BLS12_377_affine_t* points,
BLS12_377_scalar_t* scalars,
size_t batch_size,
size_t msm_size,
size_t device_id);
int commit_cuda_bls12_377(
BLS12_377_projective_t* d_out,
BLS12_377_scalar_t* d_scalars,
BLS12_377_affine_t* d_points,
size_t count,
unsigned large_bucket_factor,
size_t device_id);
int commit_batch_cuda_bls12_377(
BLS12_377_projective_t* d_out,
BLS12_377_scalar_t* d_scalars,
BLS12_377_affine_t* d_points,
size_t count,
size_t batch_size,
size_t device_id);
int msm_g2_cuda_bls12_377(
BLS12_377_g2_projective_t* out,
BLS12_377_g2_affine_t* points,
BLS12_377_scalar_t* scalars,
size_t count,
size_t device_id);
int msm_batch_g2_cuda_bls12_377(
BLS12_377_g2_projective_t* out,
BLS12_377_g2_affine_t* points,
BLS12_377_scalar_t* scalars,
size_t batch_size,
size_t msm_size,
size_t device_id);
int commit_g2_cuda_bls12_377(
BLS12_377_g2_projective_t* d_out,
BLS12_377_scalar_t* d_scalars,
BLS12_377_g2_affine_t* d_points,
size_t count,
unsigned large_bucket_factor,
size_t device_id);
int commit_batch_g2_cuda_bls12_377(
BLS12_377_g2_projective_t* d_out,
BLS12_377_scalar_t* d_scalars,
BLS12_377_g2_affine_t* d_points,
size_t count,
size_t batch_size,
size_t device_id,
cudaStream_t stream);
#ifdef __cplusplus
}
#endif
#endif /* _BLS12_377_MSM_H */

View File

@@ -1,195 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
#include <cuda.h>
#include <stdbool.h>
// ntt.h
#ifndef _BLS12_377_NTT_H
#define _BLS12_377_NTT_H
#ifdef __cplusplus
extern "C" {
#endif
// Incomplete (opaque) forward declarations of the BLS12_377 point and scalar structs
typedef struct BLS12_377_projective_t BLS12_377_projective_t;
typedef struct BLS12_377_affine_t BLS12_377_affine_t;
typedef struct BLS12_377_scalar_t BLS12_377_scalar_t;
typedef struct BLS12_377_g2_projective_t BLS12_377_g2_projective_t;
typedef struct BLS12_377_g2_affine_t BLS12_377_g2_affine_t;
int ntt_cuda_bls12_377(BLS12_377_scalar_t* arr, uint32_t n, bool inverse, size_t device_id);
int ntt_batch_cuda_bls12_377(
BLS12_377_scalar_t* arr, uint32_t arr_size, uint32_t batch_size, bool inverse, size_t device_id);
int ecntt_cuda_bls12_377(BLS12_377_projective_t* arr, uint32_t n, bool inverse, size_t device_id);
int ecntt_batch_cuda_bls12_377(
BLS12_377_projective_t* arr, uint32_t arr_size, uint32_t batch_size, bool inverse, size_t device_id);
BLS12_377_scalar_t*
build_domain_cuda_bls12_377(uint32_t domain_size, uint32_t logn, bool inverse, size_t device_id, size_t stream);
int interpolate_scalars_cuda_bls12_377(
BLS12_377_scalar_t* d_out,
BLS12_377_scalar_t* d_evaluations,
BLS12_377_scalar_t* d_domain,
unsigned n,
unsigned device_id,
size_t stream);
int interpolate_scalars_batch_cuda_bls12_377(
BLS12_377_scalar_t* d_out,
BLS12_377_scalar_t* d_evaluations,
BLS12_377_scalar_t* d_domain,
unsigned n,
unsigned batch_size,
size_t device_id,
size_t stream);
int interpolate_points_cuda_bls12_377(
BLS12_377_projective_t* d_out,
BLS12_377_projective_t* d_evaluations,
BLS12_377_scalar_t* d_domain,
unsigned n,
size_t device_id,
size_t stream);
int interpolate_points_batch_cuda_bls12_377(
BLS12_377_projective_t* d_out,
BLS12_377_projective_t* d_evaluations,
BLS12_377_scalar_t* d_domain,
unsigned n,
unsigned batch_size,
size_t device_id,
size_t stream);
int interpolate_scalars_on_coset_cuda_bls12_377(
BLS12_377_scalar_t* d_out,
BLS12_377_scalar_t* d_evaluations,
BLS12_377_scalar_t* d_domain,
unsigned n,
BLS12_377_scalar_t* coset_powers,
size_t device_id,
size_t stream);
int interpolate_scalars_batch_on_coset_cuda_bls12_377(
BLS12_377_scalar_t* d_out,
BLS12_377_scalar_t* d_evaluations,
BLS12_377_scalar_t* d_domain,
unsigned n,
unsigned batch_size,
BLS12_377_scalar_t* coset_powers,
size_t device_id,
size_t stream);
int evaluate_scalars_cuda_bls12_377(
BLS12_377_scalar_t* d_out,
BLS12_377_scalar_t* d_coefficients,
BLS12_377_scalar_t* d_domain,
unsigned domain_size,
unsigned n,
unsigned device_id,
size_t stream);
int evaluate_scalars_batch_cuda_bls12_377(
BLS12_377_scalar_t* d_out,
BLS12_377_scalar_t* d_coefficients,
BLS12_377_scalar_t* d_domain,
unsigned domain_size,
unsigned n,
unsigned batch_size,
size_t device_id,
size_t stream);
int evaluate_points_cuda_bls12_377(
BLS12_377_projective_t* d_out,
BLS12_377_projective_t* d_coefficients,
BLS12_377_scalar_t* d_domain,
unsigned domain_size,
unsigned n,
size_t device_id,
size_t stream);
int evaluate_points_batch_cuda_bls12_377(
BLS12_377_projective_t* d_out,
BLS12_377_projective_t* d_coefficients,
BLS12_377_scalar_t* d_domain,
unsigned domain_size,
unsigned n,
unsigned batch_size,
size_t device_id,
size_t stream);
int evaluate_scalars_on_coset_cuda_bls12_377(
BLS12_377_scalar_t* d_out,
BLS12_377_scalar_t* d_coefficients,
BLS12_377_scalar_t* d_domain,
unsigned domain_size,
unsigned n,
BLS12_377_scalar_t* coset_powers,
unsigned device_id,
size_t stream);
int evaluate_scalars_on_coset_batch_cuda_bls12_377(
BLS12_377_scalar_t* d_out,
BLS12_377_scalar_t* d_coefficients,
BLS12_377_scalar_t* d_domain,
unsigned domain_size,
unsigned n,
unsigned batch_size,
BLS12_377_scalar_t* coset_powers,
size_t device_id,
size_t stream);
int evaluate_points_on_coset_cuda_bls12_377(
BLS12_377_projective_t* d_out,
BLS12_377_projective_t* d_coefficients,
BLS12_377_scalar_t* d_domain,
unsigned domain_size,
unsigned n,
BLS12_377_scalar_t* coset_powers,
size_t device_id,
size_t stream);
int evaluate_points_on_coset_batch_cuda_bls12_377(
BLS12_377_projective_t* d_out,
BLS12_377_projective_t* d_coefficients,
BLS12_377_scalar_t* d_domain,
unsigned domain_size,
unsigned n,
unsigned batch_size,
BLS12_377_scalar_t* coset_powers,
size_t device_id,
size_t stream);
int reverse_order_scalars_cuda_bls12_377(BLS12_377_scalar_t* arr, int n, size_t device_id, size_t stream);
int reverse_order_scalars_batch_cuda_bls12_377(
BLS12_377_scalar_t* arr, int n, int batch_size, size_t device_id, size_t stream);
int reverse_order_points_cuda_bls12_377(BLS12_377_projective_t* arr, int n, size_t device_id, size_t stream);
int reverse_order_points_batch_cuda_bls12_377(
BLS12_377_projective_t* arr, int n, int batch_size, size_t device_id, size_t stream);
int add_scalars_cuda_bls12_377(
BLS12_377_scalar_t* d_out, BLS12_377_scalar_t* d_in1, BLS12_377_scalar_t* d_in2, unsigned n, size_t stream);
int sub_scalars_cuda_bls12_377(
BLS12_377_scalar_t* d_out, BLS12_377_scalar_t* d_in1, BLS12_377_scalar_t* d_in2, unsigned n, size_t stream);
int to_montgomery_scalars_cuda_bls12_377(BLS12_377_scalar_t* d_inout, unsigned n, size_t stream);
int from_montgomery_scalars_cuda_bls12_377(BLS12_377_scalar_t* d_inout, unsigned n, size_t stream);
// points g1
int to_montgomery_proj_points_cuda_bls12_377(BLS12_377_projective_t* d_inout, unsigned n, size_t stream);
int from_montgomery_proj_points_cuda_bls12_377(BLS12_377_projective_t* d_inout, unsigned n, size_t stream);
int to_montgomery_aff_points_cuda_bls12_377(BLS12_377_affine_t* d_inout, unsigned n, size_t stream);
int from_montgomery_aff_points_cuda_bls12_377(BLS12_377_affine_t* d_inout, unsigned n, size_t stream);
// points g2
int to_montgomery_proj_points_g2_cuda_bls12_377(BLS12_377_g2_projective_t* d_inout, unsigned n, size_t stream);
int from_montgomery_proj_points_g2_cuda_bls12_377(BLS12_377_g2_projective_t* d_inout, unsigned n, size_t stream);
int to_montgomery_aff_points_g2_cuda_bls12_377(BLS12_377_g2_affine_t* d_inout, unsigned n, size_t stream);
int from_montgomery_aff_points_g2_cuda_bls12_377(BLS12_377_g2_affine_t* d_inout, unsigned n, size_t stream);
#ifdef __cplusplus
}
#endif
#endif /* _BLS12_377_NTT_H */
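No Go wrapper for these NTT entry points appears in this diff; a minimal cgo sketch against the declarations above (assuming the same library layout, link flags, and 8-limb scalar encoding as the other bindings, and that the input length is a power of two) could look like:
```go
package main

/*
#cgo CFLAGS: -I./include/
#cgo LDFLAGS: -L/path/to/shared/libs -lbls12_377
#include "ntt.h"
*/
import "C"

import (
	"fmt"
	"unsafe"
)

// scalar mirrors the 8 x uint32 little-endian limb layout used by the bindings.
type scalar struct {
	s [8]uint32
}

// ntt runs an in-place (optionally inverse) NTT over the scalars on the given device.
func ntt(scalars []scalar, inverse bool, deviceID int) error {
	if len(scalars) == 0 {
		return fmt.Errorf("empty scalar slice")
	}
	arr := (*C.BLS12_377_scalar_t)(unsafe.Pointer(&scalars[0]))
	// cgo maps the C _Bool parameter to Go's bool, so `inverse` passes through as-is.
	if ret := C.ntt_cuda_bls12_377(arr, C.uint32_t(len(scalars)), inverse, C.size_t(deviceID)); ret != 0 {
		return fmt.Errorf("ntt_cuda_bls12_377 returned error code: %d", ret)
	}
	return nil
}

func main() {
	data := make([]scalar, 4) // NTT sizes are assumed to be powers of two
	if err := ntt(data, false, 0); err != nil {
		fmt.Println(err)
	}
}
```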

View File

@@ -1,50 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
#include <cuda.h>
#include <stdbool.h>
// projective.h
#ifdef __cplusplus
extern "C" {
#endif
typedef struct BLS12_377_projective_t BLS12_377_projective_t;
typedef struct BLS12_377_g2_projective_t BLS12_377_g2_projective_t;
typedef struct BLS12_377_affine_t BLS12_377_affine_t;
typedef struct BLS12_377_g2_affine_t BLS12_377_g2_affine_t;
typedef struct BLS12_377_scalar_t BLS12_377_scalar_t;
bool projective_is_on_curve_bls12_377(BLS12_377_projective_t* point1);
int random_scalar_bls12_377(BLS12_377_scalar_t* out);
int random_projective_bls12_377(BLS12_377_projective_t* out);
BLS12_377_projective_t* projective_zero_bls12_377();
int projective_to_affine_bls12_377(BLS12_377_affine_t* out, BLS12_377_projective_t* point1);
int projective_from_affine_bls12_377(BLS12_377_projective_t* out, BLS12_377_affine_t* point1);
int random_g2_projective_bls12_377(BLS12_377_g2_projective_t* out);
int g2_projective_to_affine_bls12_377(BLS12_377_g2_affine_t* out, BLS12_377_g2_projective_t* point1);
int g2_projective_from_affine_bls12_377(BLS12_377_g2_projective_t* out, BLS12_377_g2_affine_t* point1);
bool g2_projective_is_on_curve_bls12_377(BLS12_377_g2_projective_t* point1);
bool eq_bls12_377(BLS12_377_projective_t* point1, BLS12_377_projective_t* point2);
bool eq_g2_bls12_377(BLS12_377_g2_projective_t* point1, BLS12_377_g2_projective_t* point2);
#ifdef __cplusplus
}
#endif

View File

@@ -1,49 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
#include <cuda.h>
#include <stdbool.h>
// ve_mod_mult.h
#ifndef _BLS12_377_VEC_MULT_H
#define _BLS12_377_VEC_MULT_H
#ifdef __cplusplus
extern "C" {
#endif
typedef struct BLS12_377_projective_t BLS12_377_projective_t;
typedef struct BLS12_377_scalar_t BLS12_377_scalar_t;
int32_t vec_mod_mult_point_bls12_377(
BLS12_377_projective_t* inout, BLS12_377_scalar_t* scalar_vec, size_t n_elments, size_t device_id);
int32_t vec_mod_mult_scalar_bls12_377(
BLS12_377_scalar_t* inout, BLS12_377_scalar_t* scalar_vec, size_t n_elments, size_t device_id);
int32_t vec_mod_mult_device_scalar_bls12_377(
BLS12_377_scalar_t* inout, BLS12_377_scalar_t* scalar_vec, size_t n_elements, size_t device_id);
int32_t matrix_vec_mod_mult_bls12_377(
BLS12_377_scalar_t* matrix_flattened,
BLS12_377_scalar_t* input,
BLS12_377_scalar_t* output,
size_t n_elments,
size_t device_id);
#ifdef __cplusplus
}
#endif
#endif /* _BLS12_377_VEC_MULT_H */

View File

@@ -1,208 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
package bls12377
import (
"errors"
"fmt"
"unsafe"
)
// #cgo CFLAGS: -I./include/
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbls12_377
// #include "msm.h"
import "C"
func Msm(out *G1ProjectivePoint, points []G1PointAffine, scalars []G1ScalarField, device_id int) (*G1ProjectivePoint, error) {
if len(points) != len(scalars) {
return nil, errors.New("error on: len(points) != len(scalars)")
}
pointsC := (*C.BLS12_377_affine_t)(unsafe.Pointer(&points[0]))
scalarsC := (*C.BLS12_377_scalar_t)(unsafe.Pointer(&scalars[0]))
outC := (*C.BLS12_377_projective_t)(unsafe.Pointer(out))
ret := C.msm_cuda_bls12_377(outC, pointsC, scalarsC, C.size_t(len(points)), C.size_t(device_id))
if ret != 0 {
return nil, fmt.Errorf("msm_cuda_bls12_377 returned error code: %d", ret)
}
return out, nil
}
func MsmG2(out *G2Point, points []G2PointAffine, scalars []G1ScalarField, device_id int) (*G2Point, error) {
if len(points) != len(scalars) {
return nil, errors.New("error on: len(points) != len(scalars)")
}
pointsC := (*C.BLS12_377_g2_affine_t)(unsafe.Pointer(&points[0]))
scalarsC := (*C.BLS12_377_scalar_t)(unsafe.Pointer(&scalars[0]))
outC := (*C.BLS12_377_g2_projective_t)(unsafe.Pointer(out))
ret := C.msm_g2_cuda_bls12_377(outC, pointsC, scalarsC, C.size_t(len(points)), C.size_t(device_id))
if ret != 0 {
return nil, fmt.Errorf("msm_g2_cuda_bls12_377 returned error code: %d", ret)
}
return out, nil
}
func MsmBatch(points *[]G1PointAffine, scalars *[]G1ScalarField, batchSize, deviceId int) ([]*G1ProjectivePoint, error) {
// Check for nil pointers
if points == nil || scalars == nil {
return nil, errors.New("points or scalars is nil")
}
if len(*points) != len(*scalars) {
return nil, errors.New("error on: len(points) != len(scalars)")
}
// Check for empty slices
if len(*points) == 0 || len(*scalars) == 0 {
return nil, errors.New("points or scalars is empty")
}
// Check for zero batchSize
if batchSize <= 0 {
return nil, errors.New("error on: batchSize must be greater than zero")
}
// The C code writes batchSize projective points into contiguous memory, so
// allocate a flat buffer of points rather than passing the slice of Go pointers.
results := make([]G1ProjectivePoint, batchSize)
out := make([]*G1ProjectivePoint, batchSize)
for i := 0; i < batchSize; i++ {
results[i].SetZero()
out[i] = &results[i]
}
outC := (*C.BLS12_377_projective_t)(unsafe.Pointer(&results[0]))
pointsC := (*C.BLS12_377_affine_t)(unsafe.Pointer(&(*points)[0]))
scalarsC := (*C.BLS12_377_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
msmSizeC := C.size_t(len(*points) / batchSize)
deviceIdC := C.size_t(deviceId)
batchSizeC := C.size_t(batchSize)
ret := C.msm_batch_cuda_bls12_377(outC, pointsC, scalarsC, batchSizeC, msmSizeC, deviceIdC)
if ret != 0 {
return nil, fmt.Errorf("msm_batch_cuda_bls12_377 returned error code: %d", ret)
}
return out, nil
}
func MsmG2Batch(points *[]G2PointAffine, scalars *[]G1ScalarField, batchSize, deviceId int) ([]*G2Point, error) {
// Check for nil pointers
if points == nil || scalars == nil {
return nil, errors.New("points or scalars is nil")
}
if len(*points) != len(*scalars) {
return nil, errors.New("error on: len(points) != len(scalars)")
}
// Check for empty slices
if len(*points) == 0 || len(*scalars) == 0 {
return nil, errors.New("points or scalars is empty")
}
// Check for zero batchSize
if batchSize <= 0 {
return nil, errors.New("error on: batchSize must be greater than zero")
}
// As in MsmBatch, use contiguous point storage rather than the pointer slice.
results := make([]G2Point, batchSize)
out := make([]*G2Point, batchSize)
for i := 0; i < batchSize; i++ {
out[i] = &results[i]
}
outC := (*C.BLS12_377_g2_projective_t)(unsafe.Pointer(&results[0]))
pointsC := (*C.BLS12_377_g2_affine_t)(unsafe.Pointer(&(*points)[0]))
scalarsC := (*C.BLS12_377_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
msmSizeC := C.size_t(len(*points) / batchSize)
deviceIdC := C.size_t(deviceId)
batchSizeC := C.size_t(batchSize)
ret := C.msm_batch_g2_cuda_bls12_377(outC, pointsC, scalarsC, batchSizeC, msmSizeC, deviceIdC)
if ret != 0 {
return nil, fmt.Errorf("msm_batch_cuda_bls12_377 returned error code: %d", ret)
}
return out, nil
}
func Commit(d_out, d_scalars, d_points unsafe.Pointer, count, bucketFactor int) int {
d_outC := (*C.BLS12_377_projective_t)(d_out)
scalarsC := (*C.BLS12_377_scalar_t)(d_scalars)
pointsC := (*C.BLS12_377_affine_t)(d_points)
countC := (C.size_t)(count)
largeBucketFactorC := C.uint(bucketFactor)
ret := C.commit_cuda_bls12_377(d_outC, scalarsC, pointsC, countC, largeBucketFactorC, 0)
if ret != 0 {
return -1
}
return 0
}
func CommitG2(d_out, d_scalars, d_points unsafe.Pointer, count, bucketFactor int) int {
d_outC := (*C.BLS12_377_g2_projective_t)(d_out)
scalarsC := (*C.BLS12_377_scalar_t)(d_scalars)
pointsC := (*C.BLS12_377_g2_affine_t)(d_points)
countC := (C.size_t)(count)
largeBucketFactorC := C.uint(bucketFactor)
ret := C.commit_g2_cuda_bls12_377(d_outC, scalarsC, pointsC, countC, largeBucketFactorC, 0)
if ret != 0 {
return -1
}
return 0
}
func CommitBatch(d_out, d_scalars, d_points unsafe.Pointer, count, batch_size int) int {
d_outC := (*C.BLS12_377_projective_t)(d_out)
scalarsC := (*C.BLS12_377_scalar_t)(d_scalars)
pointsC := (*C.BLS12_377_affine_t)(d_points)
countC := (C.size_t)(count)
batch_sizeC := (C.size_t)(batch_size)
ret := C.commit_batch_cuda_bls12_377(d_outC, scalarsC, pointsC, countC, batch_sizeC, 0)
if ret != 0 {
return -1
}
return 0
}
func CommitG2Batch(d_out, d_scalars, d_points unsafe.Pointer, count, batch_size int) int {
d_outC := (*C.BLS12_377_g2_projective_t)(d_out)
scalarsC := (*C.BLS12_377_scalar_t)(d_scalars)
pointsC := (*C.BLS12_377_g2_affine_t)(d_points)
countC := (C.size_t)(count)
batch_sizeC := (C.size_t)(batch_size)
// NOTE: the original mistakenly called msm_batch_g2_cuda_bls12_377 here.
// commit_batch_g2_cuda_bls12_377 is assumed to mirror commit_batch_cuda_bls12_377's
// signature (out, scalars, points, count, batch_size, device_id).
ret := C.commit_batch_g2_cuda_bls12_377(d_outC, scalarsC, pointsC, countC, batch_sizeC, 0)
if ret != 0 {
return -1
}
return 0
}
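// --- Editor's sketch, not generated code: how the host-side MSM entry points
// above compose. Inputs are ordinary host slices; for the batched call they
// must be laid out as batchSize contiguous groups of len(points)/batchSize
// pairs. All names below are illustrative only.
func msmUsageSketch(points []G1PointAffine, scalars []G1ScalarField, batchSize int) error {
// One MSM over all pairs.
out := new(G1ProjectivePoint)
if _, err := Msm(out, points, scalars, 0); err != nil {
return err
}
// batchSize independent MSMs over the same data.
results, err := MsmBatch(&points, &scalars, batchSize, 0)
if err != nil {
return err
}
if len(results) != batchSize || !out.IsOnCurve() {
return errors.New("unexpected MSM result")
}
return nil
}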

View File

@@ -1,355 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
package bls12377
import (
"fmt"
"math"
"testing"
"time"
"unsafe"
"github.com/ingonyama-zk/icicle/goicicle"
"github.com/stretchr/testify/assert"
)
func GeneratePoints(count int) []G1PointAffine {
// Declare a slice of affine points
var points []G1PointAffine
// populate the slice
for i := 0; i < 10; i++ {
var pointProjective G1ProjectivePoint
pointProjective.Random()
var pointAffine G1PointAffine
pointAffine.FromProjective(&pointProjective)
points = append(points, pointAffine)
}
log2_10 := math.Log2(10)
log2Count := math.Log2(float64(count))
log2Size := int(math.Ceil(log2Count - log2_10))
for i := 0; i < log2Size; i++ {
points = append(points, points...)
}
return points[:count]
}
func GeneratePointsProj(count int) []G1ProjectivePoint {
// Declare a slice of projective points
var points []G1ProjectivePoint
// Use a loop to populate the slice
for i := 0; i < count; i++ {
var p G1ProjectivePoint
p.Random()
points = append(points, p)
}
return points
}
func GenerateScalars(count int, skewed bool) []G1ScalarField {
// Declare a slice of scalars
var scalars []G1ScalarField
var rand G1ScalarField
var zero G1ScalarField
var one G1ScalarField
var randLarge G1ScalarField
zero.SetZero()
one.SetOne()
randLarge.Random()
if skewed && count > 1_200_000 {
for i := 0; i < count-1_200_000; i++ {
rand.Random()
scalars = append(scalars, rand)
}
for i := 0; i < 600_000; i++ {
scalars = append(scalars, randLarge)
}
for i := 0; i < 400_000; i++ {
scalars = append(scalars, zero)
}
for i := 0; i < 200_000; i++ {
scalars = append(scalars, one)
}
} else {
for i := 0; i < count; i++ {
rand.Random()
scalars = append(scalars, rand)
}
}
return scalars[:count]
}
func TestMSM(t *testing.T) {
for _, v := range []int{24} {
count := 1 << v
points := GeneratePoints(count)
fmt.Print("Finished generating points\n")
scalars := GenerateScalars(count, true)
fmt.Print("Finished generating scalars\n")
out := new(G1ProjectivePoint)
startTime := time.Now()
_, e := Msm(out, points, scalars, 0) // non mont
fmt.Printf("icicle MSM took: %d ms\n", time.Since(startTime).Milliseconds())
assert.Equal(t, e, nil, "error should be nil")
assert.True(t, out.IsOnCurve())
}
}
func TestCommitMSM(t *testing.T) {
for _, v := range []int{24} {
count := 1<<v - 1
points := GeneratePoints(count)
fmt.Print("Finished generating points\n")
scalars := GenerateScalars(count, true)
fmt.Print("Finished generating scalars\n")
// Derive buffer sizes from the struct layouts rather than hard-coded
// constants (the original 96/64 sizes matched a 256-bit base field, not
// BLS12-377's 12-limb fields).
var sizeCheckProjective G1ProjectivePoint
outBytes := int(unsafe.Sizeof(sizeCheckProjective))
out_d, _ := goicicle.CudaMalloc(outBytes)
var sizeCheckAffine G1PointAffine
pointsBytes := count * int(unsafe.Sizeof(sizeCheckAffine))
points_d, _ := goicicle.CudaMalloc(pointsBytes)
goicicle.CudaMemCpyHtoD[G1PointAffine](points_d, points, pointsBytes)
var sizeCheckScalar G1ScalarField
scalarBytes := count * int(unsafe.Sizeof(sizeCheckScalar))
scalars_d, _ := goicicle.CudaMalloc(scalarBytes)
goicicle.CudaMemCpyHtoD[G1ScalarField](scalars_d, scalars, scalarBytes)
startTime := time.Now()
e := Commit(out_d, scalars_d, points_d, count, 10)
fmt.Printf("icicle MSM took: %d ms\n", time.Since(startTime).Milliseconds())
outHost := make([]G1ProjectivePoint, 1)
goicicle.CudaMemCpyDtoH[G1ProjectivePoint](outHost, out_d, outBytes)
assert.Equal(t, e, 0, "error should be 0")
assert.True(t, outHost[0].IsOnCurve())
}
}
func BenchmarkCommit(b *testing.B) {
LOG_MSM_SIZES := []int{20, 21, 22, 23, 24, 25, 26}
for _, logMsmSize := range LOG_MSM_SIZES {
msmSize := 1 << logMsmSize
points := GeneratePoints(msmSize)
scalars := GenerateScalars(msmSize, false)
// Sizes derived from struct layouts, as in TestCommitMSM above.
var sizeCheckProjective G1ProjectivePoint
out_d, _ := goicicle.CudaMalloc(int(unsafe.Sizeof(sizeCheckProjective)))
var sizeCheckAffine G1PointAffine
pointsBytes := msmSize * int(unsafe.Sizeof(sizeCheckAffine))
points_d, _ := goicicle.CudaMalloc(pointsBytes)
goicicle.CudaMemCpyHtoD[G1PointAffine](points_d, points, pointsBytes)
var sizeCheckScalar G1ScalarField
scalarBytes := msmSize * int(unsafe.Sizeof(sizeCheckScalar))
scalars_d, _ := goicicle.CudaMalloc(scalarBytes)
goicicle.CudaMemCpyHtoD[G1ScalarField](scalars_d, scalars, scalarBytes)
b.Run(fmt.Sprintf("MSM %d", logMsmSize), func(b *testing.B) {
for n := 0; n < b.N; n++ {
e := Commit(out_d, scalars_d, points_d, msmSize, 10)
if e != 0 {
panic("Error occured")
}
}
})
}
}
func TestMsmBatch(t *testing.T) {
for _, batchPow2 := range []int{2, 4} {
for _, pow2 := range []int{4, 6} {
msmSize := 1 << pow2
batchSize := 1 << batchPow2
count := msmSize * batchSize
points := GeneratePoints(count)
scalars := GenerateScalars(count, false)
a, e := MsmBatch(&points, &scalars, batchSize, 0)
if e != nil {
t.Errorf("MsmBatchBLS12_377 returned an error: %v", e)
}
if len(a) != batchSize {
t.Errorf("Expected length %d, but got %d", batchSize, len(a))
}
}
}
}
func BenchmarkMSM(b *testing.B) {
LOG_MSM_SIZES := []int{20, 21, 22, 23, 24, 25, 26}
for _, logMsmSize := range LOG_MSM_SIZES {
msmSize := 1 << logMsmSize
points := GeneratePoints(msmSize)
scalars := GenerateScalars(msmSize, false)
b.Run(fmt.Sprintf("MSM %d", logMsmSize), func(b *testing.B) {
for n := 0; n < b.N; n++ {
out := new(G1ProjectivePoint)
_, e := Msm(out, points, scalars, 0)
if e != nil {
panic("Error occured")
}
}
})
}
}
// G2
func GenerateG2Points(count int) []G2PointAffine {
// Declare a slice of G2 affine points
var points []G2PointAffine
// populate the slice
for i := 0; i < 10; i++ {
var p G2Point
p.Random()
var affine G2PointAffine
affine.FromProjective(&p)
points = append(points, affine)
}
log2_10 := math.Log2(10)
log2Count := math.Log2(float64(count))
log2Size := int(math.Ceil(log2Count - log2_10))
for i := 0; i < log2Size; i++ {
points = append(points, points...)
}
return points[:count]
}
func TestMsmG2BLS12_377(t *testing.T) {
for _, v := range []int{24} {
count := 1 << v
points := GenerateG2Points(count)
fmt.Print("Finished generating points\n")
scalars := GenerateScalars(count, false)
fmt.Print("Finished generating scalars\n")
out := new(G2Point)
_, e := MsmG2(out, points, scalars, 0)
assert.Equal(t, e, nil, "error should be nil")
assert.True(t, out.IsOnCurve())
}
}
func BenchmarkMsmG2BLS12_377(b *testing.B) {
LOG_MSM_SIZES := []int{20, 21, 22, 23, 24, 25, 26}
for _, logMsmSize := range LOG_MSM_SIZES {
msmSize := 1 << logMsmSize
points := GenerateG2Points(msmSize)
scalars := GenerateScalars(msmSize, false)
b.Run(fmt.Sprintf("MSM G2 %d", logMsmSize), func(b *testing.B) {
for n := 0; n < b.N; n++ {
out := new(G2Point)
_, e := MsmG2(out, points, scalars, 0)
if e != nil {
panic("Error occured")
}
}
})
}
}
func TestCommitG2MSM(t *testing.T) {
for _, v := range []int{24} {
count := 1 << v
points := GenerateG2Points(count)
fmt.Print("Finished generating points\n")
scalars := GenerateScalars(count, true)
fmt.Print("Finished generating scalars\n")
var sizeCheckG2PointAffine G2PointAffine
inputPointsBytes := count * int(unsafe.Sizeof(sizeCheckG2PointAffine))
var sizeCheckG2Point G2Point
out_d, _ := goicicle.CudaMalloc(int(unsafe.Sizeof(sizeCheckG2Point)))
points_d, _ := goicicle.CudaMalloc(inputPointsBytes)
goicicle.CudaMemCpyHtoD[G2PointAffine](points_d, points, inputPointsBytes)
scalarBytes := count * 32
scalars_d, _ := goicicle.CudaMalloc(scalarBytes)
goicicle.CudaMemCpyHtoD[G1ScalarField](scalars_d, scalars, scalarBytes)
startTime := time.Now()
e := CommitG2(out_d, scalars_d, points_d, count, 10)
fmt.Printf("icicle MSM took: %d ms\n", time.Since(startTime).Milliseconds())
outHost := make([]G2Point, 1)
goicicle.CudaMemCpyDtoH[G2Point](outHost, out_d, int(unsafe.Sizeof(sizeCheckG2Point)))
assert.Equal(t, e, 0, "error should be 0")
assert.Equal(t, len(outHost), 1)
result := outHost[0]
assert.True(t, result.IsOnCurve())
}
}
func TestBatchG2MSM(t *testing.T) {
for _, batchPow2 := range []int{2, 4} {
for _, pow2 := range []int{4, 6} {
msmSize := 1 << pow2
batchSize := 1 << batchPow2
count := msmSize * batchSize
points := GenerateG2Points(count)
scalars := GenerateScalars(count, false)
pointsResults, e := MsmG2Batch(&points, &scalars, batchSize, 0)
if e != nil {
t.Errorf("MsmBatchBLS12_377 returned an error: %v", e)
}
if len(pointsResults) != batchSize {
t.Errorf("Expected length %d, but got %d", batchSize, len(pointsResults))
}
for _, s := range pointsResults {
assert.True(t, s.IsOnCurve())
}
}
}
}

View File

@@ -1,221 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
package bls12377
import (
"errors"
"fmt"
"unsafe"
"github.com/ingonyama-zk/icicle/goicicle"
)
// #cgo CFLAGS: -I./include/
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbls12_377
// #include "ntt.h"
import "C"
// Ordering flags (decimation in frequency/time); the bindings below do not
// currently take an ordering parameter, so these are informational only.
const (
NONE = 0
DIF = 1
DIT = 2
)
// Ntt runs a single (inverse) NTT over scalars in place.
func Ntt(scalars *[]G1ScalarField, isInverse bool, deviceId int) uint64 {
scalarsC := (*C.BLS12_377_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
ret := C.ntt_cuda_bls12_377(scalarsC, C.uint32_t(len(*scalars)), C.bool(isInverse), C.size_t(deviceId))
return uint64(ret)
}
// NttBatch runs len(*scalars)/batchSize independent (inverse) NTTs in place;
// batchSize is the size of each individual NTT (see TestNttBatchBLS12_377).
func NttBatch(scalars *[]G1ScalarField, isInverse bool, batchSize, deviceId int) uint64 {
scalarsC := (*C.BLS12_377_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
isInverseC := C.bool(isInverse)
batchSizeC := C.uint32_t(batchSize)
deviceIdC := C.size_t(deviceId)
ret := C.ntt_batch_cuda_bls12_377(scalarsC, C.uint32_t(len(*scalars)), batchSizeC, isInverseC, deviceIdC)
return uint64(ret)
}
func EcNtt(values *[]G1ProjectivePoint, isInverse bool, deviceId int) uint64 {
valuesC := (*C.BLS12_377_projective_t)(unsafe.Pointer(&(*values)[0]))
deviceIdC := C.size_t(deviceId)
isInverseC := C.bool(isInverse)
n := C.uint32_t(len(*values))
ret := C.ecntt_cuda_bls12_377(valuesC, n, isInverseC, deviceIdC)
return uint64(ret)
}
func EcNttBatch(values *[]G1ProjectivePoint, isInverse bool, batchSize, deviceId int) uint64 {
valuesC := (*C.BLS12_377_projective_t)(unsafe.Pointer(&(*values)[0]))
deviceIdC := C.size_t(deviceId)
isInverseC := C.bool(isInverse)
n := C.uint32_t(len(*values))
batchSizeC := C.uint32_t(batchSize)
ret := C.ecntt_batch_cuda_bls12_377(valuesC, n, batchSizeC, isInverseC, deviceIdC)
return uint64(ret)
}
func GenerateTwiddles(d_size int, log_d_size int, inverse bool) (up unsafe.Pointer, err error) {
domain_size := C.uint32_t(d_size)
logn := C.uint32_t(log_d_size)
is_inverse := C.bool(inverse)
dp := C.build_domain_cuda_bls12_377(domain_size, logn, is_inverse, 0, 0)
if dp == nil {
err = errors.New("nullptr returned from generating twiddles")
return unsafe.Pointer(nil), err
}
return unsafe.Pointer(dp), nil
}
// Reverses d_scalars in-place
func ReverseScalars(d_scalars unsafe.Pointer, len int) (int, error) {
scalarsC := (*C.BLS12_377_scalar_t)(d_scalars)
lenC := C.int(len)
if success := C.reverse_order_scalars_cuda_bls12_377(scalarsC, lenC, 0, 0); success != 0 {
return -1, errors.New("reversing failed")
}
return 0, nil
}
func Interpolate(scalars, twiddles, cosetPowers unsafe.Pointer, size int, isCoset bool) unsafe.Pointer {
size_d := size * 32 // 32 bytes per scalar (8 uint32 limbs)
dp, err := goicicle.CudaMalloc(size_d)
if err != nil {
return nil
}
d_out := (*C.BLS12_377_scalar_t)(dp)
scalarsC := (*C.BLS12_377_scalar_t)(scalars)
twiddlesC := (*C.BLS12_377_scalar_t)(twiddles)
cosetPowersC := (*C.BLS12_377_scalar_t)(cosetPowers)
sizeC := C.uint(size)
var ret C.int
if isCoset {
ret = C.interpolate_scalars_on_coset_cuda_bls12_377(d_out, scalarsC, twiddlesC, sizeC, cosetPowersC, 0, 0)
} else {
ret = C.interpolate_scalars_cuda_bls12_377(d_out, scalarsC, twiddlesC, sizeC, 0, 0)
}
if ret != 0 {
fmt.Print("error interpolating")
}
return unsafe.Pointer(d_out)
}
func Evaluate(scalars_out, scalars, twiddles, coset_powers unsafe.Pointer, scalars_size, twiddles_size int, isCoset bool) int {
scalars_outC := (*C.BLS12_377_scalar_t)(scalars_out)
scalarsC := (*C.BLS12_377_scalar_t)(scalars)
twiddlesC := (*C.BLS12_377_scalar_t)(twiddles)
coset_powersC := (*C.BLS12_377_scalar_t)(coset_powers)
sizeC := C.uint(scalars_size)
twiddlesC_size := C.uint(twiddles_size)
var ret C.int
if isCoset {
ret = C.evaluate_scalars_on_coset_cuda_bls12_377(scalars_outC, scalarsC, twiddlesC, twiddlesC_size, sizeC, coset_powersC, 0, 0)
} else {
ret = C.evaluate_scalars_cuda_bls12_377(scalars_outC, scalarsC, twiddlesC, twiddlesC_size, sizeC, 0, 0)
}
if ret != 0 {
fmt.Print("error interpolating")
return -1
}
return 0
}
func VecScalarAdd(in1_d, in2_d unsafe.Pointer, size int) int {
in1_dC := (*C.BLS12_377_scalar_t)(in1_d)
in2_dC := (*C.BLS12_377_scalar_t)(in2_d)
sizeC := C.uint(size)
ret := C.add_scalars_cuda_bls12_377(in1_dC, in1_dC, in2_dC, sizeC, 0)
if ret != 0 {
fmt.Print("error adding scalar vectors")
return -1
}
return 0
}
func VecScalarSub(in1_d, in2_d unsafe.Pointer, size int) int {
in1_dC := (*C.BLS12_377_scalar_t)(in1_d)
in2_dC := (*C.BLS12_377_scalar_t)(in2_d)
sizeC := C.uint(size)
ret := C.sub_scalars_cuda_bls12_377(in1_dC, in1_dC, in2_dC, sizeC, 0)
if ret != 0 {
fmt.Print("error subtracting scalar vectors")
return -1
}
return 0
}
func ToMontgomery(d_scalars unsafe.Pointer, len int) (int, error) {
scalarsC := (*C.BLS12_377_scalar_t)(d_scalars)
lenC := C.uint(len)
if success := C.to_montgomery_scalars_cuda_bls12_377(scalarsC, lenC, 0); success != 0 {
return -1, errors.New("reversing failed")
}
return 0, nil
}
func FromMontgomery(d_scalars unsafe.Pointer, len int) (int, error) {
scalarsC := (*C.BLS12_377_scalar_t)(d_scalars)
lenC := C.uint(len)
if success := C.from_montgomery_scalars_cuda_bls12_377(scalarsC, lenC, 0); success != 0 {
return -1, errors.New("reversing failed")
}
return 0, nil
}
func AffinePointFromMontgomery(d_points unsafe.Pointer, len int) (int, error) {
pointsC := (*C.BLS12_377_affine_t)(d_points)
lenC := C.uint(len)
if success := C.from_montgomery_aff_points_cuda_bls12_377(pointsC, lenC, 0); success != 0 {
return -1, errors.New("reversing failed")
}
return 0, nil
}
func G2AffinePointFromMontgomery(d_points unsafe.Pointer, len int) (int, error) {
pointsC := (*C.BLS12_377_g2_affine_t)(d_points)
lenC := C.uint(len)
if success := C.from_montgomery_aff_points_g2_cuda_bls12_377(pointsC, lenC, 0); success != 0 {
return -1, errors.New("reversing failed")
}
return 0, nil
}
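// --- Editor's sketch, not generated code: a typical device-side interpolation
// flow using the bindings above. That the kernels expect bit-reversed input
// ordering is an assumption here, inferred from ReverseScalars being exposed
// alongside Interpolate; names are illustrative only.
func interpolateSketch(evals []G1ScalarField, logSize int) (unsafe.Pointer, error) {
size := 1 << logSize
sizeBytes := size * 32 // 32 bytes per scalar (8 uint32 limbs)
// Stage the evaluations in device memory.
dEvals, err := goicicle.CudaMalloc(sizeBytes)
if err != nil {
return nil, err
}
goicicle.CudaMemCpyHtoD[G1ScalarField](dEvals, evals, sizeBytes)
// Reorder in place, then interpolate against an inverse-twiddle domain.
if _, err := ReverseScalars(dEvals, size); err != nil {
return nil, err
}
twiddles, err := GenerateTwiddles(size, logSize, true)
if err != nil {
return nil, err
}
dCoeffs := Interpolate(dEvals, twiddles, nil, size, false)
if dCoeffs == nil {
return nil, errors.New("interpolation failed")
}
return dCoeffs, nil
}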

View File

@@ -1,148 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
package bls12377
import (
"fmt"
"github.com/stretchr/testify/assert"
"reflect"
"testing"
)
// A single batch covering the whole input: batchSize == len(scalars).
func TestNttBatchBLS12_377SingleBatch(t *testing.T) {
count := 1 << 20
scalars := GenerateScalars(count, false)
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
copy(nttResult, scalars)
assert.Equal(t, nttResult, scalars)
NttBatch(&nttResult, false, count, 0)
assert.NotEqual(t, nttResult, scalars)
}
func TestNttBLS12_377CompareToGnarkDIF(t *testing.T) {
count := 1 << 2
scalars := GenerateScalars(count, false)
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
copy(nttResult, scalars)
assert.Equal(t, nttResult, scalars)
// The binding exposes no ordering flag, so DIF/DIT cannot be selected here.
Ntt(&nttResult, false, 0)
assert.NotEqual(t, nttResult, scalars)
}
func TestINttBLS12_377CompareToGnarkDIT(t *testing.T) {
count := 1 << 3
scalars := GenerateScalars(count, false)
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
copy(nttResult, scalars)
assert.Equal(t, nttResult, scalars)
// The binding exposes no ordering flag, so DIF/DIT cannot be selected here.
Ntt(&nttResult, true, 0)
assert.NotEqual(t, nttResult, scalars)
}
func TestNttBLS12_377(t *testing.T) {
count := 1 << 3
scalars := GenerateScalars(count, false)
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
copy(nttResult, scalars)
assert.Equal(t, nttResult, scalars)
Ntt(&nttResult, false, NONE, 0)
assert.NotEqual(t, nttResult, scalars)
inttResult := make([]G1ScalarField, len(nttResult))
copy(inttResult, nttResult)
assert.Equal(t, inttResult, nttResult)
Ntt(&inttResult, true, NONE, 0)
assert.Equal(t, inttResult, scalars)
}
func TestNttBatchBLS12_377(t *testing.T) {
count := 1 << 5
batches := 4
scalars := GenerateScalars(count*batches, false)
var scalarVecOfVec [][]G1ScalarField = make([][]G1ScalarField, 0)
for i := 0; i < batches; i++ {
start := i * count
end := (i + 1) * count
batch := make([]G1ScalarField, len(scalars[start:end]))
copy(batch, scalars[start:end])
scalarVecOfVec = append(scalarVecOfVec, batch)
}
nttBatchResult := make([]G1ScalarField, len(scalars))
copy(nttBatchResult, scalars)
NttBatch(&nttBatchResult, false, count, 0)
var nttResultVecOfVec [][]G1ScalarField
for i := 0; i < batches; i++ {
// Clone the slice
clone := make([]G1ScalarField, len(scalarVecOfVec[i]))
copy(clone, scalarVecOfVec[i])
// Add it to the result vector of vectors
nttResultVecOfVec = append(nttResultVecOfVec, clone)
// Run a standalone NTT on this batch's slice
Ntt(&nttResultVecOfVec[i], false, 0)
}
assert.NotEqual(t, nttBatchResult, scalars)
// Check that the standalone NTT of each batch matches the corresponding
// slice of the batched NTT result
for i := 0; i < batches; i++ {
if !reflect.DeepEqual(nttResultVecOfVec[i], nttBatchResult[i*count:((i+1)*count)]) {
t.Errorf("standalone NTT of batch %d does not match the batched NTT result", i)
}
}
}
func BenchmarkNTT(b *testing.B) {
LOG_NTT_SIZES := []int{12, 15, 20, 21, 22, 23, 24, 25, 26}
for _, logNTTSize := range LOG_NTT_SIZES {
nttSize := 1 << logNTTSize
b.Run(fmt.Sprintf("NTT %d", logNTTSize), func(b *testing.B) {
scalars := GenerateScalars(nttSize, false)
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
copy(nttResult, scalars)
for n := 0; n < b.N; n++ {
Ntt(&nttResult, false, NONE, 0)
}
})
}
}

View File

@@ -1,38 +0,0 @@
package bls12377
import "encoding/binary"
// Function to convert [8]uint32 (little-endian limbs) to [4]uint64
func ConvertUint32ArrToUint64Arr(arr32 [8]uint32) [4]uint64 {
var arr64 [4]uint64
for i := 0; i < len(arr32); i += 2 {
// arr32[i] is the low word, matching the little-endian split below
arr64[i/2] = (uint64(arr32[i+1]) << 32) | uint64(arr32[i])
}
return arr64
}
func ConvertUint64ArrToUint32Arr4(arr64 [4]uint64) [8]uint32 {
var arr32 [8]uint32
for i, v := range arr64 {
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, v)
arr32[i*2] = binary.LittleEndian.Uint32(b[0:4])
arr32[i*2+1] = binary.LittleEndian.Uint32(b[4:8])
}
return arr32
}
func ConvertUint64ArrToUint32Arr6(arr64 [6]uint64) [12]uint32 {
var arr32 [12]uint32
for i, v := range arr64 {
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, v)
arr32[i*2] = binary.LittleEndian.Uint32(b[0:4])
arr32[i*2+1] = binary.LittleEndian.Uint32(b[4:8])
}
return arr32
}
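// --- Editor's sketch, not generated code: the conversions above are inverses
// under the little-endian limb convention (limb 0 = low 32 bits).
func limbRoundTripSketch() bool {
orig := [4]uint64{0x0123456789abcdef, 1, 2, 3}
limbs := ConvertUint64ArrToUint32Arr4(orig) // limbs[0] == 0x89abcdef, limbs[1] == 0x01234567
back := ConvertUint32ArrToUint64Arr(limbs)
return back == orig // true
}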

View File

@@ -1,41 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
package bls12377
// #cgo CFLAGS: -I./include/
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbls12_377
// #include "ve_mod_mult.h"
import "C"
import (
"fmt"
"unsafe"
)
// VecScalarMulMod multiplies two scalar vectors element-wise in place on the
// device (scalarVec1[i] *= scalarVec2[i]); both arguments are device pointers.
func VecScalarMulMod(scalarVec1, scalarVec2 unsafe.Pointer, size int) int {
scalarVec1C := (*C.BLS12_377_scalar_t)(scalarVec1)
scalarVec2C := (*C.BLS12_377_scalar_t)(scalarVec2)
sizeC := C.size_t(size)
ret := C.vec_mod_mult_device_scalar_bls12_377(scalarVec1C, scalarVec2C, sizeC, 0)
if ret != 0 {
fmt.Print("error multiplying scalar vectors")
return -1
}
return 0
}
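// --- Editor's sketch, not generated code: VecScalarMulMod operates on device
// pointers, so host data must be staged first. The goicicle helpers are used
// as in the test files of this change and would need to be imported here.
func vecMulSketch(a, b []G1ScalarField) []G1ScalarField {
sizeBytes := len(a) * 32 // 8 uint32 limbs per scalar
dA, _ := goicicle.CudaMalloc(sizeBytes)
dB, _ := goicicle.CudaMalloc(sizeBytes)
goicicle.CudaMemCpyHtoD[G1ScalarField](dA, a, sizeBytes)
goicicle.CudaMemCpyHtoD[G1ScalarField](dB, b, sizeBytes)
// In-place on the device: dA[i] = dA[i] * dB[i] mod p.
if VecScalarMulMod(dA, dB, len(a)) != 0 {
return nil
}
out := make([]G1ScalarField, len(a))
goicicle.CudaMemCpyDtoH[G1ScalarField](out, dA, sizeBytes)
return out
}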

View File

@@ -1,324 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
package bls12381
import (
"unsafe"
"encoding/binary"
)
// #cgo CFLAGS: -I./include/
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbls12_381
// #include "projective.h"
// #include "ve_mod_mult.h"
import "C"
const SCALAR_SIZE = 8 // 255-bit scalar field: 8 32-bit limbs
const BASE_SIZE = 12  // 381-bit base field: 12 32-bit limbs
type G1ScalarField struct {
S [SCALAR_SIZE]uint32
}
type G1BaseField struct {
S [BASE_SIZE]uint32
}
/*
* BaseField Constructors
*/
func (f *G1BaseField) SetZero() *G1BaseField {
var S [BASE_SIZE]uint32
f.S = S
return f
}
func (f *G1BaseField) SetOne() *G1BaseField {
var S [BASE_SIZE]uint32
S[0] = 1
f.S = S
return f
}
func (p *G1ProjectivePoint) FromAffine(affine *G1PointAffine) *G1ProjectivePoint {
out := (*C.BLS12_381_projective_t)(unsafe.Pointer(p))
in := (*C.BLS12_381_affine_t)(unsafe.Pointer(affine))
C.projective_from_affine_bls12_381(out, in)
return p
}
func (f *G1BaseField) FromLimbs(limbs [BASE_SIZE]uint32) *G1BaseField {
copy(f.S[:], limbs[:])
return f
}
/*
* BaseField methods
*/
func (f *G1BaseField) Limbs() [BASE_SIZE]uint32 {
return f.S
}
func (f *G1BaseField) ToBytesLe() []byte {
bytes := make([]byte, len(f.S)*4)
for i, v := range f.S {
binary.LittleEndian.PutUint32(bytes[i*4:], v)
}
return bytes
}
/*
* ScalarField constructors
*/
func (p *G1ScalarField) Random() *G1ScalarField {
outC := (*C.BLS12_381_scalar_t)(unsafe.Pointer(p))
C.random_scalar_bls12_381(outC)
return p
}
func (f *G1ScalarField) SetZero() *G1ScalarField {
var S [SCALAR_SIZE]uint32
f.S = S
return f
}
func (f *G1ScalarField) SetOne() *G1ScalarField {
var S [SCALAR_SIZE]uint32
S[0] = 1
f.S = S
return f
}
func (a *G1ScalarField) Eq(b *G1ScalarField) bool {
for i, v := range a.S {
if b.S[i] != v {
return false
}
}
return true
}
/*
* ScalarField methods
*/
func (f *G1ScalarField) Limbs() [SCALAR_SIZE]uint32 {
return f.S
}
func (f *G1ScalarField) ToBytesLe() []byte {
bytes := make([]byte, len(f.S)*4)
for i, v := range f.S {
binary.LittleEndian.PutUint32(bytes[i*4:], v)
}
return bytes
}
/*
* PointBLS12_381
*/
type G1ProjectivePoint struct {
X, Y, Z G1BaseField
}
func (f *G1ProjectivePoint) SetZero() *G1ProjectivePoint {
var yOne G1BaseField
yOne.SetOne()
var xZero G1BaseField
xZero.SetZero()
var zZero G1BaseField
zZero.SetZero()
f.X = xZero
f.Y = yOne
f.Z = zZero
return f
}
func (p *G1ProjectivePoint) Eq(pCompare *G1ProjectivePoint) bool {
// Cast *G1ProjectivePoint to *C.BLS12_381_projective_t
// The unsafe.Pointer cast is necessary because Go doesn't allow direct casts
// between different pointer types.
// It's your responsibility to ensure that the types are compatible.
pC := (*C.BLS12_381_projective_t)(unsafe.Pointer(p))
pCompareC := (*C.BLS12_381_projective_t)(unsafe.Pointer(pCompare))
// Call the C function
// The C function doesn't keep any references to the data,
// so it's fine if the Go garbage collector moves or deletes the data later.
return bool(C.eq_bls12_381(pC, pCompareC))
}
func (p *G1ProjectivePoint) IsOnCurve() bool {
point := (*C.BLS12_381_projective_t)(unsafe.Pointer(p))
res := C.projective_is_on_curve_bls12_381(point)
return bool(res)
}
func (p *G1ProjectivePoint) Random() *G1ProjectivePoint {
outC := (*C.BLS12_381_projective_t)(unsafe.Pointer(p))
C.random_projective_bls12_381(outC)
return p
}
func (p *G1ProjectivePoint) StripZ() *G1PointAffine {
return &G1PointAffine{
X: p.X,
Y: p.Y,
}
}
func (p *G1ProjectivePoint) FromLimbs(x, y, z *[]uint32) *G1ProjectivePoint {
var _x G1BaseField
var _y G1BaseField
var _z G1BaseField
_x.FromLimbs(GetFixedLimbs(x))
_y.FromLimbs(GetFixedLimbs(y))
_z.FromLimbs(GetFixedLimbs(z))
p.X = _x
p.Y = _y
p.Z = _z
return p
}
/*
* PointAffineNoInfinityBLS12_381
*/
type G1PointAffine struct {
X, Y G1BaseField
}
func (p *G1PointAffine) FromProjective(projective *G1ProjectivePoint) *G1PointAffine {
in := (*C.BLS12_381_projective_t)(unsafe.Pointer(projective))
out := (*C.BLS12_381_affine_t)(unsafe.Pointer(p))
C.projective_to_affine_bls12_381(out, in)
return p
}
func (p *G1PointAffine) ToProjective() *G1ProjectivePoint {
var Z G1BaseField
Z.SetOne()
return &G1ProjectivePoint{
X: p.X,
Y: p.Y,
Z: Z,
}
}
func (p *G1PointAffine) FromLimbs(X, Y *[]uint32) *G1PointAffine {
var _x G1BaseField
var _y G1BaseField
_x.FromLimbs(GetFixedLimbs(X))
_y.FromLimbs(GetFixedLimbs(Y))
// Assign the parsed limbs (the original dropped them and returned p unchanged)
p.X = _x
p.Y = _y
return p
}
/*
* Multiplication
*/
func MultiplyVec(a []G1ProjectivePoint, b []G1ScalarField, deviceID int) {
if len(a) != len(b) {
panic("a and b have different lengths")
}
pointsC := (*C.BLS12_381_projective_t)(unsafe.Pointer(&a[0]))
scalarsC := (*C.BLS12_381_scalar_t)(unsafe.Pointer(&b[0]))
deviceIdC := C.size_t(deviceID)
nElementsC := C.size_t(len(a))
C.vec_mod_mult_point_bls12_381(pointsC, scalarsC, nElementsC, deviceIdC)
}
func MultiplyScalar(a []G1ScalarField, b []G1ScalarField, deviceID int) {
if len(a) != len(b) {
panic("a and b have different lengths")
}
aC := (*C.BLS12_381_scalar_t)(unsafe.Pointer(&a[0]))
bC := (*C.BLS12_381_scalar_t)(unsafe.Pointer(&b[0]))
deviceIdC := C.size_t(deviceID)
nElementsC := C.size_t(len(a))
C.vec_mod_mult_scalar_bls12_381(aC, bC, nElementsC, deviceIdC)
}
// Multiply a flattened matrix `a` by the vector `b` and return the resulting
// vector (the original computed the result but never returned it):
//
// `a` - flattened matrix;
// `b` - vector to multiply `a` by;
func MultiplyMatrix(a []G1ScalarField, b []G1ScalarField, deviceID int) []G1ScalarField {
c := make([]G1ScalarField, len(b))
for i := range c {
var p G1ScalarField
p.SetZero()
c[i] = p
}
aC := (*C.BLS12_381_scalar_t)(unsafe.Pointer(&a[0]))
bC := (*C.BLS12_381_scalar_t)(unsafe.Pointer(&b[0]))
cC := (*C.BLS12_381_scalar_t)(unsafe.Pointer(&c[0]))
deviceIdC := C.size_t(deviceID)
nElementsC := C.size_t(len(a))
C.matrix_vec_mod_mult_bls12_381(aC, bC, cC, nElementsC, deviceIdC)
return c
}
/*
* Utils
*/
// GetFixedLimbs zero-pads a slice of at most BASE_SIZE limbs into a fixed
// array; it panics on longer input.
func GetFixedLimbs(slice *[]uint32) [BASE_SIZE]uint32 {
if len(*slice) <= BASE_SIZE {
limbs := [BASE_SIZE]uint32{}
copy(limbs[:len(*slice)], *slice)
return limbs
}
panic("slice has too many elements")
}
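// --- Editor's sketch, not generated code: the projective/affine round trip,
// mirroring the checks in the test file below.
func roundTripSketch() bool {
var p G1ProjectivePoint
p.Random()
var a G1PointAffine
a.FromProjective(&p)
var p2 G1ProjectivePoint
p2.FromAffine(&a)
return p.IsOnCurve() && p2.IsOnCurve() && p.Eq(&p2)
}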

View File

@@ -1,198 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
package bls12381
import (
"encoding/binary"
"testing"
"github.com/stretchr/testify/assert"
)
func TestNewFieldBLS12_381One(t *testing.T) {
var oneField G1BaseField
oneField.SetOne()
// The BLS12-381 base field uses BASE_SIZE (12) 32-bit limbs, not 8
rawOneField := [BASE_SIZE]uint32{0x1}
assert.Equal(t, oneField.S, rawOneField)
}
func TestNewFieldBLS12_381Zero(t *testing.T) {
var zeroField G1BaseField
zeroField.SetZero()
rawZeroField := [BASE_SIZE]uint32{}
assert.Equal(t, zeroField.S, rawZeroField)
}
func TestFieldBLS12_381ToBytesLe(t *testing.T) {
var p G1ProjectivePoint
p.Random()
expected := make([]byte, len(p.X.S)*4) // each uint32 takes 4 bytes
for i, v := range p.X.S {
binary.LittleEndian.PutUint32(expected[i*4:], v)
}
assert.Equal(t, p.X.ToBytesLe(), expected)
assert.Equal(t, len(p.X.ToBytesLe()), 48) // 12 limbs * 4 bytes
}
func TestNewPointBLS12_381Zero(t *testing.T) {
var pointZero G1ProjectivePoint
pointZero.SetZero()
var baseOne G1BaseField
baseOne.SetOne()
var zeroSanity G1BaseField
zeroSanity.SetZero()
assert.Equal(t, pointZero.X, zeroSanity)
assert.Equal(t, pointZero.Y, baseOne)
assert.Equal(t, pointZero.Z, zeroSanity)
}
func TestFromProjectiveToAffine(t *testing.T) {
var projective G1ProjectivePoint
var affine G1PointAffine
projective.Random()
affine.FromProjective(&projective)
var projective2 G1ProjectivePoint
projective2.FromAffine(&affine)
assert.True(t, projective.IsOnCurve())
assert.True(t, projective2.IsOnCurve())
assert.True(t, projective.Eq(&projective2))
}
func TestBLS12_381Eq(t *testing.T) {
var p1 G1ProjectivePoint
p1.Random()
var p2 G1ProjectivePoint
p2.Random()
assert.Equal(t, p1.Eq(&p1), true)
assert.Equal(t, p1.Eq(&p2), false)
}
func TestBLS12_381StripZ(t *testing.T) {
var p1 G1ProjectivePoint
p1.Random()
p2ZLess := p1.StripZ()
assert.IsType(t, G1PointAffine{}, *p2ZLess)
assert.Equal(t, p1.X, p2ZLess.X)
assert.Equal(t, p1.Y, p2ZLess.Y)
}
func TestPointBLS12_381fromLimbs(t *testing.T) {
var p G1ProjectivePoint
p.Random()
x := p.X.Limbs()
y := p.Y.Limbs()
z := p.Z.Limbs()
xSlice := x[:]
ySlice := y[:]
zSlice := z[:]
var pFromLimbs G1ProjectivePoint
pFromLimbs.FromLimbs(&xSlice, &ySlice, &zSlice)
assert.Equal(t, pFromLimbs, p)
}
func TestNewPointAffineNoInfinityBLS12_381Zero(t *testing.T) {
var zeroP G1PointAffine
var zeroSanity G1BaseField
zeroSanity.SetZero()
assert.Equal(t, zeroP.X, zeroSanity)
assert.Equal(t, zeroP.Y, zeroSanity)
}
func TestPointAffineNoInfinityBLS12_381FromLimbs(t *testing.T) {
// Initialize your test values
x := [12]uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}
y := [12]uint32{9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}
xSlice := x[:]
ySlice := y[:]
// Execute your function
var result G1PointAffine
result.FromLimbs(&xSlice, &ySlice)
var xBase G1BaseField
var yBase G1BaseField
xBase.FromLimbs(x)
yBase.FromLimbs(y)
// Define your expected result
expected := &G1PointAffine{
X: xBase,
Y: yBase,
}
// Test if result is as expected
assert.Equal(t, result, expected)
}
func TestGetFixedLimbs(t *testing.T) {
t.Run("case of valid input shorter than BASE_SIZE", func(t *testing.T) {
slice := []uint32{1, 2, 3, 4, 5, 6, 7}
expected := [BASE_SIZE]uint32{1, 2, 3, 4, 5, 6, 7}
result := GetFixedLimbs(&slice)
assert.Equal(t, result, expected)
})
t.Run("case of valid input of length BASE_SIZE", func(t *testing.T) {
slice := []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}
expected := [BASE_SIZE]uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}
result := GetFixedLimbs(&slice)
assert.Equal(t, result, expected)
})
t.Run("case of empty input", func(t *testing.T) {
slice := []uint32{}
expected := [BASE_SIZE]uint32{}
result := GetFixedLimbs(&slice)
assert.Equal(t, result, expected)
})
t.Run("case of input longer than BASE_SIZE", func(t *testing.T) {
slice := []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}
defer func() {
if r := recover(); r == nil {
t.Errorf("the code did not panic")
}
}()
GetFixedLimbs(&slice)
})
}

View File

@@ -1,112 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
package bls12381
import (
"encoding/binary"
"unsafe"
)
// #cgo CFLAGS: -I./include/
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbls12_381
// #include "projective.h"
// #include "ve_mod_mult.h"
import "C"
// G2 extension field
type G2Element [6]uint64
type ExtensionField struct {
A0, A1 G2Element
}
type G2PointAffine struct {
X, Y ExtensionField
}
type G2Point struct {
X, Y, Z ExtensionField
}
func (p *G2Point) Random() *G2Point {
outC := (*C.BLS12_381_g2_projective_t)(unsafe.Pointer(p))
C.random_g2_projective_bls12_381(outC)
return p
}
func (p *G2Point) FromAffine(affine *G2PointAffine) *G2Point {
out := (*C.BLS12_381_g2_projective_t)(unsafe.Pointer(p))
in := (*C.BLS12_381_g2_affine_t)(unsafe.Pointer(affine))
C.g2_projective_from_affine_bls12_381(out, in)
return p
}
func (p *G2Point) Eq(pCompare *G2Point) bool {
// Cast *G2Point to *C.BLS12_381_g2_projective_t
// The unsafe.Pointer cast is necessary because Go doesn't allow direct casts
// between different pointer types.
// It's your responsibility to ensure that the types are compatible.
pC := (*C.BLS12_381_g2_projective_t)(unsafe.Pointer(p))
pCompareC := (*C.BLS12_381_g2_projective_t)(unsafe.Pointer(pCompare))
// Call the C function
// The C function doesn't keep any references to the data,
// so it's fine if the Go garbage collector moves or deletes the data later.
return bool(C.eq_g2_bls12_381(pC, pCompareC))
}
func (f *G2Element) ToBytesLe() []byte {
var bytes []byte
for _, val := range f {
buf := make([]byte, 8) // 8 bytes because uint64 is 64-bit
binary.LittleEndian.PutUint64(buf, val)
bytes = append(bytes, buf...)
}
return bytes
}
func (p *G2PointAffine) ToProjective() G2Point {
return G2Point{
X: p.X,
Y: p.Y,
Z: ExtensionField{
A0: G2Element{1, 0, 0, 0, 0, 0},
A1: G2Element{0, 0, 0, 0, 0, 0},
},
}
}
func (p *G2PointAffine) FromProjective(projective *G2Point) *G2PointAffine {
out := (*C.BLS12_381_g2_affine_t)(unsafe.Pointer(p))
in := (*C.BLS12_381_g2_projective_t)(unsafe.Pointer(projective))
C.g2_projective_to_affine_bls12_381(out, in)
return p
}
func (p *G2Point) IsOnCurve() bool {
// Reinterpret the Go struct as the corresponding C struct
point := (*C.BLS12_381_g2_projective_t)(unsafe.Pointer(p))
res := C.g2_projective_is_on_curve_bls12_381(point)
return bool(res)
}

View File

@@ -1,75 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
package bls12381
import (
"github.com/stretchr/testify/assert"
"testing"
)
func TestG2Eqg2(t *testing.T) {
var point G2Point
point.Random()
assert.True(t, point.Eq(&point))
}
func TestG2FromProjectiveToAffine(t *testing.T) {
var projective G2Point
var affine G2PointAffine
projective.Random()
affine.FromProjective(&projective)
var projective2 G2Point
projective2.FromAffine(&affine)
assert.True(t, projective.IsOnCurve())
assert.True(t, projective2.IsOnCurve())
assert.True(t, projective.Eq(&projective2))
}
func TestG2Eqg2NotEqual(t *testing.T) {
var point G2Point
point.Random()
var point2 G2Point
point2.Random()
assert.False(t, point.Eq(&point2))
}
func TestG2ToBytes(t *testing.T) {
// G2Element is [6]uint64; the two unset limbs serialize as 16 trailing zero bytes
element := G2Element{0x6546098ea84b6298, 0x4a384533d1f68aca, 0xaa0666972d771336, 0x1569e4a34321993}
bytes := element.ToBytesLe()
assert.Equal(t, bytes, []byte{0x98, 0x62, 0x4b, 0xa8, 0x8e, 0x9, 0x46, 0x65, 0xca, 0x8a, 0xf6, 0xd1, 0x33, 0x45, 0x38, 0x4a, 0x36, 0x13, 0x77, 0x2d, 0x97, 0x66, 0x6, 0xaa, 0x93, 0x19, 0x32, 0x34, 0x4a, 0x9e, 0x56, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0})
}
func TestG2ShouldConvertToProjective(t *testing.T) {
var pointProjective G2Point
var pointAffine G2PointAffine
pointProjective.Random()
pointAffine.FromProjective(&pointProjective)
proj := pointAffine.ToProjective()
assert.True(t, proj.IsOnCurve())
assert.True(t, pointProjective.Eq(&proj))
}

View File

@@ -1,98 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdbool.h>
// msm.h
#ifndef _BLS12_381_MSM_H
#define _BLS12_381_MSM_H
#ifdef __cplusplus
extern "C" {
#endif
// Incomplete declaration of BLS12_381 projective and affine structs
typedef struct BLS12_381_projective_t BLS12_381_projective_t;
typedef struct BLS12_381_g2_projective_t BLS12_381_g2_projective_t;
typedef struct BLS12_381_affine_t BLS12_381_affine_t;
typedef struct BLS12_381_g2_affine_t BLS12_381_g2_affine_t;
typedef struct BLS12_381_scalar_t BLS12_381_scalar_t;
typedef cudaStream_t CudaStream_t;
int msm_cuda_bls12_381(
BLS12_381_projective_t* out, BLS12_381_affine_t* points, BLS12_381_scalar_t* scalars, size_t count, size_t device_id);
int msm_batch_cuda_bls12_381(
BLS12_381_projective_t* out,
BLS12_381_affine_t* points,
BLS12_381_scalar_t* scalars,
size_t batch_size,
size_t msm_size,
size_t device_id);
int commit_cuda_bls12_381(
BLS12_381_projective_t* d_out,
BLS12_381_scalar_t* d_scalars,
BLS12_381_affine_t* d_points,
size_t count,
unsigned large_bucket_factor,
size_t device_id);
int commit_batch_cuda_bls12_381(
BLS12_381_projective_t* d_out,
BLS12_381_scalar_t* d_scalars,
BLS12_381_affine_t* d_points,
size_t count,
size_t batch_size,
size_t device_id);
int msm_g2_cuda_bls12_381(
BLS12_381_g2_projective_t* out,
BLS12_381_g2_affine_t* points,
BLS12_381_scalar_t* scalars,
size_t count,
size_t device_id);
int msm_batch_g2_cuda_bls12_381(
BLS12_381_g2_projective_t* out,
BLS12_381_g2_affine_t* points,
BLS12_381_scalar_t* scalars,
size_t batch_size,
size_t msm_size,
size_t device_id);
int commit_g2_cuda_bls12_381(
BLS12_381_g2_projective_t* d_out,
BLS12_381_scalar_t* d_scalars,
BLS12_381_g2_affine_t* d_points,
size_t count,
unsigned large_bucket_factor,
size_t device_id);
int commit_batch_g2_cuda_bls12_381(
BLS12_381_g2_projective_t* d_out,
BLS12_381_scalar_t* d_scalars,
BLS12_381_g2_affine_t* d_points,
size_t count,
size_t batch_size,
size_t device_id,
cudaStream_t stream);
#ifdef __cplusplus
}
#endif
#endif /* _BLS12_381_MSM_H */

View File

@@ -1,195 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
#include <cuda.h>
#include <stdbool.h>
// ntt.h
#ifndef _BLS12_381_NTT_H
#define _BLS12_381_NTT_H
#ifdef __cplusplus
extern "C" {
#endif
// Incomplete declaration of BLS12_381 projective and affine structs
typedef struct BLS12_381_projective_t BLS12_381_projective_t;
typedef struct BLS12_381_affine_t BLS12_381_affine_t;
typedef struct BLS12_381_scalar_t BLS12_381_scalar_t;
typedef struct BLS12_381_g2_projective_t BLS12_381_g2_projective_t;
typedef struct BLS12_381_g2_affine_t BLS12_381_g2_affine_t;
int ntt_cuda_bls12_381(BLS12_381_scalar_t* arr, uint32_t n, bool inverse, size_t device_id);
int ntt_batch_cuda_bls12_381(
BLS12_381_scalar_t* arr, uint32_t arr_size, uint32_t batch_size, bool inverse, size_t device_id);
int ecntt_cuda_bls12_381(BLS12_381_projective_t* arr, uint32_t n, bool inverse, size_t device_id);
int ecntt_batch_cuda_bls12_381(
BLS12_381_projective_t* arr, uint32_t arr_size, uint32_t batch_size, bool inverse, size_t device_id);
BLS12_381_scalar_t*
build_domain_cuda_bls12_381(uint32_t domain_size, uint32_t logn, bool inverse, size_t device_id, size_t stream);
int interpolate_scalars_cuda_bls12_381(
BLS12_381_scalar_t* d_out,
BLS12_381_scalar_t* d_evaluations,
BLS12_381_scalar_t* d_domain,
unsigned n,
unsigned device_id,
size_t stream);
int interpolate_scalars_batch_cuda_bls12_381(
BLS12_381_scalar_t* d_out,
BLS12_381_scalar_t* d_evaluations,
BLS12_381_scalar_t* d_domain,
unsigned n,
unsigned batch_size,
size_t device_id,
size_t stream);
int interpolate_points_cuda_bls12_381(
BLS12_381_projective_t* d_out,
BLS12_381_projective_t* d_evaluations,
BLS12_381_scalar_t* d_domain,
unsigned n,
size_t device_id,
size_t stream);
int interpolate_points_batch_cuda_bls12_381(
BLS12_381_projective_t* d_out,
BLS12_381_projective_t* d_evaluations,
BLS12_381_scalar_t* d_domain,
unsigned n,
unsigned batch_size,
size_t device_id,
size_t stream);
int interpolate_scalars_on_coset_cuda_bls12_381(
BLS12_381_scalar_t* d_out,
BLS12_381_scalar_t* d_evaluations,
BLS12_381_scalar_t* d_domain,
unsigned n,
BLS12_381_scalar_t* coset_powers,
size_t device_id,
size_t stream);
int interpolate_scalars_batch_on_coset_cuda_bls12_381(
BLS12_381_scalar_t* d_out,
BLS12_381_scalar_t* d_evaluations,
BLS12_381_scalar_t* d_domain,
unsigned n,
unsigned batch_size,
BLS12_381_scalar_t* coset_powers,
size_t device_id,
size_t stream);
int evaluate_scalars_cuda_bls12_381(
BLS12_381_scalar_t* d_out,
BLS12_381_scalar_t* d_coefficients,
BLS12_381_scalar_t* d_domain,
unsigned domain_size,
unsigned n,
unsigned device_id,
size_t stream);
int evaluate_scalars_batch_cuda_bls12_381(
BLS12_381_scalar_t* d_out,
BLS12_381_scalar_t* d_coefficients,
BLS12_381_scalar_t* d_domain,
unsigned domain_size,
unsigned n,
unsigned batch_size,
size_t device_id,
size_t stream);
int evaluate_points_cuda_bls12_381(
BLS12_381_projective_t* d_out,
BLS12_381_projective_t* d_coefficients,
BLS12_381_scalar_t* d_domain,
unsigned domain_size,
unsigned n,
size_t device_id,
size_t stream);
int evaluate_points_batch_cuda_bls12_381(
BLS12_381_projective_t* d_out,
BLS12_381_projective_t* d_coefficients,
BLS12_381_scalar_t* d_domain,
unsigned domain_size,
unsigned n,
unsigned batch_size,
size_t device_id,
size_t stream);
int evaluate_scalars_on_coset_cuda_bls12_381(
BLS12_381_scalar_t* d_out,
BLS12_381_scalar_t* d_coefficients,
BLS12_381_scalar_t* d_domain,
unsigned domain_size,
unsigned n,
BLS12_381_scalar_t* coset_powers,
unsigned device_id,
size_t stream);
int evaluate_scalars_on_coset_batch_cuda_bls12_381(
BLS12_381_scalar_t* d_out,
BLS12_381_scalar_t* d_coefficients,
BLS12_381_scalar_t* d_domain,
unsigned domain_size,
unsigned n,
unsigned batch_size,
BLS12_381_scalar_t* coset_powers,
size_t device_id,
size_t stream);
int evaluate_points_on_coset_cuda_bls12_381(
BLS12_381_projective_t* d_out,
BLS12_381_projective_t* d_coefficients,
BLS12_381_scalar_t* d_domain,
unsigned domain_size,
unsigned n,
BLS12_381_scalar_t* coset_powers,
size_t device_id,
size_t stream);
int evaluate_points_on_coset_batch_cuda_bls12_381(
BLS12_381_projective_t* d_out,
BLS12_381_projective_t* d_coefficients,
BLS12_381_scalar_t* d_domain,
unsigned domain_size,
unsigned n,
unsigned batch_size,
BLS12_381_scalar_t* coset_powers,
size_t device_id,
size_t stream);
int reverse_order_scalars_cuda_bls12_381(BLS12_381_scalar_t* arr, int n, size_t device_id, size_t stream);
int reverse_order_scalars_batch_cuda_bls12_381(
BLS12_381_scalar_t* arr, int n, int batch_size, size_t device_id, size_t stream);
int reverse_order_points_cuda_bls12_381(BLS12_381_projective_t* arr, int n, size_t device_id, size_t stream);
int reverse_order_points_batch_cuda_bls12_381(
BLS12_381_projective_t* arr, int n, int batch_size, size_t device_id, size_t stream);
int add_scalars_cuda_bls12_381(
BLS12_381_scalar_t* d_out, BLS12_381_scalar_t* d_in1, BLS12_381_scalar_t* d_in2, unsigned n, size_t stream);
int sub_scalars_cuda_bls12_381(
BLS12_381_scalar_t* d_out, BLS12_381_scalar_t* d_in1, BLS12_381_scalar_t* d_in2, unsigned n, size_t stream);
int to_montgomery_scalars_cuda_bls12_381(BLS12_381_scalar_t* d_inout, unsigned n, size_t stream);
int from_montgomery_scalars_cuda_bls12_381(BLS12_381_scalar_t* d_inout, unsigned n, size_t stream);
// points g1
int to_montgomery_proj_points_cuda_bls12_381(BLS12_381_projective_t* d_inout, unsigned n, size_t stream);
int from_montgomery_proj_points_cuda_bls12_381(BLS12_381_projective_t* d_inout, unsigned n, size_t stream);
int to_montgomery_aff_points_cuda_bls12_381(BLS12_381_affine_t* d_inout, unsigned n, size_t stream);
int from_montgomery_aff_points_cuda_bls12_381(BLS12_381_affine_t* d_inout, unsigned n, size_t stream);
// points g2
int to_montgomery_proj_points_g2_cuda_bls12_381(BLS12_381_g2_projective_t* d_inout, unsigned n, size_t stream);
int from_montgomery_proj_points_g2_cuda_bls12_381(BLS12_381_g2_projective_t* d_inout, unsigned n, size_t stream);
int to_montgomery_aff_points_g2_cuda_bls12_381(BLS12_381_g2_affine_t* d_inout, unsigned n, size_t stream);
int from_montgomery_aff_points_g2_cuda_bls12_381(BLS12_381_g2_affine_t* d_inout, unsigned n, size_t stream);
#ifdef __cplusplus
}
#endif
#endif /* _BLS12_381_NTT_H */

View File

@@ -1,50 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
#include <cuda.h>
#include <stdbool.h>
// projective.h
#ifdef __cplusplus
extern "C" {
#endif
typedef struct BLS12_381_projective_t BLS12_381_projective_t;
typedef struct BLS12_381_g2_projective_t BLS12_381_g2_projective_t;
typedef struct BLS12_381_affine_t BLS12_381_affine_t;
typedef struct BLS12_381_g2_affine_t BLS12_381_g2_affine_t;
typedef struct BLS12_381_scalar_t BLS12_381_scalar_t;
bool projective_is_on_curve_bls12_381(BLS12_381_projective_t* point1);
int random_scalar_bls12_381(BLS12_381_scalar_t* out);
int random_projective_bls12_381(BLS12_381_projective_t* out);
BLS12_381_projective_t* projective_zero_bls12_381();
int projective_to_affine_bls12_381(BLS12_381_affine_t* out, BLS12_381_projective_t* point1);
int projective_from_affine_bls12_381(BLS12_381_projective_t* out, BLS12_381_affine_t* point1);
int random_g2_projective_bls12_381(BLS12_381_g2_projective_t* out);
int g2_projective_to_affine_bls12_381(BLS12_381_g2_affine_t* out, BLS12_381_g2_projective_t* point1);
int g2_projective_from_affine_bls12_381(BLS12_381_g2_projective_t* out, BLS12_381_g2_affine_t* point1);
bool g2_projective_is_on_curve_bls12_381(BLS12_381_g2_projective_t* point1);
bool eq_bls12_381(BLS12_381_projective_t* point1, BLS12_381_projective_t* point2);
bool eq_g2_bls12_381(BLS12_381_g2_projective_t* point1, BLS12_381_g2_projective_t* point2);
#ifdef __cplusplus
}
#endif

View File

@@ -1,49 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
#include <cuda.h>
#include <stdbool.h>
// ve_mod_mult.h
#ifndef _BLS12_381_VEC_MULT_H
#define _BLS12_381_VEC_MULT_H
#ifdef __cplusplus
extern "C" {
#endif
typedef struct BLS12_381_projective_t BLS12_381_projective_t;
typedef struct BLS12_381_scalar_t BLS12_381_scalar_t;
int32_t vec_mod_mult_point_bls12_381(
BLS12_381_projective_t* inout, BLS12_381_scalar_t* scalar_vec, size_t n_elements, size_t device_id);
int32_t vec_mod_mult_scalar_bls12_381(
BLS12_381_scalar_t* inout, BLS12_381_scalar_t* scalar_vec, size_t n_elements, size_t device_id);
int32_t vec_mod_mult_device_scalar_bls12_381(
BLS12_381_scalar_t* inout, BLS12_381_scalar_t* scalar_vec, size_t n_elements, size_t device_id);
int32_t matrix_vec_mod_mult_bls12_381(
BLS12_381_scalar_t* matrix_flattened,
BLS12_381_scalar_t* input,
BLS12_381_scalar_t* output,
size_t n_elements,
size_t device_id);
#ifdef __cplusplus
}
#endif
#endif /* _BLS12_381_VEC_MULT_H */

View File

@@ -1,208 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
package bls12381
import (
"errors"
"fmt"
"unsafe"
)
// #cgo CFLAGS: -I./include/
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbls12_381
// #include "msm.h"
import "C"
func Msm(out *G1ProjectivePoint, points []G1PointAffine, scalars []G1ScalarField, device_id int) (*G1ProjectivePoint, error) {
if len(points) != len(scalars) {
return nil, errors.New("error on: len(points) != len(scalars)")
}
pointsC := (*C.BLS12_381_affine_t)(unsafe.Pointer(&points[0]))
scalarsC := (*C.BLS12_381_scalar_t)(unsafe.Pointer(&scalars[0]))
outC := (*C.BLS12_381_projective_t)(unsafe.Pointer(out))
ret := C.msm_cuda_bls12_381(outC, pointsC, scalarsC, C.size_t(len(points)), C.size_t(device_id))
if ret != 0 {
return nil, fmt.Errorf("msm_cuda_bls12_381 returned error code: %d", ret)
}
return out, nil
}
func MsmG2(out *G2Point, points []G2PointAffine, scalars []G1ScalarField, device_id int) (*G2Point, error) {
if len(points) != len(scalars) {
return nil, errors.New("error on: len(points) != len(scalars)")
}
pointsC := (*C.BLS12_381_g2_affine_t)(unsafe.Pointer(&points[0]))
scalarsC := (*C.BLS12_381_scalar_t)(unsafe.Pointer(&scalars[0]))
outC := (*C.BLS12_381_g2_projective_t)(unsafe.Pointer(out))
ret := C.msm_g2_cuda_bls12_381(outC, pointsC, scalarsC, C.size_t(len(points)), C.size_t(device_id))
if ret != 0 {
return nil, fmt.Errorf("msm_g2_cuda_bls12_381 returned error code: %d", ret)
}
return out, nil
}
func MsmBatch(points *[]G1PointAffine, scalars *[]G1ScalarField, batchSize, deviceId int) ([]*G1ProjectivePoint, error) {
// Check for nil pointers
if points == nil || scalars == nil {
return nil, errors.New("points or scalars is nil")
}
if len(*points) != len(*scalars) {
return nil, errors.New("error on: len(points) != len(scalars)")
}
// Check for empty slices
if len(*points) == 0 || len(*scalars) == 0 {
return nil, errors.New("points or scalars is empty")
}
// Check for zero batchSize
if batchSize <= 0 {
return nil, errors.New("error on: batchSize must be greater than zero")
}
// Allocate one contiguous buffer for the results; the CUDA call writes the
// points here and the returned slice holds pointers into it. (The original
// passed &out[0], i.e. the address of a Go pointer slot, not point storage.)
results := make([]G1ProjectivePoint, batchSize)
out := make([]*G1ProjectivePoint, batchSize)
for i := 0; i < batchSize; i++ {
results[i].SetZero()
out[i] = &results[i]
}
outC := (*C.BLS12_381_projective_t)(unsafe.Pointer(&results[0]))
pointsC := (*C.BLS12_381_affine_t)(unsafe.Pointer(&(*points)[0]))
scalarsC := (*C.BLS12_381_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
msmSizeC := C.size_t(len(*points) / batchSize)
deviceIdC := C.size_t(deviceId)
batchSizeC := C.size_t(batchSize)
ret := C.msm_batch_cuda_bls12_381(outC, pointsC, scalarsC, batchSizeC, msmSizeC, deviceIdC)
if ret != 0 {
return nil, fmt.Errorf("msm_batch_cuda_bls12_381 returned error code: %d", ret)
}
return out, nil
}
func MsmG2Batch(points *[]G2PointAffine, scalars *[]G1ScalarField, batchSize, deviceId int) ([]*G2Point, error) {
// Check for nil pointers
if points == nil || scalars == nil {
return nil, errors.New("points or scalars is nil")
}
if len(*points) != len(*scalars) {
return nil, errors.New("error on: len(points) != len(scalars)")
}
// Check for empty slices
if len(*points) == 0 || len(*scalars) == 0 {
return nil, errors.New("points or scalars is empty")
}
// Check for zero batchSize
if batchSize <= 0 {
return nil, errors.New("error on: batchSize must be greater than zero")
}
// As in MsmBatch, use contiguous point storage rather than the pointer slice.
results := make([]G2Point, batchSize)
out := make([]*G2Point, batchSize)
for i := 0; i < batchSize; i++ {
out[i] = &results[i]
}
outC := (*C.BLS12_381_g2_projective_t)(unsafe.Pointer(&results[0]))
pointsC := (*C.BLS12_381_g2_affine_t)(unsafe.Pointer(&(*points)[0]))
scalarsC := (*C.BLS12_381_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
msmSizeC := C.size_t(len(*points) / batchSize)
deviceIdC := C.size_t(deviceId)
batchSizeC := C.size_t(batchSize)
ret := C.msm_batch_g2_cuda_bls12_381(outC, pointsC, scalarsC, batchSizeC, msmSizeC, deviceIdC)
if ret != 0 {
return nil, fmt.Errorf("msm_batch_cuda_bls12_381 returned error code: %d", ret)
}
return out, nil
}
func Commit(d_out, d_scalars, d_points unsafe.Pointer, count, bucketFactor int) int {
d_outC := (*C.BLS12_381_projective_t)(d_out)
scalarsC := (*C.BLS12_381_scalar_t)(d_scalars)
pointsC := (*C.BLS12_381_affine_t)(d_points)
countC := (C.size_t)(count)
largeBucketFactorC := C.uint(bucketFactor)
ret := C.commit_cuda_bls12_381(d_outC, scalarsC, pointsC, countC, largeBucketFactorC, 0)
if ret != 0 {
return -1
}
return 0
}
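// Unlike Msm, Commit takes device pointers. A minimal sketch mirroring
// TestCommitMSM in this package's tests (96-byte projective, 64-byte affine
// and 32-byte scalar layouts, as used there):
//
//	out_d, _ := goicicle.CudaMalloc(96)
//	points_d, _ := goicicle.CudaMalloc(count * 64)
//	goicicle.CudaMemCpyHtoD[G1PointAffine](points_d, points, count*64)
//	scalars_d, _ := goicicle.CudaMalloc(count * 32)
//	goicicle.CudaMemCpyHtoD[G1ScalarField](scalars_d, scalars, count*32)
//	ret := Commit(out_d, scalars_d, points_d, count, 10) // 10 = large-bucket factor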
func CommitG2(d_out, d_scalars, d_points unsafe.Pointer, count, bucketFactor int) int {
d_outC := (*C.BLS12_381_g2_projective_t)(d_out)
scalarsC := (*C.BLS12_381_scalar_t)(d_scalars)
pointsC := (*C.BLS12_381_g2_affine_t)(d_points)
countC := (C.size_t)(count)
largeBucketFactorC := C.uint(bucketFactor)
ret := C.commit_g2_cuda_bls12_381(d_outC, scalarsC, pointsC, countC, largeBucketFactorC, 0)
if ret != 0 {
return -1
}
return 0
}
func CommitBatch(d_out, d_scalars, d_points unsafe.Pointer, count, batch_size int) int {
d_outC := (*C.BLS12_381_projective_t)(d_out)
scalarsC := (*C.BLS12_381_scalar_t)(d_scalars)
pointsC := (*C.BLS12_381_affine_t)(d_points)
countC := (C.size_t)(count)
batch_sizeC := (C.size_t)(batch_size)
ret := C.commit_batch_cuda_bls12_381(d_outC, scalarsC, pointsC, countC, batch_sizeC, 0)
if ret != 0 {
return -1
}
return 0
}
func CommitG2Batch(d_out, d_scalars, d_points unsafe.Pointer, count, batch_size int) int {
d_outC := (*C.BLS12_381_g2_projective_t)(d_out)
scalarsC := (*C.BLS12_381_scalar_t)(d_scalars)
pointsC := (*C.BLS12_381_g2_affine_t)(d_points)
countC := (C.size_t)(count)
batch_sizeC := (C.size_t)(batch_size)
// commit_batch_g2_cuda_bls12_381 is assumed to mirror the BN254 declaration:
// (d_out, d_scalars, d_points, count, batch_size, device_id, stream).
ret := C.commit_batch_g2_cuda_bls12_381(d_outC, scalarsC, pointsC, countC, batch_sizeC, 0, nil)
if ret != 0 {
return -1
}
return 0
}

View File

@@ -1,355 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
package bls12381
import (
"fmt"
"math"
"testing"
"time"
"unsafe"
"github.com/ingonyama-zk/icicle/goicicle"
"github.com/stretchr/testify/assert"
)
func GeneratePoints(count int) []G1PointAffine {
// Generate 10 random affine points, then repeatedly double the slice below
// until it covers count (cheaper than sampling count random points)
var points []G1PointAffine
for i := 0; i < 10; i++ {
var pointProjective G1ProjectivePoint
pointProjective.Random()
var pointAffine G1PointAffine
pointAffine.FromProjective(&pointProjective)
points = append(points, pointAffine)
}
log2_10 := math.Log2(10)
log2Count := math.Log2(float64(count))
log2Size := int(math.Ceil(log2Count - log2_10))
for i := 0; i < log2Size; i++ {
points = append(points, points...)
}
return points[:count]
}
func GeneratePointsProj(count int) []G1ProjectivePoint {
// Declare a slice of projective points
var points []G1ProjectivePoint
// Use a loop to populate the slice
for i := 0; i < count; i++ {
var p G1ProjectivePoint
p.Random()
points = append(points, p)
}
return points
}
func GenerateScalars(count int, skewed bool) []G1ScalarField {
// Declare a slice of scalar field elements
var scalars []G1ScalarField
var rand G1ScalarField
var zero G1ScalarField
var one G1ScalarField
var randLarge G1ScalarField
zero.SetZero()
one.SetOne()
randLarge.Random()
if skewed && count > 1_200_000 {
for i := 0; i < count-1_200_000; i++ {
rand.Random()
scalars = append(scalars, rand)
}
for i := 0; i < 600_000; i++ {
scalars = append(scalars, randLarge)
}
for i := 0; i < 400_000; i++ {
scalars = append(scalars, zero)
}
for i := 0; i < 200_000; i++ {
scalars = append(scalars, one)
}
} else {
for i := 0; i < count; i++ {
rand.Random()
scalars = append(scalars, rand)
}
}
return scalars[:count]
}
func TestMSM(t *testing.T) {
for _, v := range []int{24} {
count := 1 << v
points := GeneratePoints(count)
fmt.Print("Finished generating points\n")
scalars := GenerateScalars(count, true)
fmt.Print("Finished generating scalars\n")
out := new(G1ProjectivePoint)
startTime := time.Now()
_, e := Msm(out, points, scalars, 0) // non mont
fmt.Printf("icicle MSM took: %d ms\n", time.Since(startTime).Milliseconds())
assert.Equal(t, e, nil, "error should be nil")
assert.True(t, out.IsOnCurve())
}
}
func TestCommitMSM(t *testing.T) {
for _, v := range []int{24} {
count := 1<<v - 1
points := GeneratePoints(count)
fmt.Print("Finished generating points\n")
scalars := GenerateScalars(count, true)
fmt.Print("Finished generating scalars\n")
out_d, _ := goicicle.CudaMalloc(96)
pointsBytes := count * 64
points_d, _ := goicicle.CudaMalloc(pointsBytes)
goicicle.CudaMemCpyHtoD[G1PointAffine](points_d, points, pointsBytes)
scalarBytes := count * 32
scalars_d, _ := goicicle.CudaMalloc(scalarBytes)
goicicle.CudaMemCpyHtoD[G1ScalarField](scalars_d, scalars, scalarBytes)
startTime := time.Now()
e := Commit(out_d, scalars_d, points_d, count, 10)
fmt.Printf("icicle MSM took: %d ms\n", time.Since(startTime).Milliseconds())
outHost := make([]G1ProjectivePoint, 1)
goicicle.CudaMemCpyDtoH[G1ProjectivePoint](outHost, out_d, 96)
assert.Equal(t, e, 0, "error should be 0")
assert.True(t, outHost[0].IsOnCurve())
}
}
func BenchmarkCommit(b *testing.B) {
LOG_MSM_SIZES := []int{20, 21, 22, 23, 24, 25, 26}
for _, logMsmSize := range LOG_MSM_SIZES {
msmSize := 1 << logMsmSize
points := GeneratePoints(msmSize)
scalars := GenerateScalars(msmSize, false)
out_d, _ := goicicle.CudaMalloc(96)
pointsBytes := msmSize * 64
points_d, _ := goicicle.CudaMalloc(pointsBytes)
goicicle.CudaMemCpyHtoD[G1PointAffine](points_d, points, pointsBytes)
scalarBytes := msmSize * 32
scalars_d, _ := goicicle.CudaMalloc(scalarBytes)
goicicle.CudaMemCpyHtoD[G1ScalarField](scalars_d, scalars, scalarBytes)
b.Run(fmt.Sprintf("MSM %d", logMsmSize), func(b *testing.B) {
for n := 0; n < b.N; n++ {
e := Commit(out_d, scalars_d, points_d, msmSize, 10)
if e != 0 {
panic("Error occured")
}
}
})
}
}
func TestBatchMSM(t *testing.T) {
for _, batchPow2 := range []int{2, 4} {
for _, pow2 := range []int{4, 6} {
msmSize := 1 << pow2
batchSize := 1 << batchPow2
count := msmSize * batchSize
points := GeneratePoints(count)
scalars := GenerateScalars(count, false)
a, e := MsmBatch(&points, &scalars, batchSize, 0)
if e != nil {
t.Errorf("MsmBatchBLS12_381 returned an error: %v", e)
}
if len(a) != batchSize {
t.Errorf("Expected length %d, but got %d", batchSize, len(a))
}
}
}
}
func BenchmarkMSM(b *testing.B) {
LOG_MSM_SIZES := []int{20, 21, 22, 23, 24, 25, 26}
for _, logMsmSize := range LOG_MSM_SIZES {
msmSize := 1 << logMsmSize
points := GeneratePoints(msmSize)
scalars := GenerateScalars(msmSize, false)
b.Run(fmt.Sprintf("MSM %d", logMsmSize), func(b *testing.B) {
for n := 0; n < b.N; n++ {
out := new(G1ProjectivePoint)
_, e := Msm(out, points, scalars, 0)
if e != nil {
panic("Error occured")
}
}
})
}
}
// G2
func GenerateG2Points(count int) []G2PointAffine {
// Generate 10 random affine G2 points, then double the slice until it covers count
var points []G2PointAffine
for i := 0; i < 10; i++ {
var p G2Point
p.Random()
var affine G2PointAffine
affine.FromProjective(&p)
points = append(points, affine)
}
log2_10 := math.Log2(10)
log2Count := math.Log2(float64(count))
log2Size := int(math.Ceil(log2Count - log2_10))
for i := 0; i < log2Size; i++ {
points = append(points, points...)
}
return points[:count]
}
func TestMsmG2BLS12381(t *testing.T) {
for _, v := range []int{24} {
count := 1 << v
points := GenerateG2Points(count)
fmt.Print("Finished generating points\n")
scalars := GenerateScalars(count, false)
fmt.Print("Finished generating scalars\n")
out := new(G2Point)
_, e := MsmG2(out, points, scalars, 0)
assert.Equal(t, e, nil, "error should be nil")
assert.True(t, out.IsOnCurve())
}
}
func BenchmarkMsmG2BLS12381(b *testing.B) {
LOG_MSM_SIZES := []int{20, 21, 22, 23, 24, 25, 26}
for _, logMsmSize := range LOG_MSM_SIZES {
msmSize := 1 << logMsmSize
points := GenerateG2Points(msmSize)
scalars := GenerateScalars(msmSize, false)
b.Run(fmt.Sprintf("MSM G2 %d", logMsmSize), func(b *testing.B) {
for n := 0; n < b.N; n++ {
out := new(G2Point)
_, e := MsmG2(out, points, scalars, 0)
if e != nil {
panic("Error occured")
}
}
})
}
}
func TestCommitG2MSM(t *testing.T) {
for _, v := range []int{24} {
count := 1 << v
points := GenerateG2Points(count)
fmt.Print("Finished generating points\n")
scalars := GenerateScalars(count, true)
fmt.Print("Finished generating scalars\n")
var sizeCheckG2PointAffine G2PointAffine
inputPointsBytes := count * int(unsafe.Sizeof(sizeCheckG2PointAffine))
var sizeCheckG2Point G2Point
out_d, _ := goicicle.CudaMalloc(int(unsafe.Sizeof(sizeCheckG2Point)))
points_d, _ := goicicle.CudaMalloc(inputPointsBytes)
goicicle.CudaMemCpyHtoD[G2PointAffine](points_d, points, inputPointsBytes)
scalarBytes := count * 32
scalars_d, _ := goicicle.CudaMalloc(scalarBytes)
goicicle.CudaMemCpyHtoD[G1ScalarField](scalars_d, scalars, scalarBytes)
startTime := time.Now()
e := CommitG2(out_d, scalars_d, points_d, count, 10)
fmt.Printf("icicle MSM took: %d ms\n", time.Since(startTime).Milliseconds())
outHost := make([]G2Point, 1)
goicicle.CudaMemCpyDtoH[G2Point](outHost, out_d, int(unsafe.Sizeof(sizeCheckG2Point)))
assert.Equal(t, e, 0, "error should be 0")
assert.Equal(t, len(outHost), 1)
result := outHost[0]
assert.True(t, result.IsOnCurve())
}
}
func TestBatchG2MSM(t *testing.T) {
for _, batchPow2 := range []int{2, 4} {
for _, pow2 := range []int{4, 6} {
msmSize := 1 << pow2
batchSize := 1 << batchPow2
count := msmSize * batchSize
points := GenerateG2Points(count)
scalars := GenerateScalars(count, false)
pointsResults, e := MsmG2Batch(&points, &scalars, batchSize, 0)
if e != nil {
t.Errorf("MsmBatchBLS12_381 returned an error: %v", e)
}
if len(pointsResults) != batchSize {
t.Errorf("Expected length %d, but got %d", batchSize, len(pointsResults))
}
for _, s := range pointsResults {
assert.True(t, s.IsOnCurve())
}
}
}
}

View File

@@ -1,221 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
package bls12381
import (
"errors"
"fmt"
"unsafe"
"github.com/ingonyama-zk/icicle/goicicle"
)
// #cgo CFLAGS: -I./include/
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbls12_381
// #include "ntt.h"
import "C"
const (
NONE = 0
DIF = 1
DIT = 2
)
func Ntt(scalars *[]G1ScalarField, isInverse bool, deviceId int) uint64 {
scalarsC := (*C.BLS12_381_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
ret := C.ntt_cuda_bls12_381(scalarsC, C.uint32_t(len(*scalars)), C.bool(isInverse), C.size_t(deviceId))
return uint64(ret)
}
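// Ntt transforms the scalar slice in place; a forward/inverse pair restores
// the input, as exercised by TestNttBLS12381. Sketch:
//
//	values := GenerateScalars(1<<10, false)
//	Ntt(&values, false, 0) // forward NTT on device 0
//	Ntt(&values, true, 0)  // inverse NTT restores values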
func NttBatch(scalars *[]G1ScalarField, isInverse bool, batchSize, deviceId int) uint64 {
scalarsC := (*C.BLS12_381_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
isInverseC := C.bool(isInverse)
batchSizeC := C.uint32_t(batchSize)
deviceIdC := C.size_t(deviceId)
ret := C.ntt_batch_cuda_bls12_381(scalarsC, C.uint32_t(len(*scalars)), batchSizeC, isInverseC, deviceIdC)
return uint64(ret)
}
func EcNtt(values *[]G1ProjectivePoint, isInverse bool, deviceId int) uint64 {
valuesC := (*C.BLS12_381_projective_t)(unsafe.Pointer(&(*values)[0]))
deviceIdC := C.size_t(deviceId)
isInverseC := C.bool(isInverse)
n := C.uint32_t(len(*values))
ret := C.ecntt_cuda_bls12_381(valuesC, n, isInverseC, deviceIdC)
return uint64(ret)
}
func EcNttBatch(values *[]G1ProjectivePoint, isInverse bool, batchSize, deviceId int) uint64 {
valuesC := (*C.BLS12_381_projective_t)(unsafe.Pointer(&(*values)[0]))
deviceIdC := C.size_t(deviceId)
isInverseC := C.bool(isInverse)
n := C.uint32_t(len(*values))
batchSizeC := C.uint32_t(batchSize)
ret := C.ecntt_batch_cuda_bls12_381(valuesC, n, batchSizeC, isInverseC, deviceIdC)
return uint64(ret)
}
func GenerateTwiddles(d_size int, log_d_size int, inverse bool) (up unsafe.Pointer, err error) {
domain_size := C.uint32_t(d_size)
logn := C.uint32_t(log_d_size)
is_inverse := C.bool(inverse)
dp := C.build_domain_cuda_bls12_381(domain_size, logn, is_inverse, 0, 0)
if dp == nil {
err = errors.New("nullptr returned from generating twiddles")
return unsafe.Pointer(nil), err
}
return unsafe.Pointer(dp), nil
}
// Reverses d_scalars in-place
func ReverseScalars(d_scalars unsafe.Pointer, len int) (int, error) {
scalarsC := (*C.BLS12_381_scalar_t)(d_scalars)
lenC := C.int(len)
if success := C.reverse_order_scalars_cuda_bls12_381(scalarsC, lenC, 0, 0); success != 0 {
return -1, errors.New("reversing failed")
}
return 0, nil
}
func Interpolate(scalars, twiddles, cosetPowers unsafe.Pointer, size int, isCoset bool) unsafe.Pointer {
size_d := size * 32
dp, err := goicicle.CudaMalloc(size_d)
if err != nil {
return nil
}
d_out := (*C.BLS12_381_scalar_t)(dp)
scalarsC := (*C.BLS12_381_scalar_t)(scalars)
twiddlesC := (*C.BLS12_381_scalar_t)(twiddles)
cosetPowersC := (*C.BLS12_381_scalar_t)(cosetPowers)
sizeC := C.uint(size)
var ret C.int
if isCoset {
ret = C.interpolate_scalars_on_coset_cuda_bls12_381(d_out, scalarsC, twiddlesC, sizeC, cosetPowersC, 0, 0)
} else {
ret = C.interpolate_scalars_cuda_bls12_381(d_out, scalarsC, twiddlesC, sizeC, 0, 0)
}
if ret != 0 {
fmt.Print("error interpolating")
}
return unsafe.Pointer(d_out)
}
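// Illustrative sketch for Interpolate (evals_d, n and logN are hypothetical
// caller-side names; all inputs are device pointers and the returned n*32
// byte device buffer is owned by the caller). The twiddles are assumed to be
// the inverse-domain ones, as is standard for interpolation:
//
//	twiddles_d, _ := GenerateTwiddles(n, logN, true)
//	coeffs_d := Interpolate(evals_d, twiddles_d, nil, n, false) // no coset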
func Evaluate(scalars_out, scalars, twiddles, coset_powers unsafe.Pointer, scalars_size, twiddles_size int, isCoset bool) int {
scalars_outC := (*C.BLS12_381_scalar_t)(scalars_out)
scalarsC := (*C.BLS12_381_scalar_t)(scalars)
twiddlesC := (*C.BLS12_381_scalar_t)(twiddles)
coset_powersC := (*C.BLS12_381_scalar_t)(coset_powers)
sizeC := C.uint(scalars_size)
twiddlesC_size := C.uint(twiddles_size)
var ret C.int
if isCoset {
ret = C.evaluate_scalars_on_coset_cuda_bls12_381(scalars_outC, scalarsC, twiddlesC, twiddlesC_size, sizeC, coset_powersC, 0, 0)
} else {
ret = C.evaluate_scalars_cuda_bls12_381(scalars_outC, scalarsC, twiddlesC, twiddlesC_size, sizeC, 0, 0)
}
if ret != 0 {
fmt.Print("error interpolating")
return -1
}
return 0
}
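// VecScalarAdd adds the device-resident vectors element-wise; note that the
// C call below writes the sum back into in1_d.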
func VecScalarAdd(in1_d, in2_d unsafe.Pointer, size int) int {
in1_dC := (*C.BLS12_381_scalar_t)(in1_d)
in2_dC := (*C.BLS12_381_scalar_t)(in2_d)
sizeC := C.uint(size)
ret := C.add_scalars_cuda_bls12_381(in1_dC, in1_dC, in2_dC, sizeC, 0)
if ret != 0 {
fmt.Print("error adding scalar vectors")
return -1
}
return 0
}
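// VecScalarSub subtracts in2_d from in1_d element-wise; as with VecScalarAdd,
// the difference overwrites in1_d.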
func VecScalarSub(in1_d, in2_d unsafe.Pointer, size int) int {
in1_dC := (*C.BLS12_381_scalar_t)(in1_d)
in2_dC := (*C.BLS12_381_scalar_t)(in2_d)
sizeC := C.uint(size)
ret := C.sub_scalars_cuda_bls12_381(in1_dC, in1_dC, in2_dC, sizeC, 0)
if ret != 0 {
fmt.Print("error subtracting scalar vectors")
return -1
}
return 0
}
func ToMontgomery(d_scalars unsafe.Pointer, len int) (int, error) {
scalarsC := (*C.BLS12_381_scalar_t)(d_scalars)
lenC := C.uint(len)
if success := C.to_montgomery_scalars_cuda_bls12_381(scalarsC, lenC, 0); success != 0 {
return -1, errors.New("reversing failed")
}
return 0, nil
}
func FromMontgomery(d_scalars unsafe.Pointer, len int) (int, error) {
scalarsC := (*C.BLS12_381_scalar_t)(d_scalars)
lenC := C.uint(len)
if success := C.from_montgomery_scalars_cuda_bls12_381(scalarsC, lenC, 0); success != 0 {
return -1, errors.New("reversing failed")
}
return 0, nil
}
func AffinePointFromMontgomery(d_points unsafe.Pointer, len int) (int, error) {
pointsC := (*C.BLS12_381_affine_t)(d_points)
lenC := C.uint(len)
if success := C.from_montgomery_aff_points_cuda_bls12_381(pointsC, lenC, 0); success != 0 {
return -1, errors.New("reversing failed")
}
return 0, nil
}
func G2AffinePointFromMontgomery(d_points unsafe.Pointer, len int) (int, error) {
pointsC := (*C.BLS12_381_g2_affine_t)(d_points)
lenC := C.uint(len)
if success := C.from_montgomery_aff_points_g2_cuda_bls12_381(pointsC, lenC, 0); success != 0 {
return -1, errors.New("reversing failed")
}
return 0, nil
}

View File

@@ -1,148 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
package bls12381
import (
"fmt"
"github.com/stretchr/testify/assert"
"reflect"
"testing"
)
func TestNttBatchSingleBLS12381(t *testing.T) {
count := 1 << 20
scalars := GenerateScalars(count, false)
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
copy(nttResult, scalars)
assert.Equal(t, nttResult, scalars)
NttBatch(&nttResult, false, count, 0)
assert.NotEqual(t, nttResult, scalars)
}
func TestNttBLS12381CompareToGnarkDIF(t *testing.T) {
count := 1 << 2
scalars := GenerateScalars(count, false)
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
copy(nttResult, scalars)
assert.Equal(t, nttResult, scalars)
Ntt(&nttResult, false, DIF, 0)
assert.NotEqual(t, nttResult, scalars)
assert.Equal(t, nttResult, nttResult)
}
func TestINttBLS12381CompareToGnarkDIT(t *testing.T) {
count := 1 << 3
scalars := GenerateScalars(count, false)
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
copy(nttResult, scalars)
assert.Equal(t, nttResult, scalars)
Ntt(&nttResult, true, DIT, 0)
assert.NotEqual(t, nttResult, scalars)
assert.Equal(t, nttResult, nttResult)
}
func TestNttBLS12381(t *testing.T) {
count := 1 << 3
scalars := GenerateScalars(count, false)
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
copy(nttResult, scalars)
assert.Equal(t, nttResult, scalars)
Ntt(&nttResult, false, NONE, 0)
assert.NotEqual(t, nttResult, scalars)
inttResult := make([]G1ScalarField, len(nttResult))
copy(inttResult, nttResult)
assert.Equal(t, inttResult, nttResult)
Ntt(&inttResult, true, NONE, 0)
assert.Equal(t, inttResult, scalars)
}
func TestNttBatchBLS12381(t *testing.T) {
count := 1 << 5
batches := 4
scalars := GenerateScalars(count*batches, false)
var scalarVecOfVec [][]G1ScalarField = make([][]G1ScalarField, 0)
for i := 0; i < batches; i++ {
start := i * count
end := (i + 1) * count
batch := make([]G1ScalarField, len(scalars[start:end]))
copy(batch, scalars[start:end])
scalarVecOfVec = append(scalarVecOfVec, batch)
}
nttBatchResult := make([]G1ScalarField, len(scalars))
copy(nttBatchResult, scalars)
NttBatch(&nttBatchResult, false, count, 0)
var nttResultVecOfVec [][]G1ScalarField
for i := 0; i < batches; i++ {
// Clone the slice
clone := make([]G1ScalarField, len(scalarVecOfVec[i]))
copy(clone, scalarVecOfVec[i])
// Add it to the result vector of vectors
nttResultVecOfVec = append(nttResultVecOfVec, clone)
// Transform this chunk on its own with a single NTT
Ntt(&nttResultVecOfVec[i], false, 0)
}
assert.NotEqual(t, nttBatchResult, scalars)
// Check that the standalone ntt of each chunk equals the corresponding slice of the batched ntt
for i := 0; i < batches; i++ {
if !reflect.DeepEqual(nttResultVecOfVec[i], nttBatchResult[i*count:((i+1)*count)]) {
t.Errorf("ntt of vec of scalars not equal to intt of specific batch")
}
}
}
func BenchmarkNTT(b *testing.B) {
LOG_NTT_SIZES := []int{12, 15, 20, 21, 22, 23, 24, 25, 26}
for _, logNTTSize := range LOG_NTT_SIZES {
nttSize := 1 << logNTTSize
b.Run(fmt.Sprintf("NTT %d", logNTTSize), func(b *testing.B) {
scalars := GenerateScalars(nttSize, false)
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
copy(nttResult, scalars)
for n := 0; n < b.N; n++ {
Ntt(&nttResult, false, NONE, 0)
}
})
}
}

View File

@@ -1,38 +0,0 @@
package bls12381
import "encoding/binary"
// Function to convert [8]uint32 to [4]uint64
func ConvertUint32ArrToUint64Arr(arr32 [8]uint32) [4]uint64 {
var arr64 [4]uint64
for i := 0; i < len(arr32); i += 2 {
arr64[i/2] = (uint64(arr32[i]) << 32) | uint64(arr32[i+1])
}
return arr64
}
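// Worked example: the even-indexed limb lands in the high word, so
// {1, 2, 3, 4, 5, 6, 7, 8} maps to {(1<<32)|2, (3<<32)|4, (5<<32)|6, (7<<32)|8},
// i.e. {4294967298, 12884901892, 21474836486, 30064771080}.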
func ConvertUint64ArrToUint32Arr4(arr64 [4]uint64) [8]uint32 {
var arr32 [8]uint32
for i, v := range arr64 {
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, v)
arr32[i*2] = binary.LittleEndian.Uint32(b[0:4])
arr32[i*2+1] = binary.LittleEndian.Uint32(b[4:8])
}
return arr32
}
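// Caution: this is not the inverse of ConvertUint32ArrToUint64Arr above; that
// function packs the even-indexed uint32 into the high word, while this one
// emits the low word first, so a round trip swaps each pair of limbs.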
func ConvertUint64ArrToUint32Arr6(arr64 [6]uint64) [12]uint32 {
var arr32 [12]uint32
for i, v := range arr64 {
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, v)
arr32[i*2] = binary.LittleEndian.Uint32(b[0:4])
arr32[i*2+1] = binary.LittleEndian.Uint32(b[4:8])
}
return arr32
}

View File

@@ -1,81 +0,0 @@
package bls12381
import (
"testing"
)
func TestConvertUint32ArrToUint64Arr(t *testing.T) {
testCases := []struct {
name string
input [8]uint32
want [4]uint64
}{
{
name: "Test with incremental array",
input: [8]uint32{1, 2, 3, 4, 5, 6, 7, 8},
want: [4]uint64{4294967298, 12884901892, 21474836486, 30064771080},
},
{
name: "Test with all zeros",
input: [8]uint32{0, 0, 0, 0, 0, 0, 0, 0},
want: [4]uint64{0, 0, 0, 0},
},
{
name: "Test with maximum uint32 values",
input: [8]uint32{4294967295, 4294967295, 4294967295, 4294967295, 4294967295, 4294967295, 4294967295, 4294967295},
want: [4]uint64{18446744073709551615, 18446744073709551615, 18446744073709551615, 18446744073709551615},
},
{
name: "Test with alternating min and max uint32 values",
input: [8]uint32{0, 4294967295, 0, 4294967295, 0, 4294967295, 0, 4294967295},
want: [4]uint64{4294967295, 4294967295, 4294967295, 4294967295},
},
{
name: "Test with alternating max and min uint32 values",
input: [8]uint32{4294967295, 0, 4294967295, 0, 4294967295, 0, 4294967295, 0},
want: [4]uint64{18446744069414584320, 18446744069414584320, 18446744069414584320, 18446744069414584320},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
got := ConvertUint32ArrToUint64Arr(tc.input)
if got != tc.want {
t.Errorf("got %v, want %v", got, tc.want)
}
})
}
}
func TestConvertUint64ArrToUint32Arr(t *testing.T) {
testCases := []struct {
name string
input [6]uint64
expected [12]uint32
}{
{
name: "test one",
input: [6]uint64{1, 2, 3, 4, 5, 6},
expected: [12]uint32{1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0},
},
{
name: "test two",
input: [6]uint64{100, 200, 300, 400, 500, 600},
expected: [12]uint32{100, 0, 200, 0, 300, 0, 400, 0, 500, 0, 600, 0},
},
{
name: "test three",
input: [6]uint64{1000, 2000, 3000, 4000, 5000, 6000},
expected: [12]uint32{1000, 0, 2000, 0, 3000, 0, 4000, 0, 5000, 0, 6000, 0},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
got := ConvertUint64ArrToUint32Arr6(tc.input)
if got != tc.expected {
t.Errorf("got %v, want %v", got, tc.expected)
}
})
}
}

View File

@@ -1,41 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
package bls12381
// #cgo CFLAGS: -I./include/
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbls12_381
// #include "ve_mod_mult.h"
import "C"
import (
"fmt"
"unsafe"
)
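// VecScalarMulMod multiplies two device-resident scalar vectors element-wise;
// the C call writes the product back into scalarVec1.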
func VecScalarMulMod(scalarVec1, scalarVec2 unsafe.Pointer, size int) int {
scalarVec1C := (*C.BLS12_381_scalar_t)(scalarVec1)
scalarVec2C := (*C.BLS12_381_scalar_t)(scalarVec2)
sizeC := C.size_t(size)
ret := C.vec_mod_mult_device_scalar_bls12_381(scalarVec1C, scalarVec2C, sizeC, 0)
if ret != 0 {
fmt.Print("error multiplying scalar vectors")
return -1
}
return 0
}

View File

@@ -1,324 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
package bn254
import (
"unsafe"
"encoding/binary"
)
// #cgo CFLAGS: -I./include/
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbn254
// #include "projective.h"
// #include "ve_mod_mult.h"
import "C"
const SCALAR_SIZE = 8
const BASE_SIZE = 8
type G1ScalarField struct {
S [SCALAR_SIZE]uint32
}
type G1BaseField struct {
S [BASE_SIZE]uint32
}
/*
* BaseField Constructors
*/
func (f *G1BaseField) SetZero() *G1BaseField {
var S [BASE_SIZE]uint32
f.S = S
return f
}
func (f *G1BaseField) SetOne() *G1BaseField {
var S [BASE_SIZE]uint32
S[0] = 1
f.S = S
return f
}
func (p *G1ProjectivePoint) FromAffine(affine *G1PointAffine) *G1ProjectivePoint {
out := (*C.BN254_projective_t)(unsafe.Pointer(p))
in := (*C.BN254_affine_t)(unsafe.Pointer(affine))
C.projective_from_affine_bn254(out, in)
return p
}
func (f *G1BaseField) FromLimbs(limbs [BASE_SIZE]uint32) *G1BaseField {
copy(f.S[:], limbs[:])
return f
}
/*
* BaseField methods
*/
func (f *G1BaseField) Limbs() [BASE_SIZE]uint32 {
return f.S
}
func (f *G1BaseField) ToBytesLe() []byte {
bytes := make([]byte, len(f.S)*4)
for i, v := range f.S {
binary.LittleEndian.PutUint32(bytes[i*4:], v)
}
return bytes
}
/*
* ScalarField methods
*/
func (p *G1ScalarField) Random() *G1ScalarField {
outC := (*C.BN254_scalar_t)(unsafe.Pointer(p))
C.random_scalar_bn254(outC)
return p
}
func (f *G1ScalarField) SetZero() *G1ScalarField {
var S [SCALAR_SIZE]uint32
f.S = S
return f
}
func (f *G1ScalarField) SetOne() *G1ScalarField {
var S [SCALAR_SIZE]uint32
S[0] = 1
f.S = S
return f
}
func (a *G1ScalarField) Eq(b *G1ScalarField) bool {
for i, v := range a.S {
if b.S[i] != v {
return false
}
}
return true
}
/*
* ScalarField methods
*/
func (f *G1ScalarField) Limbs() [SCALAR_SIZE]uint32 {
return f.S
}
func (f *G1ScalarField) ToBytesLe() []byte {
bytes := make([]byte, len(f.S)*4)
for i, v := range f.S {
binary.LittleEndian.PutUint32(bytes[i*4:], v)
}
return bytes
}
/*
* PointBN254
*/
type G1ProjectivePoint struct {
X, Y, Z G1BaseField
}
func (f *G1ProjectivePoint) SetZero() *G1ProjectivePoint {
var yOne G1BaseField
yOne.SetOne()
var xZero G1BaseField
xZero.SetZero()
var zZero G1BaseField
zZero.SetZero()
f.X = xZero
f.Y = yOne
f.Z = zZero
return f
}
func (p *G1ProjectivePoint) Eq(pCompare *G1ProjectivePoint) bool {
// Cast *G1ProjectivePoint to *C.BN254_projective_t
// The unsafe.Pointer cast is necessary because Go doesn't allow direct casts
// between different pointer types.
// It's your responsibility to ensure that the types are compatible.
pC := (*C.BN254_projective_t)(unsafe.Pointer(p))
pCompareC := (*C.BN254_projective_t)(unsafe.Pointer(pCompare))
// Call the C function
// The C function doesn't keep any references to the data,
// so it's fine if the Go garbage collector moves or deletes the data later.
return bool(C.eq_bn254(pC, pCompareC))
}
func (p *G1ProjectivePoint) IsOnCurve() bool {
point := (*C.BN254_projective_t)(unsafe.Pointer(p))
res := C.projective_is_on_curve_bn254(point)
return bool(res)
}
func (p *G1ProjectivePoint) Random() *G1ProjectivePoint {
outC := (*C.BN254_projective_t)(unsafe.Pointer(p))
C.random_projective_bn254(outC)
return p
}
func (p *G1ProjectivePoint) StripZ() *G1PointAffine {
return &G1PointAffine{
X: p.X,
Y: p.Y,
}
}
func (p *G1ProjectivePoint) FromLimbs(x, y, z *[]uint32) *G1ProjectivePoint {
var _x G1BaseField
var _y G1BaseField
var _z G1BaseField
_x.FromLimbs(GetFixedLimbs(x))
_y.FromLimbs(GetFixedLimbs(y))
_z.FromLimbs(GetFixedLimbs(z))
p.X = _x
p.Y = _y
p.Z = _z
return p
}
/*
* PointAffineNoInfinityBN254
*/
type G1PointAffine struct {
X, Y G1BaseField
}
func (p *G1PointAffine) FromProjective(projective *G1ProjectivePoint) *G1PointAffine {
in := (*C.BN254_projective_t)(unsafe.Pointer(projective))
out := (*C.BN254_affine_t)(unsafe.Pointer(p))
C.projective_to_affine_bn254(out, in)
return p
}
func (p *G1PointAffine) ToProjective() *G1ProjectivePoint {
var Z G1BaseField
Z.SetOne()
return &G1ProjectivePoint{
X: p.X,
Y: p.Y,
Z: Z,
}
}
func (p *G1PointAffine) FromLimbs(X, Y *[]uint32) *G1PointAffine {
var _x G1BaseField
var _y G1BaseField
_x.FromLimbs(GetFixedLimbs(X))
_y.FromLimbs(GetFixedLimbs(Y))
p.X = _x
p.Y = _y
return p
}
/*
* Multiplication
*/
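// MultiplyVec multiplies each point in a by the matching scalar in b on the
// device; the products overwrite a.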
func MultiplyVec(a []G1ProjectivePoint, b []G1ScalarField, deviceID int) {
if len(a) != len(b) {
panic("a and b have different lengths")
}
pointsC := (*C.BN254_projective_t)(unsafe.Pointer(&a[0]))
scalarsC := (*C.BN254_scalar_t)(unsafe.Pointer(&b[0]))
deviceIdC := C.size_t(deviceID)
nElementsC := C.size_t(len(a))
C.vec_mod_mult_point_bn254(pointsC, scalarsC, nElementsC, deviceIdC)
}
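// MultiplyScalar multiplies a by b element-wise on the device; the products
// overwrite a.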
func MultiplyScalar(a []G1ScalarField, b []G1ScalarField, deviceID int) {
if len(a) != len(b) {
panic("a and b have different lengths")
}
aC := (*C.BN254_scalar_t)(unsafe.Pointer(&a[0]))
bC := (*C.BN254_scalar_t)(unsafe.Pointer(&b[0]))
deviceIdC := C.size_t(deviceID)
nElementsC := C.size_t(len(a))
C.vec_mod_mult_scalar_bn254(aC, bC, nElementsC, deviceIdC)
}
// Multiply a flattened matrix by a vector and return the product:
//
// `a` - flattened matrix;
// `b` - vector to multiply `a` by;
func MultiplyMatrix(a []G1ScalarField, b []G1ScalarField, deviceID int) []G1ScalarField {
c := make([]G1ScalarField, len(b))
for i := range c {
var p G1ScalarField
p.SetZero()
c[i] = p
}
aC := (*C.BN254_scalar_t)(unsafe.Pointer(&a[0]))
bC := (*C.BN254_scalar_t)(unsafe.Pointer(&b[0]))
cC := (*C.BN254_scalar_t)(unsafe.Pointer(&c[0]))
deviceIdC := C.size_t(deviceID)
nElementsC := C.size_t(len(a))
C.matrix_vec_mod_mult_bn254(aC, bC, cC, nElementsC, deviceIdC)
return c
}
/*
* Utils
*/
func GetFixedLimbs(slice *[]uint32) [BASE_SIZE]uint32 {
if len(*slice) <= BASE_SIZE {
limbs := [BASE_SIZE]uint32{}
copy(limbs[:len(*slice)], *slice)
return limbs
}
panic("slice has too many elements")
}

View File

@@ -1,198 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
package bn254
import (
"encoding/binary"
"testing"
"github.com/stretchr/testify/assert"
)
func TestNewFieldBN254One(t *testing.T) {
var oneField G1BaseField
oneField.SetOne()
rawOneField := [8]uint32{0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
assert.Equal(t, oneField.S, rawOneField)
}
func TestNewFieldBN254Zero(t *testing.T) {
var zeroField G1BaseField
zeroField.SetZero()
rawZeroField := [8]uint32{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
assert.Equal(t, zeroField.S, rawZeroField)
}
func TestFieldBN254ToBytesLe(t *testing.T) {
var p G1ProjectivePoint
p.Random()
expected := make([]byte, len(p.X.S)*4) // each uint32 takes 4 bytes
for i, v := range p.X.S {
binary.LittleEndian.PutUint32(expected[i*4:], v)
}
assert.Equal(t, p.X.ToBytesLe(), expected)
assert.Equal(t, len(p.X.ToBytesLe()), 32)
}
func TestNewPointBN254Zero(t *testing.T) {
var pointZero G1ProjectivePoint
pointZero.SetZero()
var baseOne G1BaseField
baseOne.SetOne()
var zeroSanity G1BaseField
zeroSanity.SetZero()
assert.Equal(t, pointZero.X, zeroSanity)
assert.Equal(t, pointZero.Y, baseOne)
assert.Equal(t, pointZero.Z, zeroSanity)
}
func TestFromProjectiveToAffine(t *testing.T) {
var projective G1ProjectivePoint
var affine G1PointAffine
projective.Random()
affine.FromProjective(&projective)
var projective2 G1ProjectivePoint
projective2.FromAffine(&affine)
assert.True(t, projective.IsOnCurve())
assert.True(t, projective2.IsOnCurve())
assert.True(t, projective.Eq(&projective2))
}
func TestBN254Eq(t *testing.T) {
var p1 G1ProjectivePoint
p1.Random()
var p2 G1ProjectivePoint
p2.Random()
assert.Equal(t, p1.Eq(&p1), true)
assert.Equal(t, p1.Eq(&p2), false)
}
func TestBN254StripZ(t *testing.T) {
var p1 G1ProjectivePoint
p1.Random()
p2ZLess := p1.StripZ()
assert.IsType(t, G1PointAffine{}, *p2ZLess)
assert.Equal(t, p1.X, p2ZLess.X)
assert.Equal(t, p1.Y, p2ZLess.Y)
}
func TestPointBN254fromLimbs(t *testing.T) {
var p G1ProjectivePoint
p.Random()
x := p.X.Limbs()
y := p.Y.Limbs()
z := p.Z.Limbs()
xSlice := x[:]
ySlice := y[:]
zSlice := z[:]
var pFromLimbs G1ProjectivePoint
pFromLimbs.FromLimbs(&xSlice, &ySlice, &zSlice)
assert.Equal(t, pFromLimbs, p)
}
func TestNewPointAffineNoInfinityBN254Zero(t *testing.T) {
var zeroP G1PointAffine
var zeroSanity G1BaseField
zeroSanity.SetZero()
assert.Equal(t, zeroP.X, zeroSanity)
assert.Equal(t, zeroP.Y, zeroSanity)
}
func TestPointAffineNoInfinityBN254FromLimbs(t *testing.T) {
// Initialize your test values
x := [8]uint32{1, 2, 3, 4, 5, 6, 7, 8}
y := [8]uint32{9, 10, 11, 12, 13, 14, 15, 16}
xSlice := x[:]
ySlice := y[:]
// Execute your function
var result G1PointAffine
result.FromLimbs(&xSlice, &ySlice)
var xBase G1BaseField
var yBase G1BaseField
xBase.FromLimbs(x)
yBase.FromLimbs(y)
// Define your expected result
expected := &G1PointAffine{
X: xBase,
Y: yBase,
}
// Test if result is as expected
assert.Equal(t, result, expected)
}
func TestGetFixedLimbs(t *testing.T) {
t.Run("case of valid input of length less than 8", func(t *testing.T) {
slice := []uint32{1, 2, 3, 4, 5, 6, 7}
expected := [8]uint32{1, 2, 3, 4, 5, 6, 7, 0}
result := GetFixedLimbs(&slice)
assert.Equal(t, result, expected)
})
t.Run("case of valid input of length 8", func(t *testing.T) {
slice := []uint32{1, 2, 3, 4, 5, 6, 7, 8}
expected := [8]uint32{1, 2, 3, 4, 5, 6, 7, 8}
result := GetFixedLimbs(&slice)
assert.Equal(t, result, expected)
})
t.Run("case of empty input", func(t *testing.T) {
slice := []uint32{}
expected := [8]uint32{0, 0, 0, 0, 0, 0, 0, 0}
result := GetFixedLimbs(&slice)
assert.Equal(t, result, expected)
})
t.Run("case of input length greater than 8", func(t *testing.T) {
slice := []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9}
defer func() {
if r := recover(); r == nil {
t.Errorf("the code did not panic")
}
}()
GetFixedLimbs(&slice)
})
}

View File

@@ -1,113 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
package bn254
import (
"encoding/binary"
"unsafe"
)
// #cgo CFLAGS: -I./include/
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbn254
// #include "projective.h"
// #include "ve_mod_mult.h"
import "C"
// G2 extension field
type G2Element [4]uint64
type ExtentionField struct {
A0, A1 G2Element
}
type G2PointAffine struct {
X, Y ExtentionField
}
type G2Point struct {
X, Y, Z ExtentionField
}
func (p *G2Point) Random() *G2Point {
outC := (*C.BN254_g2_projective_t)(unsafe.Pointer(p))
C.random_g2_projective_bn254(outC)
return p
}
func (p *G2Point) FromAffine(affine *G2PointAffine) *G2Point {
out := (*C.BN254_g2_projective_t)(unsafe.Pointer(p))
in := (*C.BN254_g2_affine_t)(unsafe.Pointer(affine))
C.g2_projective_from_affine_bn254(out, in)
return p
}
func (p *G2Point) Eq(pCompare *G2Point) bool {
// Cast *G2Point to *C.BN254_g2_projective_t
// The unsafe.Pointer cast is necessary because Go doesn't allow direct casts
// between different pointer types.
// It's your responsibility to ensure that the types are compatible.
pC := (*C.BN254_g2_projective_t)(unsafe.Pointer(p))
pCompareC := (*C.BN254_g2_projective_t)(unsafe.Pointer(pCompare))
// Call the C function
// The C function doesn't keep any references to the data,
// so it's fine if the Go garbage collector moves or deletes the data later.
return bool(C.eq_g2_bn254(pC, pCompareC))
}
func (f *G2Element) ToBytesLe() []byte {
var bytes []byte
for _, val := range f {
buf := make([]byte, 8) // 8 bytes because uint64 is 64-bit
binary.LittleEndian.PutUint64(buf, val)
bytes = append(bytes, buf...)
}
return bytes
}
func (p *G2PointAffine) ToProjective() G2Point {
return G2Point{
X: p.X,
Y: p.Y,
Z: ExtentionField{
A0: G2Element{1, 0, 0, 0},
A1: G2Element{0, 0, 0, 0},
},
}
}
func (p *G2PointAffine) FromProjective(projective *G2Point) *G2PointAffine {
out := (*C.BN254_g2_affine_t)(unsafe.Pointer(p))
in := (*C.BN254_g2_projective_t)(unsafe.Pointer(projective))
C.g2_projective_to_affine_bn254(out, in)
return p
}
func (p *G2Point) IsOnCurve() bool {
// Reinterpret the Go struct as the corresponding C struct for the curve check
point := (*C.BN254_g2_projective_t)(unsafe.Pointer(p))
res := C.g2_projective_is_on_curve_bn254(point)
return bool(res)
}

View File

@@ -1,76 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
package bn254
import (
"github.com/stretchr/testify/assert"
"testing"
)
func TestG2Eqg2(t *testing.T) {
var point G2Point
point.Random()
assert.True(t, point.Eq(&point))
}
func TestG2FromProjectiveToAffine(t *testing.T) {
var projective G2Point
var affine G2PointAffine
projective.Random()
affine.FromProjective(&projective)
var projective2 G2Point
projective2.FromAffine(&affine)
assert.True(t, projective.IsOnCurve())
assert.True(t, projective2.IsOnCurve())
assert.True(t, projective.Eq(&projective2))
}
func TestG2Eqg2NotEqual(t *testing.T) {
var point G2Point
point.Random()
var point2 G2Point
point2.Random()
assert.False(t, point.Eq(&point2))
}
func TestG2ToBytes(t *testing.T) {
element := G2Element{0x6546098ea84b6298, 0x4a384533d1f68aca, 0xaa0666972d771336, 0x1569e4a34321993}
bytes := element.ToBytesLe()
assert.Equal(t, bytes, []byte{0x98, 0x62, 0x4b, 0xa8, 0x8e, 0x9, 0x46, 0x65, 0xca, 0x8a, 0xf6, 0xd1, 0x33, 0x45, 0x38, 0x4a, 0x36, 0x13, 0x77, 0x2d, 0x97, 0x66, 0x6, 0xaa, 0x93, 0x19, 0x32, 0x34, 0x4a, 0x9e, 0x56, 0x1})
}
func TestG2ShouldConvertToProjective(t *testing.T) {
var pointProjective G2Point
var pointAffine G2PointAffine
pointProjective.Random()
pointAffine.FromProjective(&pointProjective)
proj := pointAffine.ToProjective()
assert.True(t, proj.IsOnCurve())
assert.True(t, pointProjective.Eq(&proj))
}

View File

@@ -1,94 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdbool.h>
// msm.h
#ifndef _BN254_MSM_H
#define _BN254_MSM_H
#ifdef __cplusplus
extern "C" {
#endif
// Incomplete declaration of BN254 projective and affine structs
typedef struct BN254_projective_t BN254_projective_t;
typedef struct BN254_g2_projective_t BN254_g2_projective_t;
typedef struct BN254_affine_t BN254_affine_t;
typedef struct BN254_g2_affine_t BN254_g2_affine_t;
typedef struct BN254_scalar_t BN254_scalar_t;
typedef cudaStream_t CudaStream_t;
int msm_cuda_bn254(
BN254_projective_t* out, BN254_affine_t* points, BN254_scalar_t* scalars, size_t count, size_t device_id);
int msm_batch_cuda_bn254(
BN254_projective_t* out,
BN254_affine_t* points,
BN254_scalar_t* scalars,
size_t batch_size,
size_t msm_size,
size_t device_id);
int commit_cuda_bn254(
BN254_projective_t* d_out,
BN254_scalar_t* d_scalars,
BN254_affine_t* d_points,
size_t count,
unsigned large_bucket_factor,
size_t device_id);
int commit_batch_cuda_bn254(
BN254_projective_t* d_out,
BN254_scalar_t* d_scalars,
BN254_affine_t* d_points,
size_t count,
size_t batch_size,
size_t device_id);
int msm_g2_cuda_bn254(
BN254_g2_projective_t* out, BN254_g2_affine_t* points, BN254_scalar_t* scalars, size_t count, size_t device_id);
int msm_batch_g2_cuda_bn254(
BN254_g2_projective_t* out,
BN254_g2_affine_t* points,
BN254_scalar_t* scalars,
size_t batch_size,
size_t msm_size,
size_t device_id);
int commit_g2_cuda_bn254(
BN254_g2_projective_t* d_out,
BN254_scalar_t* d_scalars,
BN254_g2_affine_t* d_points,
size_t count,
unsigned large_bucket_factor,
size_t device_id);
int commit_batch_g2_cuda_bn254(
BN254_g2_projective_t* d_out,
BN254_scalar_t* d_scalars,
BN254_g2_affine_t* d_points,
size_t count,
size_t batch_size,
size_t device_id,
cudaStream_t stream);
#ifdef __cplusplus
}
#endif
#endif /* _BN254_MSM_H */

View File

@@ -1,193 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
#include <cuda.h>
#include <stdbool.h>
// ntt.h
#ifndef _BN254_NTT_H
#define _BN254_NTT_H
#ifdef __cplusplus
extern "C" {
#endif
// Incomplete declaration of BN254 projective and affine structs
typedef struct BN254_projective_t BN254_projective_t;
typedef struct BN254_affine_t BN254_affine_t;
typedef struct BN254_scalar_t BN254_scalar_t;
typedef struct BN254_g2_projective_t BN254_g2_projective_t;
typedef struct BN254_g2_affine_t BN254_g2_affine_t;
int ntt_cuda_bn254(BN254_scalar_t* arr, uint32_t n, bool inverse, size_t device_id);
int ntt_batch_cuda_bn254(BN254_scalar_t* arr, uint32_t arr_size, uint32_t batch_size, bool inverse, size_t device_id);
int ecntt_cuda_bn254(BN254_projective_t* arr, uint32_t n, bool inverse, size_t device_id);
int ecntt_batch_cuda_bn254(
BN254_projective_t* arr, uint32_t arr_size, uint32_t batch_size, bool inverse, size_t device_id);
BN254_scalar_t*
build_domain_cuda_bn254(uint32_t domain_size, uint32_t logn, bool inverse, size_t device_id, size_t stream);
int interpolate_scalars_cuda_bn254(
BN254_scalar_t* d_out,
BN254_scalar_t* d_evaluations,
BN254_scalar_t* d_domain,
unsigned n,
unsigned device_id,
size_t stream);
int interpolate_scalars_batch_cuda_bn254(
BN254_scalar_t* d_out,
BN254_scalar_t* d_evaluations,
BN254_scalar_t* d_domain,
unsigned n,
unsigned batch_size,
size_t device_id,
size_t stream);
int interpolate_points_cuda_bn254(
BN254_projective_t* d_out,
BN254_projective_t* d_evaluations,
BN254_scalar_t* d_domain,
unsigned n,
size_t device_id,
size_t stream);
int interpolate_points_batch_cuda_bn254(
BN254_projective_t* d_out,
BN254_projective_t* d_evaluations,
BN254_scalar_t* d_domain,
unsigned n,
unsigned batch_size,
size_t device_id,
size_t stream);
int interpolate_scalars_on_coset_cuda_bn254(
BN254_scalar_t* d_out,
BN254_scalar_t* d_evaluations,
BN254_scalar_t* d_domain,
unsigned n,
BN254_scalar_t* coset_powers,
size_t device_id,
size_t stream);
int interpolate_scalars_batch_on_coset_cuda_bn254(
BN254_scalar_t* d_out,
BN254_scalar_t* d_evaluations,
BN254_scalar_t* d_domain,
unsigned n,
unsigned batch_size,
BN254_scalar_t* coset_powers,
size_t device_id,
size_t stream);
int evaluate_scalars_cuda_bn254(
BN254_scalar_t* d_out,
BN254_scalar_t* d_coefficients,
BN254_scalar_t* d_domain,
unsigned domain_size,
unsigned n,
unsigned device_id,
size_t stream);
int evaluate_scalars_batch_cuda_bn254(
BN254_scalar_t* d_out,
BN254_scalar_t* d_coefficients,
BN254_scalar_t* d_domain,
unsigned domain_size,
unsigned n,
unsigned batch_size,
size_t device_id,
size_t stream);
int evaluate_points_cuda_bn254(
BN254_projective_t* d_out,
BN254_projective_t* d_coefficients,
BN254_scalar_t* d_domain,
unsigned domain_size,
unsigned n,
size_t device_id,
size_t stream);
int evaluate_points_batch_cuda_bn254(
BN254_projective_t* d_out,
BN254_projective_t* d_coefficients,
BN254_scalar_t* d_domain,
unsigned domain_size,
unsigned n,
unsigned batch_size,
size_t device_id,
size_t stream);
int evaluate_scalars_on_coset_cuda_bn254(
BN254_scalar_t* d_out,
BN254_scalar_t* d_coefficients,
BN254_scalar_t* d_domain,
unsigned domain_size,
unsigned n,
BN254_scalar_t* coset_powers,
unsigned device_id,
size_t stream);
int evaluate_scalars_on_coset_batch_cuda_bn254(
BN254_scalar_t* d_out,
BN254_scalar_t* d_coefficients,
BN254_scalar_t* d_domain,
unsigned domain_size,
unsigned n,
unsigned batch_size,
BN254_scalar_t* coset_powers,
size_t device_id,
size_t stream);
int evaluate_points_on_coset_cuda_bn254(
BN254_projective_t* d_out,
BN254_projective_t* d_coefficients,
BN254_scalar_t* d_domain,
unsigned domain_size,
unsigned n,
BN254_scalar_t* coset_powers,
size_t device_id,
size_t stream);
int evaluate_points_on_coset_batch_cuda_bn254(
BN254_projective_t* d_out,
BN254_projective_t* d_coefficients,
BN254_scalar_t* d_domain,
unsigned domain_size,
unsigned n,
unsigned batch_size,
BN254_scalar_t* coset_powers,
size_t device_id,
size_t stream);
int reverse_order_scalars_cuda_bn254(BN254_scalar_t* arr, int n, size_t device_id, size_t stream);
int reverse_order_scalars_batch_cuda_bn254(BN254_scalar_t* arr, int n, int batch_size, size_t device_id, size_t stream);
int reverse_order_points_cuda_bn254(BN254_projective_t* arr, int n, size_t device_id, size_t stream);
int reverse_order_points_batch_cuda_bn254(
BN254_projective_t* arr, int n, int batch_size, size_t device_id, size_t stream);
int add_scalars_cuda_bn254(
BN254_scalar_t* d_out, BN254_scalar_t* d_in1, BN254_scalar_t* d_in2, unsigned n, size_t stream);
int sub_scalars_cuda_bn254(
BN254_scalar_t* d_out, BN254_scalar_t* d_in1, BN254_scalar_t* d_in2, unsigned n, size_t stream);
int to_montgomery_scalars_cuda_bn254(BN254_scalar_t* d_inout, unsigned n, size_t stream);
int from_montgomery_scalars_cuda_bn254(BN254_scalar_t* d_inout, unsigned n, size_t stream);
// points g1
int to_montgomery_proj_points_cuda_bn254(BN254_projective_t* d_inout, unsigned n, size_t stream);
int from_montgomery_proj_points_cuda_bn254(BN254_projective_t* d_inout, unsigned n, size_t stream);
int to_montgomery_aff_points_cuda_bn254(BN254_affine_t* d_inout, unsigned n, size_t stream);
int from_montgomery_aff_points_cuda_bn254(BN254_affine_t* d_inout, unsigned n, size_t stream);
// points g2
int to_montgomery_proj_points_g2_cuda_bn254(BN254_g2_projective_t* d_inout, unsigned n, size_t stream);
int from_montgomery_proj_points_g2_cuda_bn254(BN254_g2_projective_t* d_inout, unsigned n, size_t stream);
int to_montgomery_aff_points_g2_cuda_bn254(BN254_g2_affine_t* d_inout, unsigned n, size_t stream);
int from_montgomery_aff_points_g2_cuda_bn254(BN254_g2_affine_t* d_inout, unsigned n, size_t stream);
#ifdef __cplusplus
}
#endif
#endif /* _BN254_NTT_H */

View File

@@ -1,50 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
#include <cuda.h>
#include <stdbool.h>
// projective.h
#ifndef _BN254_PROJECTIVE_H
#define _BN254_PROJECTIVE_H
#ifdef __cplusplus
extern "C" {
#endif
typedef struct BN254_projective_t BN254_projective_t;
typedef struct BN254_g2_projective_t BN254_g2_projective_t;
typedef struct BN254_affine_t BN254_affine_t;
typedef struct BN254_g2_affine_t BN254_g2_affine_t;
typedef struct BN254_scalar_t BN254_scalar_t;
bool projective_is_on_curve_bn254(BN254_projective_t* point1);
int random_scalar_bn254(BN254_scalar_t* out);
int random_projective_bn254(BN254_projective_t* out);
BN254_projective_t* projective_zero_bn254();
int projective_to_affine_bn254(BN254_affine_t* out, BN254_projective_t* point1);
int projective_from_affine_bn254(BN254_projective_t* out, BN254_affine_t* point1);
int random_g2_projective_bn254(BN254_g2_projective_t* out);
int g2_projective_to_affine_bn254(BN254_g2_affine_t* out, BN254_g2_projective_t* point1);
int g2_projective_from_affine_bn254(BN254_g2_projective_t* out, BN254_g2_affine_t* point1);
bool g2_projective_is_on_curve_bn254(BN254_g2_projective_t* point1);
bool eq_bn254(BN254_projective_t* point1, BN254_projective_t* point2);
bool eq_g2_bn254(BN254_g2_projective_t* point1, BN254_g2_projective_t* point2);
#ifdef __cplusplus
}
#endif
#endif /* _BN254_PROJECTIVE_H */

View File

@@ -1,45 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
#include <cuda.h>
#include <stdbool.h>
// ve_mod_mult.h
#ifndef _BN254_VEC_MULT_H
#define _BN254_VEC_MULT_H
#ifdef __cplusplus
extern "C" {
#endif
typedef struct BN254_projective_t BN254_projective_t;
typedef struct BN254_scalar_t BN254_scalar_t;
int32_t
vec_mod_mult_point_bn254(BN254_projective_t* inout, BN254_scalar_t* scalar_vec, size_t n_elements, size_t device_id);
int32_t
vec_mod_mult_scalar_bn254(BN254_scalar_t* inout, BN254_scalar_t* scalar_vec, size_t n_elements, size_t device_id);
int32_t vec_mod_mult_device_scalar_bn254(
BN254_scalar_t* inout, BN254_scalar_t* scalar_vec, size_t n_elements, size_t device_id);
int32_t matrix_vec_mod_mult_bn254(
BN254_scalar_t* matrix_flattened, BN254_scalar_t* input, BN254_scalar_t* output, size_t n_elements, size_t device_id);
#ifdef __cplusplus
}
#endif
#endif /* _BN254_VEC_MULT_H */

View File

@@ -1,208 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
package bn254
import (
"errors"
"fmt"
"unsafe"
)
// #cgo CFLAGS: -I./include/
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbn254
// #include "msm.h"
import "C"
func Msm(out *G1ProjectivePoint, points []G1PointAffine, scalars []G1ScalarField, device_id int) (*G1ProjectivePoint, error) {
if len(points) != len(scalars) {
return nil, errors.New("error on: len(points) != len(scalars)")
}
pointsC := (*C.BN254_affine_t)(unsafe.Pointer(&points[0]))
scalarsC := (*C.BN254_scalar_t)(unsafe.Pointer(&scalars[0]))
outC := (*C.BN254_projective_t)(unsafe.Pointer(out))
ret := C.msm_cuda_bn254(outC, pointsC, scalarsC, C.size_t(len(points)), C.size_t(device_id))
if ret != 0 {
return nil, fmt.Errorf("msm_cuda_bn254 returned error code: %d", ret)
}
return out, nil
}
func MsmG2(out *G2Point, points []G2PointAffine, scalars []G1ScalarField, device_id int) (*G2Point, error) {
if len(points) != len(scalars) {
return nil, errors.New("error on: len(points) != len(scalars)")
}
pointsC := (*C.BN254_g2_affine_t)(unsafe.Pointer(&points[0]))
scalarsC := (*C.BN254_scalar_t)(unsafe.Pointer(&scalars[0]))
outC := (*C.BN254_g2_projective_t)(unsafe.Pointer(out))
ret := C.msm_g2_cuda_bn254(outC, pointsC, scalarsC, C.size_t(len(points)), C.size_t(device_id))
if ret != 0 {
return nil, fmt.Errorf("msm_g2_cuda_bn254 returned error code: %d", ret)
}
return out, nil
}
func MsmBatch(points *[]G1PointAffine, scalars *[]G1ScalarField, batchSize, deviceId int) ([]*G1ProjectivePoint, error) {
// Check for nil pointers
if points == nil || scalars == nil {
return nil, errors.New("points or scalars is nil")
}
if len(*points) != len(*scalars) {
return nil, errors.New("error on: len(points) != len(scalars)")
}
// Check for empty slices
if len(*points) == 0 || len(*scalars) == 0 {
return nil, errors.New("points or scalars is empty")
}
// Check for zero batchSize
if batchSize <= 0 {
return nil, errors.New("error on: batchSize must be greater than zero")
}
out := make([]*G1ProjectivePoint, batchSize)
// Back the returned pointers with one contiguous []G1ProjectivePoint so the
// CUDA code writes into point memory rather than over the Go pointer slice.
results := make([]G1ProjectivePoint, batchSize)
for i := 0; i < batchSize; i++ {
results[i].SetZero()
out[i] = &results[i]
}
outC := (*C.BN254_projective_t)(unsafe.Pointer(&results[0]))
pointsC := (*C.BN254_affine_t)(unsafe.Pointer(&(*points)[0]))
scalarsC := (*C.BN254_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
msmSizeC := C.size_t(len(*points) / batchSize)
deviceIdC := C.size_t(deviceId)
batchSizeC := C.size_t(batchSize)
ret := C.msm_batch_cuda_bn254(outC, pointsC, scalarsC, batchSizeC, msmSizeC, deviceIdC)
if ret != 0 {
return nil, fmt.Errorf("msm_batch_cuda_bn254 returned error code: %d", ret)
}
return out, nil
}
func MsmG2Batch(points *[]G2PointAffine, scalars *[]G1ScalarField, batchSize, deviceId int) ([]*G2Point, error) {
// Check for nil pointers
if points == nil || scalars == nil {
return nil, errors.New("points or scalars is nil")
}
if len(*points) != len(*scalars) {
return nil, errors.New("error on: len(points) != len(scalars)")
}
// Check for empty slices
if len(*points) == 0 || len(*scalars) == 0 {
return nil, errors.New("points or scalars is empty")
}
// Check for zero batchSize
if batchSize <= 0 {
return nil, errors.New("error on: batchSize must be greater than zero")
}
out := make([]*G2Point, batchSize)
// As in MsmBatch, back the returned pointers with contiguous point memory so
// the CUDA code does not overwrite the Go pointer slice.
results := make([]G2Point, batchSize)
for i := 0; i < batchSize; i++ {
out[i] = &results[i]
}
outC := (*C.BN254_g2_projective_t)(unsafe.Pointer(&results[0]))
pointsC := (*C.BN254_g2_affine_t)(unsafe.Pointer(&(*points)[0]))
scalarsC := (*C.BN254_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
msmSizeC := C.size_t(len(*points) / batchSize)
deviceIdC := C.size_t(deviceId)
batchSizeC := C.size_t(batchSize)
ret := C.msm_batch_g2_cuda_bn254(outC, pointsC, scalarsC, batchSizeC, msmSizeC, deviceIdC)
if ret != 0 {
return nil, fmt.Errorf("msm_batch_cuda_bn254 returned error code: %d", ret)
}
return out, nil
}
func Commit(d_out, d_scalars, d_points unsafe.Pointer, count, bucketFactor int) int {
d_outC := (*C.BN254_projective_t)(d_out)
scalarsC := (*C.BN254_scalar_t)(d_scalars)
pointsC := (*C.BN254_affine_t)(d_points)
countC := (C.size_t)(count)
largeBucketFactorC := C.uint(bucketFactor)
ret := C.commit_cuda_bn254(d_outC, scalarsC, pointsC, countC, largeBucketFactorC, 0)
if ret != 0 {
return -1
}
return 0
}
func CommitG2(d_out, d_scalars, d_points unsafe.Pointer, count, bucketFactor int) int {
d_outC := (*C.BN254_g2_projective_t)(d_out)
scalarsC := (*C.BN254_scalar_t)(d_scalars)
pointsC := (*C.BN254_g2_affine_t)(d_points)
countC := (C.size_t)(count)
largeBucketFactorC := C.uint(bucketFactor)
ret := C.commit_g2_cuda_bn254(d_outC, scalarsC, pointsC, countC, largeBucketFactorC, 0)
if ret != 0 {
return -1
}
return 0
}
func CommitBatch(d_out, d_scalars, d_points unsafe.Pointer, count, batch_size int) int {
d_outC := (*C.BN254_projective_t)(d_out)
scalarsC := (*C.BN254_scalar_t)(d_scalars)
pointsC := (*C.BN254_affine_t)(d_points)
countC := (C.size_t)(count)
batch_sizeC := (C.size_t)(batch_size)
ret := C.commit_batch_cuda_bn254(d_outC, scalarsC, pointsC, countC, batch_sizeC, 0)
if ret != 0 {
return -1
}
return 0
}
func CommitG2Batch(d_out, d_scalars, d_points unsafe.Pointer, count, batch_size int) int {
d_outC := (*C.BN254_g2_projective_t)(d_out)
scalarsC := (*C.BN254_scalar_t)(d_scalars)
pointsC := (*C.BN254_g2_affine_t)(d_points)
countC := (C.size_t)(count)
batch_sizeC := (C.size_t)(batch_size)
// Per the msm.h declaration, commit_batch_g2 additionally takes an explicit
// stream; nil selects the default stream.
ret := C.commit_batch_g2_cuda_bn254(d_outC, scalarsC, pointsC, countC, batch_sizeC, 0, nil)
if ret != 0 {
return -1
}
return 0
}
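The batch wrappers above all share one flat-buffer convention that is easy to get wrong from the caller's side. The sketch below is an illustrative usage example added for this writeup, not code from the diff; it assumes (as the msmSize computation above implies) that batch input i occupies the half-open range [i*msmSize, (i+1)*msmSize) of both slices, and it reuses the GeneratePoints/GenerateScalars helpers from the test file that follows.

package bn254

import "fmt"

// Hypothetical usage sketch for MsmBatch: four independent MSMs of 1024
// points each, submitted as one flat batch.
func ExampleMsmBatch() {
	batchSize := 4
	msmSize := 1 << 10
	points := GeneratePoints(batchSize * msmSize)        // helper from the tests below
	scalars := GenerateScalars(batchSize*msmSize, false) // helper from the tests below
	results, err := MsmBatch(&points, &scalars, batchSize, 0)
	if err != nil {
		panic(err)
	}
	for i, r := range results {
		fmt.Printf("batch %d: on curve = %v\n", i, r.IsOnCurve())
	}
}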


@@ -1,355 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
package bn254
import (
"fmt"
"math"
"testing"
"time"
"unsafe"
"github.com/ingonyama-zk/icicle/goicicle"
"github.com/stretchr/testify/assert"
)
func GeneratePoints(count int) []G1PointAffine {
// Generate a handful of random affine points, then repeatedly double
// the slice until it covers count elements.
var points []G1PointAffine
for i := 0; i < 10; i++ {
var pointProjective G1ProjectivePoint
pointProjective.Random()
var pointAffine G1PointAffine
pointAffine.FromProjective(&pointProjective)
points = append(points, pointAffine)
}
log2_10 := math.Log2(10)
log2Count := math.Log2(float64(count))
log2Size := int(math.Ceil(log2Count - log2_10))
for i := 0; i < log2Size; i++ {
points = append(points, points...)
}
return points[:count]
}
func GeneratePointsProj(count int) []G1ProjectivePoint {
// Declare a slice of projective points
var points []G1ProjectivePoint
// Use a loop to populate the slice
for i := 0; i < count; i++ {
var p G1ProjectivePoint
p.Random()
points = append(points, p)
}
return points
}
func GenerateScalars(count int, skewed bool) []G1ScalarField {
// Declare a slice of scalars
var scalars []G1ScalarField
var rand G1ScalarField
var zero G1ScalarField
var one G1ScalarField
var randLarge G1ScalarField
zero.SetZero()
one.SetOne()
randLarge.Random()
if skewed && count > 1_200_000 {
for i := 0; i < count-1_200_000; i++ {
rand.Random()
scalars = append(scalars, rand)
}
for i := 0; i < 600_000; i++ {
scalars = append(scalars, randLarge)
}
for i := 0; i < 400_000; i++ {
scalars = append(scalars, zero)
}
for i := 0; i < 200_000; i++ {
scalars = append(scalars, one)
}
} else {
for i := 0; i < count; i++ {
rand.Random()
scalars = append(scalars, rand)
}
}
return scalars[:count]
}
func TestMSM(t *testing.T) {
for _, v := range []int{24} {
count := 1 << v
points := GeneratePoints(count)
fmt.Print("Finished generating points\n")
scalars := GenerateScalars(count, true)
fmt.Print("Finished generating scalars\n")
out := new(G1ProjectivePoint)
startTime := time.Now()
_, e := Msm(out, points, scalars, 0) // scalars in standard (non-Montgomery) form
fmt.Printf("icicle MSM took: %d ms\n", time.Since(startTime).Milliseconds())
assert.Equal(t, e, nil, "error should be nil")
assert.True(t, out.IsOnCurve())
}
}
func TestCommitMSM(t *testing.T) {
for _, v := range []int{24} {
count := 1<<v - 1
points := GeneratePoints(count)
fmt.Print("Finished generating points\n")
scalars := GenerateScalars(count, true)
fmt.Print("Finished generating scalars\n")
out_d, _ := goicicle.CudaMalloc(96) // one G1 projective point: 3 * 32-byte base field elements
pointsBytes := count * 64
points_d, _ := goicicle.CudaMalloc(pointsBytes)
goicicle.CudaMemCpyHtoD[G1PointAffine](points_d, points, pointsBytes)
scalarBytes := count * 32
scalars_d, _ := goicicle.CudaMalloc(scalarBytes)
goicicle.CudaMemCpyHtoD[G1ScalarField](scalars_d, scalars, scalarBytes)
startTime := time.Now()
e := Commit(out_d, scalars_d, points_d, count, 10)
fmt.Printf("icicle MSM took: %d ms\n", time.Since(startTime).Milliseconds())
outHost := make([]G1ProjectivePoint, 1)
goicicle.CudaMemCpyDtoH[G1ProjectivePoint](outHost, out_d, 96)
assert.Equal(t, e, 0, "error should be 0")
assert.True(t, outHost[0].IsOnCurve())
}
}
func BenchmarkCommit(b *testing.B) {
LOG_MSM_SIZES := []int{20, 21, 22, 23, 24, 25, 26}
for _, logMsmSize := range LOG_MSM_SIZES {
msmSize := 1 << logMsmSize
points := GeneratePoints(msmSize)
scalars := GenerateScalars(msmSize, false)
out_d, _ := goicicle.CudaMalloc(96)
pointsBytes := msmSize * 64
points_d, _ := goicicle.CudaMalloc(pointsBytes)
goicicle.CudaMemCpyHtoD[G1PointAffine](points_d, points, pointsBytes)
scalarBytes := msmSize * 32
scalars_d, _ := goicicle.CudaMalloc(scalarBytes)
goicicle.CudaMemCpyHtoD[G1ScalarField](scalars_d, scalars, scalarBytes)
b.Run(fmt.Sprintf("MSM %d", logMsmSize), func(b *testing.B) {
for n := 0; n < b.N; n++ {
e := Commit(out_d, scalars_d, points_d, msmSize, 10)
if e != 0 {
panic("Error occured")
}
}
})
}
}
func TestBatchMSM(t *testing.T) {
for _, batchPow2 := range []int{2, 4} {
for _, pow2 := range []int{4, 6} {
msmSize := 1 << pow2
batchSize := 1 << batchPow2
count := msmSize * batchSize
points := GeneratePoints(count)
scalars := GenerateScalars(count, false)
a, e := MsmBatch(&points, &scalars, batchSize, 0)
if e != nil {
t.Errorf("MsmBatchBN254 returned an error: %v", e)
}
if len(a) != batchSize {
t.Errorf("Expected length %d, but got %d", batchSize, len(a))
}
}
}
}
func BenchmarkMSM(b *testing.B) {
LOG_MSM_SIZES := []int{20, 21, 22, 23, 24, 25, 26}
for _, logMsmSize := range LOG_MSM_SIZES {
msmSize := 1 << logMsmSize
points := GeneratePoints(msmSize)
scalars := GenerateScalars(msmSize, false)
b.Run(fmt.Sprintf("MSM %d", logMsmSize), func(b *testing.B) {
for n := 0; n < b.N; n++ {
out := new(G1ProjectivePoint)
_, e := Msm(out, points, scalars, 0)
if e != nil {
panic("Error occured")
}
}
})
}
}
// G2
func GenerateG2Points(count int) []G2PointAffine {
// Generate a handful of random G2 affine points, then repeatedly
// double the slice until it covers count elements.
var points []G2PointAffine
for i := 0; i < 10; i++ {
var p G2Point
p.Random()
var affine G2PointAffine
affine.FromProjective(&p)
points = append(points, affine)
}
log2_10 := math.Log2(10)
log2Count := math.Log2(float64(count))
log2Size := int(math.Ceil(log2Count - log2_10))
for i := 0; i < log2Size; i++ {
points = append(points, points...)
}
return points[:count]
}
func TestMsmG2BN254(t *testing.T) {
for _, v := range []int{24} {
count := 1 << v
points := GenerateG2Points(count)
fmt.Print("Finished generating points\n")
scalars := GenerateScalars(count, false)
fmt.Print("Finished generating scalars\n")
out := new(G2Point)
_, e := MsmG2(out, points, scalars, 0)
assert.Equal(t, e, nil, "error should be nil")
assert.True(t, out.IsOnCurve())
}
}
func BenchmarkMsmG2BN254(b *testing.B) {
LOG_MSM_SIZES := []int{20, 21, 22, 23, 24, 25, 26}
for _, logMsmSize := range LOG_MSM_SIZES {
msmSize := 1 << logMsmSize
points := GenerateG2Points(msmSize)
scalars := GenerateScalars(msmSize, false)
b.Run(fmt.Sprintf("MSM G2 %d", logMsmSize), func(b *testing.B) {
for n := 0; n < b.N; n++ {
out := new(G2Point)
_, e := MsmG2(out, points, scalars, 0)
if e != nil {
panic("Error occured")
}
}
})
}
}
func TestCommitG2MSM(t *testing.T) {
for _, v := range []int{24} {
count := 1 << v
points := GenerateG2Points(count)
fmt.Print("Finished generating points\n")
scalars := GenerateScalars(count, true)
fmt.Print("Finished generating scalars\n")
var sizeCheckG2PointAffine G2PointAffine
inputPointsBytes := count * int(unsafe.Sizeof(sizeCheckG2PointAffine))
var sizeCheckG2Point G2Point
out_d, _ := goicicle.CudaMalloc(int(unsafe.Sizeof(sizeCheckG2Point)))
points_d, _ := goicicle.CudaMalloc(inputPointsBytes)
goicicle.CudaMemCpyHtoD[G2PointAffine](points_d, points, inputPointsBytes)
scalarBytes := count * 32
scalars_d, _ := goicicle.CudaMalloc(scalarBytes)
goicicle.CudaMemCpyHtoD[G1ScalarField](scalars_d, scalars, scalarBytes)
startTime := time.Now()
e := CommitG2(out_d, scalars_d, points_d, count, 10)
fmt.Printf("icicle MSM took: %d ms\n", time.Since(startTime).Milliseconds())
outHost := make([]G2Point, 1)
goicicle.CudaMemCpyDtoH[G2Point](outHost, out_d, int(unsafe.Sizeof(sizeCheckG2Point)))
assert.Equal(t, e, 0, "error should be 0")
assert.Equal(t, len(outHost), 1)
result := outHost[0]
assert.True(t, result.IsOnCurve())
}
}
func TestBatchG2MSM(t *testing.T) {
for _, batchPow2 := range []int{2, 4} {
for _, pow2 := range []int{4, 6} {
msmSize := 1 << pow2
batchSize := 1 << batchPow2
count := msmSize * batchSize
points := GenerateG2Points(count)
scalars := GenerateScalars(count, false)
pointsResults, e := MsmG2Batch(&points, &scalars, batchSize, 0)
if e != nil {
t.Errorf("MsmBatchBN254 returned an error: %v", e)
}
if len(pointsResults) != batchSize {
t.Errorf("Expected length %d, but got %d", batchSize, len(pointsResults))
}
for _, s := range pointsResults {
assert.True(t, s.IsOnCurve())
}
}
}
}


@@ -1,221 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
package bn254
import (
"errors"
"fmt"
"unsafe"
"github.com/ingonyama-zk/icicle/goicicle"
)
// #cgo CFLAGS: -I./include/
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbn254
// #include "ntt.h"
import "C"
const (
NONE = 0
DIF = 1
DIT = 2
)
// decimation accepts NONE, DIF or DIT (see the constants above) so the
// call sites in the tests compile; the CUDA entry point used here takes no
// ordering parameter, so the value is not forwarded.
func Ntt(scalars *[]G1ScalarField, isInverse bool, decimation, deviceId int) uint64 {
_ = decimation
scalarsC := (*C.BN254_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
ret := C.ntt_cuda_bn254(scalarsC, C.uint32_t(len(*scalars)), C.bool(isInverse), C.size_t(deviceId))
return uint64(ret)
}
func NttBatch(scalars *[]G1ScalarField, isInverse bool, batchSize, deviceId int) uint64 {
scalarsC := (*C.BN254_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
isInverseC := C.bool(isInverse)
batchSizeC := C.uint32_t(batchSize)
deviceIdC := C.size_t(deviceId)
ret := C.ntt_batch_cuda_bn254(scalarsC, C.uint32_t(len(*scalars)), batchSizeC, isInverseC, deviceIdC)
return uint64(ret)
}
func EcNtt(values *[]G1ProjectivePoint, isInverse bool, deviceId int) uint64 {
valuesC := (*C.BN254_projective_t)(unsafe.Pointer(&(*values)[0]))
deviceIdC := C.size_t(deviceId)
isInverseC := C.bool(isInverse)
n := C.uint32_t(len(*values))
ret := C.ecntt_cuda_bn254(valuesC, n, isInverseC, deviceIdC)
return uint64(ret)
}
func EcNttBatch(values *[]G1ProjectivePoint, isInverse bool, batchSize, deviceId int) uint64 {
valuesC := (*C.BN254_projective_t)(unsafe.Pointer(&(*values)[0]))
deviceIdC := C.size_t(deviceId)
isInverseC := C.bool(isInverse)
n := C.uint32_t(len(*values))
batchSizeC := C.uint32_t(batchSize)
ret := C.ecntt_batch_cuda_bn254(valuesC, n, batchSizeC, isInverseC, deviceIdC)
return uint64(ret)
}
func GenerateTwiddles(d_size int, log_d_size int, inverse bool) (up unsafe.Pointer, err error) {
domain_size := C.uint32_t(d_size)
logn := C.uint32_t(log_d_size)
is_inverse := C.bool(inverse)
dp := C.build_domain_cuda_bn254(domain_size, logn, is_inverse, 0, 0)
if dp == nil {
err = errors.New("nullptr returned from generating twiddles")
return unsafe.Pointer(nil), err
}
return unsafe.Pointer(dp), nil
}
// Reverses d_scalars in-place
func ReverseScalars(d_scalars unsafe.Pointer, len int) (int, error) {
scalarsC := (*C.BN254_scalar_t)(d_scalars)
lenC := C.int(len)
if success := C.reverse_order_scalars_cuda_bn254(scalarsC, lenC, 0, 0); success != 0 {
return -1, errors.New("reversing failed")
}
return 0, nil
}
func Interpolate(scalars, twiddles, cosetPowers unsafe.Pointer, size int, isCoset bool) unsafe.Pointer {
size_d := size * 32
dp, err := goicicle.CudaMalloc(size_d)
if err != nil {
return nil
}
d_out := (*C.BN254_scalar_t)(dp)
scalarsC := (*C.BN254_scalar_t)(scalars)
twiddlesC := (*C.BN254_scalar_t)(twiddles)
cosetPowersC := (*C.BN254_scalar_t)(cosetPowers)
sizeC := C.uint(size)
var ret C.int
if isCoset {
ret = C.interpolate_scalars_on_coset_cuda_bn254(d_out, scalarsC, twiddlesC, sizeC, cosetPowersC, 0, 0)
} else {
ret = C.interpolate_scalars_cuda_bn254(d_out, scalarsC, twiddlesC, sizeC, 0, 0)
}
if ret != 0 {
fmt.Print("error interpolating")
goicicle.CudaFree(dp)
return nil
}
return unsafe.Pointer(d_out)
}
func Evaluate(scalars_out, scalars, twiddles, coset_powers unsafe.Pointer, scalars_size, twiddles_size int, isCoset bool) int {
scalars_outC := (*C.BN254_scalar_t)(scalars_out)
scalarsC := (*C.BN254_scalar_t)(scalars)
twiddlesC := (*C.BN254_scalar_t)(twiddles)
coset_powersC := (*C.BN254_scalar_t)(coset_powers)
sizeC := C.uint(scalars_size)
twiddlesC_size := C.uint(twiddles_size)
var ret C.int
if isCoset {
ret = C.evaluate_scalars_on_coset_cuda_bn254(scalars_outC, scalarsC, twiddlesC, twiddlesC_size, sizeC, coset_powersC, 0, 0)
} else {
ret = C.evaluate_scalars_cuda_bn254(scalars_outC, scalarsC, twiddlesC, twiddlesC_size, sizeC, 0, 0)
}
if ret != 0 {
fmt.Print("error interpolating")
return -1
}
return 0
}
// VecScalarAdd computes in1_d + in2_d elementwise, writing the result back into in1_d.
func VecScalarAdd(in1_d, in2_d unsafe.Pointer, size int) int {
in1_dC := (*C.BN254_scalar_t)(in1_d)
in2_dC := (*C.BN254_scalar_t)(in2_d)
sizeC := C.uint(size)
ret := C.add_scalars_cuda_bn254(in1_dC, in1_dC, in2_dC, sizeC, 0)
if ret != 0 {
fmt.Print("error adding scalar vectors")
return -1
}
return 0
}
// VecScalarSub computes in1_d - in2_d elementwise, writing the result back into in1_d.
func VecScalarSub(in1_d, in2_d unsafe.Pointer, size int) int {
in1_dC := (*C.BN254_scalar_t)(in1_d)
in2_dC := (*C.BN254_scalar_t)(in2_d)
sizeC := C.uint(size)
ret := C.sub_scalars_cuda_bn254(in1_dC, in1_dC, in2_dC, sizeC, 0)
if ret != 0 {
fmt.Print("error subtracting scalar vectors")
return -1
}
return 0
}
func ToMontgomery(d_scalars unsafe.Pointer, len int) (int, error) {
scalarsC := (*C.BN254_scalar_t)(d_scalars)
lenC := C.uint(len)
if success := C.to_montgomery_scalars_cuda_bn254(scalarsC, lenC, 0); success != 0 {
return -1, errors.New("reversing failed")
}
return 0, nil
}
func FromMontgomery(d_scalars unsafe.Pointer, len int) (int, error) {
scalarsC := (*C.BN254_scalar_t)(d_scalars)
lenC := C.uint(len)
if success := C.from_montgomery_scalars_cuda_bn254(scalarsC, lenC, 0); success != 0 {
return -1, errors.New("reversing failed")
}
return 0, nil
}
func AffinePointFromMontgomery(d_points unsafe.Pointer, len int) (int, error) {
pointsC := (*C.BN254_affine_t)(d_points)
lenC := C.uint(len)
if success := C.from_montgomery_aff_points_cuda_bn254(pointsC, lenC, 0); success != 0 {
return -1, errors.New("reversing failed")
}
return 0, nil
}
func G2AffinePointFromMontgomery(d_points unsafe.Pointer, len int) (int, error) {
pointsC := (*C.BN254_g2_affine_t)(d_points)
lenC := C.uint(len)
if success := C.from_montgomery_aff_points_g2_cuda_bn254(pointsC, lenC, 0); success != 0 {
return -1, errors.New("reversing failed")
}
return 0, nil
}
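A minimal sketch (added for illustration, not present in the diff) of how the device-side polynomial helpers above might compose into an interpolate-then-evaluate round trip. Assumptions worth flagging: scalars are 32 bytes; GenerateTwiddles with inverse=true yields the twiddles Interpolate expects (and inverse=false those for Evaluate); passing twiddles_size equal to the scalar count is sufficient. Error handling and CudaFree cleanup are elided.

package bn254

import (
	"math"

	"github.com/ingonyama-zk/icicle/goicicle"
)

// Hypothetical round trip: evaluations -> coefficients -> evaluations.
func exampleInterpolateEvaluate(size int) []G1ScalarField {
	logn := int(math.Log2(float64(size)))
	invTwiddles, _ := GenerateTwiddles(size, logn, true)  // assumed: consumed by Interpolate
	fwdTwiddles, _ := GenerateTwiddles(size, logn, false) // assumed: consumed by Evaluate

	evals := GenerateScalars(size, false) // test helper from msm_test.go
	dEvals, _ := goicicle.CudaMalloc(size * 32)
	goicicle.CudaMemCpyHtoD[G1ScalarField](dEvals, evals, size*32)

	dCoeffs := Interpolate(dEvals, invTwiddles, nil, size, false) // no coset
	dOut, _ := goicicle.CudaMalloc(size * 32)
	Evaluate(dOut, dCoeffs, fwdTwiddles, nil, size, size, false)

	out := make([]G1ScalarField, size)
	goicicle.CudaMemCpyDtoH[G1ScalarField](out, dOut, size*32)
	return out
}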


@@ -1,148 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
package bn254
import (
"fmt"
"github.com/stretchr/testify/assert"
"reflect"
"testing"
)
func TestNttBN254Batch(t *testing.T) {
count := 1 << 20
scalars := GenerateScalars(count, false)
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
copy(nttResult, scalars)
assert.Equal(t, nttResult, scalars)
NttBatch(&nttResult, false, count, 0)
assert.NotEqual(t, nttResult, scalars)
}
func TestNttBN254CompareToGnarkDIF(t *testing.T) {
count := 1 << 2
scalars := GenerateScalars(count, false)
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
copy(nttResult, scalars)
assert.Equal(t, nttResult, scalars)
Ntt(&nttResult, false, DIF, 0)
assert.NotEqual(t, nttResult, scalars)
}
func TestINttBN254CompareToGnarkDIT(t *testing.T) {
count := 1 << 3
scalars := GenerateScalars(count, false)
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
copy(nttResult, scalars)
assert.Equal(t, nttResult, scalars)
Ntt(&nttResult, true, DIT, 0)
assert.NotEqual(t, nttResult, scalars)
}
func TestNttBN254(t *testing.T) {
count := 1 << 3
scalars := GenerateScalars(count, false)
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
copy(nttResult, scalars)
assert.Equal(t, nttResult, scalars)
Ntt(&nttResult, false, NONE, 0)
assert.NotEqual(t, nttResult, scalars)
inttResult := make([]G1ScalarField, len(nttResult))
copy(inttResult, nttResult)
assert.Equal(t, inttResult, nttResult)
Ntt(&inttResult, true, NONE, 0)
assert.Equal(t, inttResult, scalars)
}
func TestNttBatchBN254(t *testing.T) {
count := 1 << 5
batches := 4
scalars := GenerateScalars(count*batches, false)
var scalarVecOfVec [][]G1ScalarField = make([][]G1ScalarField, 0)
for i := 0; i < batches; i++ {
start := i * count
end := (i + 1) * count
batch := make([]G1ScalarField, len(scalars[start:end]))
copy(batch, scalars[start:end])
scalarVecOfVec = append(scalarVecOfVec, batch)
}
nttBatchResult := make([]G1ScalarField, len(scalars))
copy(nttBatchResult, scalars)
NttBatch(&nttBatchResult, false, count, 0)
var nttResultVecOfVec [][]G1ScalarField
for i := 0; i < batches; i++ {
// Clone the slice
clone := make([]G1ScalarField, len(scalarVecOfVec[i]))
copy(clone, scalarVecOfVec[i])
// Add it to the result vector of vectors
nttResultVecOfVec = append(nttResultVecOfVec, clone)
// Call the ntt_bn254 function
Ntt(&nttResultVecOfVec[i], false, NONE, 0)
}
assert.NotEqual(t, nttBatchResult, scalars)
// Check that the ntt of each vec of scalars is equal to the intt of the specific batch
for i := 0; i < batches; i++ {
if !reflect.DeepEqual(nttResultVecOfVec[i], nttBatchResult[i*count:((i+1)*count)]) {
t.Errorf("ntt of vec of scalars not equal to intt of specific batch")
}
}
}
func BenchmarkNTT(b *testing.B) {
LOG_NTT_SIZES := []int{12, 15, 20, 21, 22, 23, 24, 25, 26}
for _, logNTTSize := range LOG_NTT_SIZES {
nttSize := 1 << logNTTSize
b.Run(fmt.Sprintf("NTT %d", logNTTSize), func(b *testing.B) {
scalars := GenerateScalars(nttSize, false)
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
copy(nttResult, scalars)
for n := 0; n < b.N; n++ {
Ntt(&nttResult, false, NONE, 0)
}
})
}
}


@@ -1,48 +0,0 @@
package bn254
import (
"encoding/binary"
"fmt"
"log"
"regexp"
"runtime"
"time"
)
// ConvertUint32ArrToUint64Arr packs pairs of uint32 limbs into uint64s,
// treating the first limb of each pair as the high 32 bits.
func ConvertUint32ArrToUint64Arr(arr32 [8]uint32) [4]uint64 {
var arr64 [4]uint64
for i := 0; i < len(arr32); i += 2 {
arr64[i/2] = (uint64(arr32[i]) << 32) | uint64(arr32[i+1])
}
return arr64
}
// ConvertUint64ArrToUint32Arr splits each uint64 into two uint32 limbs,
// low word first. Note that this is not the inverse of
// ConvertUint32ArrToUint64Arr, which treats the first limb as the high word.
func ConvertUint64ArrToUint32Arr(arr64 [4]uint64) [8]uint32 {
var arr32 [8]uint32
for i, v := range arr64 {
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, v)
arr32[i*2] = binary.LittleEndian.Uint32(b[0:4])
arr32[i*2+1] = binary.LittleEndian.Uint32(b[4:8])
}
return arr32
}
func TimeTrack(start time.Time) {
elapsed := time.Since(start)
// Skip this function, and fetch the PC and file for its parent.
pc, _, _, _ := runtime.Caller(1)
// Retrieve a function object for this function's parent.
funcObj := runtime.FuncForPC(pc)
// Regex to extract just the function name (and not the module path).
runtimeFunc := regexp.MustCompile(`^.*\.(.*)$`)
name := runtimeFunc.ReplaceAllString(funcObj.Name(), "$1")
log.Printf("%s took %s", name, elapsed)
}


@@ -1,81 +0,0 @@
package bn254
import (
"testing"
)
func TestConvertUint32ArrToUint64Arr(t *testing.T) {
testCases := []struct {
name string
input [8]uint32
want [4]uint64
}{
{
name: "Test with incremental array",
input: [8]uint32{1, 2, 3, 4, 5, 6, 7, 8},
want: [4]uint64{4294967298, 12884901892, 21474836486, 30064771080},
},
{
name: "Test with all zeros",
input: [8]uint32{0, 0, 0, 0, 0, 0, 0, 0},
want: [4]uint64{0, 0, 0, 0},
},
{
name: "Test with maximum uint32 values",
input: [8]uint32{4294967295, 4294967295, 4294967295, 4294967295, 4294967295, 4294967295, 4294967295, 4294967295},
want: [4]uint64{18446744073709551615, 18446744073709551615, 18446744073709551615, 18446744073709551615},
},
{
name: "Test with alternating min and max uint32 values",
input: [8]uint32{0, 4294967295, 0, 4294967295, 0, 4294967295, 0, 4294967295},
want: [4]uint64{4294967295, 4294967295, 4294967295, 4294967295},
},
{
name: "Test with alternating max and min uint32 values",
input: [8]uint32{4294967295, 0, 4294967295, 0, 4294967295, 0, 4294967295, 0},
want: [4]uint64{18446744069414584320, 18446744069414584320, 18446744069414584320, 18446744069414584320},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
got := ConvertUint32ArrToUint64Arr(tc.input)
if got != tc.want {
t.Errorf("got %v, want %v", got, tc.want)
}
})
}
}
func TestConvertUint64ArrToUint32Arr(t *testing.T) {
testCases := []struct {
name string
input [4]uint64
expected [8]uint32
}{
{
name: "test one",
input: [4]uint64{1, 2, 3, 4},
expected: [8]uint32{1, 0, 2, 0, 3, 0, 4, 0},
},
{
name: "test two",
input: [4]uint64{100, 200, 300, 400},
expected: [8]uint32{100, 0, 200, 0, 300, 0, 400, 0},
},
{
name: "test three",
input: [4]uint64{1000, 2000, 3000, 4000},
expected: [8]uint32{1000, 0, 2000, 0, 3000, 0, 4000, 0},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
got := ConvertUint64ArrToUint32Arr(tc.input)
if got != tc.expected {
t.Errorf("got %v, want %v", got, tc.expected)
}
})
}
}


@@ -1,41 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by Ingonyama DO NOT EDIT
package bn254
// #cgo CFLAGS: -I./include/
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbn254
// #include "ve_mod_mult.h"
import "C"
import (
"fmt"
"unsafe"
)
func VecScalarMulMod(scalarVec1, scalarVec2 unsafe.Pointer, size int) int {
scalarVec1C := (*C.BN254_scalar_t)(scalarVec1)
scalarVec2C := (*C.BN254_scalar_t)(scalarVec2)
sizeC := C.size_t(size)
ret := C.vec_mod_mult_device_scalar_bn254(scalarVec1C, scalarVec2C, sizeC, 0)
if ret != 0 {
fmt.Print("error multiplying scalar vectors")
return -1
}
return 0
}
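vec_mod_mult_device_scalar_bn254 receives only the two input vectors, so the product presumably lands in one of them. The sketch below is illustrative, not from the diff; it assumes the result overwrites the first vector, by analogy with the add/sub wrappers earlier in this diff that write into their first input.

package bn254

import "github.com/ingonyama-zk/icicle/goicicle"

// Hypothetical sketch; assumes the elementwise product replaces dA's contents.
func exampleVecScalarMulMod(n int) []G1ScalarField {
	a := GenerateScalars(n, false) // test helper from msm_test.go
	b := GenerateScalars(n, false)

	dA, _ := goicicle.CudaMalloc(n * 32)
	dB, _ := goicicle.CudaMalloc(n * 32)
	goicicle.CudaMemCpyHtoD[G1ScalarField](dA, a, n*32)
	goicicle.CudaMemCpyHtoD[G1ScalarField](dB, b, n*32)

	product := make([]G1ScalarField, n)
	if VecScalarMulMod(dA, dB, n) == 0 {
		goicicle.CudaMemCpyDtoH[G1ScalarField](product, dA, n*32)
	}

	goicicle.CudaFree(dA)
	goicicle.CudaFree(dB)
	return product
}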


@@ -1,49 +0,0 @@
package goicicle
// This file wraps CUDA runtime memory management (alloc, free, memcpy)
// #cgo CFLAGS: -I /usr/local/cuda/include
// #cgo LDFLAGS: -L/usr/local/cuda/lib64 -lcudart
/*
#include <cuda.h>
#include <cuda_runtime.h>
*/
import "C"
import (
"errors"
"unsafe"
)
func CudaMalloc(size int) (unsafe.Pointer, error) {
// cudaMalloc overwrites dp with the newly allocated device pointer.
var dp unsafe.Pointer
if err := C.cudaMalloc(&dp, C.size_t(size)); err != 0 {
return nil, errors.New("cudaMalloc failed to allocate device memory")
}
return dp, nil
}
func CudaFree(dp unsafe.Pointer) int {
if err := C.cudaFree(dp); err != 0 {
return -1
}
return 0
}
func CudaMemCpyHtoD[T any](dst_d unsafe.Pointer, src []T, size int) int {
src_c := unsafe.Pointer(&src[0])
// 1 == cudaMemcpyHostToDevice
if err := C.cudaMemcpy(dst_d, src_c, C.size_t(size), 1); err != 0 {
return -1
}
return 0
}
func CudaMemCpyDtoH[T any](dst []T, src_d unsafe.Pointer, size int) int {
dst_c := unsafe.Pointer(&dst[0])
// 2 == cudaMemcpyDeviceToHost
if err := C.cudaMemcpy(dst_c, src_d, C.size_t(size), 2); err != 0 {
return -1
}
return 0
}
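The wrappers above reduce the CUDA runtime API to four calls. The example below is an illustrative addition (not from the diff) showing the intended host-to-device round trip, including the byte-size bookkeeping the generic copy helpers leave to the caller.

package goicicle

import "fmt"

// Round-trip a host slice through device memory using the wrappers above.
func exampleRoundTrip() {
	host := []uint32{1, 2, 3, 4}
	size := len(host) * 4 // the helpers take sizes in bytes; uint32 is 4 bytes

	dBuf, err := CudaMalloc(size)
	if err != nil {
		panic(err)
	}
	defer CudaFree(dBuf)

	if CudaMemCpyHtoD[uint32](dBuf, host, size) != 0 {
		panic("host-to-device copy failed")
	}
	back := make([]uint32, len(host))
	if CudaMemCpyDtoH[uint32](back, dBuf, size) != 0 {
		panic("device-to-host copy failed")
	}
	fmt.Println(back) // prints [1 2 3 4]
}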


@@ -1,41 +0,0 @@
#!/bin/bash
SUDO=''
if [ "$EUID" != 0 ]; then
echo "Icicle setup script should be run with root privileges, please run this as root"
SUDO='sudo'
fi
TARGET_BN254="libbn254.so"
TARGET_BLS12_381="libbls12_381.so"
TARGET_BLS12_377="libbls12_377.so"
MAKE_FAIL=0
$SUDO make $1 || MAKE_FAIL=1
if [ $MAKE_FAIL != 0 ]; then
echo "make failed, install dependencies and re-run setup script with root privileges"
exit
fi
TARGET_BN254_PATH=$(dirname "$(find `pwd` -name $TARGET_BN254 -print -quit)")/
TARGET_BLS12_381_PATH=$(dirname "$(find `pwd` -name $TARGET_BLS12_381 -print -quit)")/
TARGET_BLS12_377_PATH=$(dirname "$(find `pwd` -name $TARGET_BLS12_377 -print -quit)")/
if [[ "$TARGET_BLS12_377_PATH" != "" ]]; then
echo "BLS12_377 found @ $TARGET_BLS12_377_PATH"
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:$TARGET_BLS12_377_PATH
fi
if [[ "$TARGET_BN254_PATH" != "" ]]; then
echo "BN254 found @ $TARGET_BN254_PATH"
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:$TARGET_BN254_PATH
fi
if [[ "$TARGET_BLS12_381_PATH" != "" ]]; then
echo "BLS12_381_PATH found @ $TARGET_BLS12_381_PATH"
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:$TARGET_BLS12_381_PATH
fi


@@ -1,42 +0,0 @@
package config
// {{.SharedLib}}
type Curve struct {
PackageName string
CurveNameUpperCase string
CurveNameLowerCase string
SharedLib string
ScalarSize int
BaseSize int
G2ElementSize int
}
var BN_254 = Curve{
PackageName: "bn254",
CurveNameUpperCase: "BN254",
CurveNameLowerCase: "bn254",
SharedLib: "-lbn254",
ScalarSize: 8,
BaseSize: 8,
G2ElementSize: 4,
}
var BLS_12_377 = Curve{
PackageName: "bls12377",
CurveNameUpperCase: "BLS12_377",
CurveNameLowerCase: "bls12_377",
SharedLib: "-lbls12_377",
ScalarSize: 8,
BaseSize: 12,
G2ElementSize: 6,
}
var BLS_12_381 = Curve{
PackageName: "bls12381",
CurveNameUpperCase: "BLS12_381",
CurveNameLowerCase: "bls12_381",
SharedLib: "-lbls12_381",
ScalarSize: 8,
BaseSize: 12,
G2ElementSize: 6,
}
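These Curve values exist to parameterize the {{.CurveNameUpperCase}}-style templates in the files that follow. The generator itself is not part of this diff, so the driver below is a hypothetical sketch (file paths, package name and function names are invented for illustration) of how Go's text/template would expand one template per curve:

package main

import (
	"os"
	"text/template"
)

// Curve mirrors the config struct above; in the real tree it would be
// imported from the config package rather than redeclared.
type Curve struct {
	PackageName        string
	CurveNameUpperCase string
	CurveNameLowerCase string
	SharedLib          string
	ScalarSize         int
	BaseSize           int
	G2ElementSize      int
}

// renderCurve executes one template file with one curve config, so that
// fields like {{.PackageName}} and {{.SharedLib}} resolve per curve.
func renderCurve(tmplPath, outPath string, curve Curve) error {
	t, err := template.ParseFiles(tmplPath)
	if err != nil {
		return err
	}
	f, err := os.Create(outPath)
	if err != nil {
		return err
	}
	defer f.Close()
	return t.Execute(f, curve)
}

func main() {
	bn254 := Curve{PackageName: "bn254", CurveNameUpperCase: "BN254", CurveNameLowerCase: "bn254", SharedLib: "-lbn254", ScalarSize: 8, BaseSize: 8, G2ElementSize: 4}
	if err := renderCurve("msm.go.tmpl", "bn254/msm.go", bn254); err != nil { // paths are hypothetical
		panic(err)
	}
}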


@@ -1,306 +0,0 @@
import (
"unsafe"
"encoding/binary"
)
// #cgo CFLAGS: -I./include/
// #cgo LDFLAGS: -L${SRCDIR}/../../ {{.SharedLib}}
// #include "projective.h"
// #include "ve_mod_mult.h"
import "C"
const SCALAR_SIZE = {{.ScalarSize}}
const BASE_SIZE = {{.BaseSize}}
type G1ScalarField struct {
S [SCALAR_SIZE]uint32
}
type G1BaseField struct {
S [BASE_SIZE]uint32
}
/*
* BaseField Constructors
*/
func (f *G1BaseField) SetZero() *G1BaseField {
var S [BASE_SIZE]uint32
f.S = S
return f
}
func (f *G1BaseField) SetOne() *G1BaseField {
var S [BASE_SIZE]uint32
S[0] = 1
f.S = S
return f
}
func (p *G1ProjectivePoint) FromAffine(affine *G1PointAffine) *G1ProjectivePoint {
out := (*C.{{.CurveNameUpperCase}}_projective_t)(unsafe.Pointer(p))
in := (*C.{{.CurveNameUpperCase}}_affine_t)(unsafe.Pointer(affine))
C.projective_from_affine_{{.CurveNameLowerCase}}(out, in)
return p
}
func (f *G1BaseField) FromLimbs(limbs [BASE_SIZE]uint32) *G1BaseField {
copy(f.S[:], limbs[:])
return f
}
/*
* BaseField methods
*/
func (f *G1BaseField) Limbs() [BASE_SIZE]uint32 {
return f.S
}
func (f *G1BaseField) ToBytesLe() []byte {
bytes := make([]byte, len(f.S)*4)
for i, v := range f.S {
binary.LittleEndian.PutUint32(bytes[i*4:], v)
}
return bytes
}
/*
* ScalarField constructors and comparison
*/
func (p *G1ScalarField) Random() *G1ScalarField {
outC := (*C.{{.CurveNameUpperCase}}_scalar_t)(unsafe.Pointer(p))
C.random_scalar_{{.CurveNameLowerCase}}(outC)
return p
}
func (f *G1ScalarField) SetZero() *G1ScalarField {
var S [SCALAR_SIZE]uint32
f.S = S
return f
}
func (f *G1ScalarField) SetOne() *G1ScalarField {
var S [SCALAR_SIZE]uint32
S[0] = 1
f.S = S
return f
}
func (a *G1ScalarField) Eq(b *G1ScalarField) bool {
for i, v := range a.S {
if b.S[i] != v {
return false
}
}
return true
}
/*
* ScalarField methods
*/
func (f *G1ScalarField) Limbs() [SCALAR_SIZE]uint32 {
return f.S
}
func (f *G1ScalarField) ToBytesLe() []byte {
bytes := make([]byte, len(f.S)*4)
for i, v := range f.S {
binary.LittleEndian.PutUint32(bytes[i*4:], v)
}
return bytes
}
/*
* Point{{.CurveNameUpperCase}}
*/
type G1ProjectivePoint struct {
X, Y, Z G1BaseField
}
func (f *G1ProjectivePoint) SetZero() *G1ProjectivePoint {
var yOne G1BaseField
yOne.SetOne()
var xZero G1BaseField
xZero.SetZero()
var zZero G1BaseField
zZero.SetZero()
f.X = xZero
f.Y = yOne
f.Z = zZero
return f
}
func (p *G1ProjectivePoint) Eq(pCompare *G1ProjectivePoint) bool {
// Cast *Point{{.CurveNameUpperCase}} to *C.{{.CurveNameUpperCase}}_projective_t
// The unsafe.Pointer cast is necessary because Go doesn't allow direct casts
// between different pointer types.
// It's your responsibility to ensure that the types are compatible.
pC := (*C.{{.CurveNameUpperCase}}_projective_t)(unsafe.Pointer(p))
pCompareC := (*C.{{.CurveNameUpperCase}}_projective_t)(unsafe.Pointer(pCompare))
// Call the C function
// The C function doesn't keep any references to the data,
// so it's fine if the Go garbage collector moves or deletes the data later.
return bool(C.eq_{{.CurveNameLowerCase}}(pC, pCompareC))
}
func (p *G1ProjectivePoint) IsOnCurve() bool {
point := (*C.{{.CurveNameUpperCase}}_projective_t)(unsafe.Pointer(p))
res := C.projective_is_on_curve_{{.CurveNameLowerCase}}(point)
return bool(res)
}
func (p *G1ProjectivePoint) Random() *G1ProjectivePoint {
outC := (*C.{{.CurveNameUpperCase}}_projective_t)(unsafe.Pointer(p))
C.random_projective_{{.CurveNameLowerCase}}(outC)
return p
}
func (p *G1ProjectivePoint) StripZ() *G1PointAffine {
return &G1PointAffine{
X: p.X,
Y: p.Y,
}
}
func (p *G1ProjectivePoint) FromLimbs(x, y, z *[]uint32) *G1ProjectivePoint {
var _x G1BaseField
var _y G1BaseField
var _z G1BaseField
_x.FromLimbs(GetFixedLimbs(x))
_y.FromLimbs(GetFixedLimbs(y))
_z.FromLimbs(GetFixedLimbs(z))
p.X = _x
p.Y = _y
p.Z = _z
return p
}
/*
* G1PointAffine ({{.CurveNameUpperCase}} affine point, no point at infinity)
*/
type G1PointAffine struct {
X, Y G1BaseField
}
func (p *G1PointAffine) FromProjective(projective *G1ProjectivePoint) *G1PointAffine {
in := (*C.{{.CurveNameUpperCase}}_projective_t)(unsafe.Pointer(projective))
out := (*C.{{.CurveNameUpperCase}}_affine_t)(unsafe.Pointer(p))
C.projective_to_affine_{{.CurveNameLowerCase}}(out,in)
return p
}
func (p *G1PointAffine) ToProjective() *G1ProjectivePoint {
var Z G1BaseField
Z.SetOne()
return &G1ProjectivePoint{
X: p.X,
Y: p.Y,
Z: Z,
}
}
func (p *G1PointAffine) FromLimbs(X, Y *[]uint32) *G1PointAffine {
var _x G1BaseField
var _y G1BaseField
_x.FromLimbs(GetFixedLimbs(X))
_y.FromLimbs(GetFixedLimbs(Y))
p.X = _x
p.Y = _y
return p
}
/*
* Multiplication
*/
func MultiplyVec(a []G1ProjectivePoint, b []G1ScalarField, deviceID int) {
if len(a) != len(b) {
panic("a and b have different lengths")
}
pointsC := (*C.{{.CurveNameUpperCase}}_projective_t)(unsafe.Pointer(&a[0]))
scalarsC := (*C.{{.CurveNameUpperCase}}_scalar_t)(unsafe.Pointer(&b[0]))
deviceIdC := C.size_t(deviceID)
nElementsC := C.size_t(len(a))
C.vec_mod_mult_point_{{.CurveNameLowerCase}}(pointsC, scalarsC, nElementsC, deviceIdC)
}
func MultiplyScalar(a []G1ScalarField, b []G1ScalarField, deviceID int) {
if len(a) != len(b) {
panic("a and b have different lengths")
}
aC := (*C.{{.CurveNameUpperCase}}_scalar_t)(unsafe.Pointer(&a[0]))
bC := (*C.{{.CurveNameUpperCase}}_scalar_t)(unsafe.Pointer(&b[0]))
deviceIdC := C.size_t(deviceID)
nElementsC := C.size_t(len(a))
C.vec_mod_mult_scalar_{{.CurveNameLowerCase}}(aC, bC, nElementsC, deviceIdC)
}
// Multiply a matrix by a vector:
//
// `a` - flattened matrix;
// `b` - vector to multiply `a` by;
func MultiplyMatrix(a []G1ScalarField, b []G1ScalarField, deviceID int) []G1ScalarField {
c := make([]G1ScalarField, len(b))
for i := range c {
var p G1ScalarField
p.SetZero()
c[i] = p
}
aC := (*C.{{.CurveNameUpperCase}}_scalar_t)(unsafe.Pointer(&a[0]))
bC := (*C.{{.CurveNameUpperCase}}_scalar_t)(unsafe.Pointer(&b[0]))
cC := (*C.{{.CurveNameUpperCase}}_scalar_t)(unsafe.Pointer(&c[0]))
deviceIdC := C.size_t(deviceID)
nElementsC := C.size_t(len(a))
C.matrix_vec_mod_mult_{{.CurveNameLowerCase}}(aC, bC, cC, nElementsC, deviceIdC)
// Return the result vector instead of discarding it.
return c
}
/*
* Utils
*/
func GetFixedLimbs(slice *[]uint32) [BASE_SIZE]uint32 {
if len(*slice) <= BASE_SIZE {
limbs := [BASE_SIZE]uint32{}
copy(limbs[:len(*slice)], *slice)
return limbs
}
panic("slice has too many elements")
}


@@ -1,180 +0,0 @@
import (
"encoding/binary"
"testing"
"github.com/stretchr/testify/assert"
)
func TestNewField{{.CurveNameUpperCase}}One(t *testing.T) {
var oneField G1BaseField
oneField.SetOne()
rawOneField := [8]uint32{0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
assert.Equal(t, oneField.S, rawOneField)
}
func TestNewField{{.CurveNameUpperCase}}Zero(t *testing.T) {
var zeroField G1BaseField
zeroField.SetZero()
rawZeroField := [8]uint32{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
assert.Equal(t, zeroField.S, rawZeroField)
}
func TestField{{.CurveNameUpperCase}}ToBytesLe(t *testing.T) {
var p G1ProjectivePoint
p.Random()
expected := make([]byte, len(p.X.S)*4) // each uint32 takes 4 bytes
for i, v := range p.X.S {
binary.LittleEndian.PutUint32(expected[i*4:], v)
}
assert.Equal(t, p.X.ToBytesLe(), expected)
assert.Equal(t, len(p.X.ToBytesLe()), 32)
}
func TestNewPoint{{.CurveNameUpperCase}}Zero(t *testing.T) {
var pointZero G1ProjectivePoint
pointZero.SetZero()
var baseOne G1BaseField
baseOne.SetOne()
var zeroSanity G1BaseField
zeroSanity.SetZero()
assert.Equal(t, pointZero.X, zeroSanity)
assert.Equal(t, pointZero.Y, baseOne)
assert.Equal(t, pointZero.Z, zeroSanity)
}
func TestFromProjectiveToAffine(t *testing.T) {
var projective G1ProjectivePoint
var affine G1PointAffine
projective.Random()
affine.FromProjective(&projective)
var projective2 G1ProjectivePoint
projective2.FromAffine(&affine)
assert.True(t, projective.IsOnCurve())
assert.True(t, projective2.IsOnCurve())
assert.True(t, projective.Eq(&projective2))
}
func Test{{.CurveNameUpperCase}}Eq(t *testing.T) {
var p1 G1ProjectivePoint
p1.Random()
var p2 G1ProjectivePoint
p2.Random()
assert.Equal(t, p1.Eq(&p1), true)
assert.Equal(t, p1.Eq(&p2), false)
}
func Test{{.CurveNameUpperCase}}StripZ(t *testing.T) {
var p1 G1ProjectivePoint
p1.Random()
p2ZLess := p1.StripZ()
assert.IsType(t, G1PointAffine{}, *p2ZLess)
assert.Equal(t, p1.X, p2ZLess.X)
assert.Equal(t, p1.Y, p2ZLess.Y)
}
func TestPoint{{.CurveNameUpperCase}}fromLimbs(t *testing.T) {
var p G1ProjectivePoint
p.Random()
x := p.X.Limbs()
y := p.Y.Limbs()
z := p.Z.Limbs()
xSlice := x[:]
ySlice := y[:]
zSlice := z[:]
var pFromLimbs G1ProjectivePoint
pFromLimbs.FromLimbs(&xSlice, &ySlice, &zSlice)
assert.Equal(t, pFromLimbs, p)
}
func TestNewPointAffineNoInfinity{{.CurveNameUpperCase}}Zero(t *testing.T) {
var zeroP G1PointAffine
var zeroSanity G1BaseField
zeroSanity.SetZero()
assert.Equal(t, zeroP.X, zeroSanity)
assert.Equal(t, zeroP.Y, zeroSanity)
}
func TestPointAffineNoInfinity{{.CurveNameUpperCase}}FromLimbs(t *testing.T) {
// Initialize your test values
x := [8]uint32{1, 2, 3, 4, 5, 6, 7, 8}
y := [8]uint32{9, 10, 11, 12, 13, 14, 15, 16}
xSlice := x[:]
ySlice := y[:]
// Execute your function
var result G1PointAffine
result.FromLimbs(&xSlice, &ySlice)
var xBase G1BaseField
var yBase G1BaseField
xBase.FromLimbs(x)
yBase.FromLimbs(y)
// Define your expected result
expected := &G1PointAffine{
X: xBase,
Y: yBase,
}
// Test if result is as expected
assert.Equal(t, result, expected)
}
func TestGetFixedLimbs(t *testing.T) {
t.Run("case of valid input of length less than 8", func(t *testing.T) {
slice := []uint32{1, 2, 3, 4, 5, 6, 7}
expected := [8]uint32{1, 2, 3, 4, 5, 6, 7, 0}
result := GetFixedLimbs(&slice)
assert.Equal(t, result, expected)
})
t.Run("case of valid input of length 8", func(t *testing.T) {
slice := []uint32{1, 2, 3, 4, 5, 6, 7, 8}
expected := [8]uint32{1, 2, 3, 4, 5, 6, 7, 8}
result := GetFixedLimbs(&slice)
assert.Equal(t, result, expected)
})
t.Run("case of empty input", func(t *testing.T) {
slice := []uint32{}
expected := [8]uint32{0, 0, 0, 0, 0, 0, 0, 0}
result := GetFixedLimbs(&slice)
assert.Equal(t, result, expected)
})
t.Run("case of input length greater than 8", func(t *testing.T) {
slice := []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9}
defer func() {
if r := recover(); r == nil {
t.Errorf("the code did not panic")
}
}()
GetFixedLimbs(&slice)
})
}


@@ -1,95 +0,0 @@
import (
"encoding/binary"
"unsafe"
)
// #cgo CFLAGS: -I./include/
// #cgo LDFLAGS: -L${SRCDIR}/../../ {{.SharedLib}}
// #include "projective.h"
// #include "ve_mod_mult.h"
import "C"
// G2 extension field
type G2Element [{{.G2ElementSize}}]uint64
type ExtentionField struct {
A0, A1 G2Element
}
type G2PointAffine struct {
X, Y ExtentionField
}
type G2Point struct {
X, Y, Z ExtentionField
}
func (p *G2Point) Random() *G2Point {
outC := (*C.{{.CurveNameUpperCase}}_g2_projective_t)(unsafe.Pointer(p))
C.random_g2_projective_{{.CurveNameLowerCase}}(outC)
return p
}
func (p *G2Point) FromAffine(affine *G2PointAffine) *G2Point {
out := (*C.{{.CurveNameUpperCase}}_g2_projective_t)(unsafe.Pointer(p))
in := (*C.{{.CurveNameUpperCase}}_g2_affine_t)(unsafe.Pointer(affine))
C.g2_projective_from_affine_{{.CurveNameLowerCase}}(out, in)
return p
}
func (p *G2Point) Eq(pCompare *G2Point) bool {
// Cast *Point{{.CurveNameUpperCase}} to *C.{{.CurveNameUpperCase}}_projective_t
// The unsafe.Pointer cast is necessary because Go doesn't allow direct casts
// between different pointer types.
// It's your responsibility to ensure that the types are compatible.
pC := (*C.{{.CurveNameUpperCase}}_g2_projective_t)(unsafe.Pointer(p))
pCompareC := (*C.{{.CurveNameUpperCase}}_g2_projective_t)(unsafe.Pointer(pCompare))
// Call the C function
// The C function doesn't keep any references to the data,
// so it's fine if the Go garbage collector moves or deletes the data later.
return bool(C.eq_g2_{{.CurveNameLowerCase}}(pC, pCompareC))
}
func (f *G2Element) ToBytesLe() []byte {
var bytes []byte
for _, val := range f {
buf := make([]byte, 8) // 8 bytes because uint64 is 64-bit
binary.LittleEndian.PutUint64(buf, val)
bytes = append(bytes, buf...)
}
return bytes
}
func (p *G2PointAffine) ToProjective() G2Point {
// Build Z = 1 without hardcoding the limb count, since
// G2ElementSize differs between curves.
var one ExtentionField
one.A0[0] = 1
return G2Point{
X: p.X,
Y: p.Y,
Z: one,
}
}
func (p *G2PointAffine) FromProjective(projective *G2Point) *G2PointAffine {
out := (*C.{{.CurveNameUpperCase}}_g2_affine_t)(unsafe.Pointer(p))
in := (*C.{{.CurveNameUpperCase}}_g2_projective_t)(unsafe.Pointer(projective))
C.g2_projective_to_affine_{{.CurveNameLowerCase}}(out, in)
return p
}
func (p *G2Point) IsOnCurve() bool {
// Reinterpret the Go struct as the corresponding C struct; no data is copied.
point := (*C.{{.CurveNameUpperCase}}_g2_projective_t)(unsafe.Pointer(p))
res := C.g2_projective_is_on_curve_{{.CurveNameLowerCase}}(point)
return bool(res)
}


@@ -1,58 +0,0 @@
import (
"github.com/stretchr/testify/assert"
"testing"
)
func TestG2Eqg2(t *testing.T) {
var point G2Point
point.Random()
assert.True(t, point.Eq(&point))
}
func TestG2FromProjectiveToAffine(t *testing.T) {
var projective G2Point
var affine G2PointAffine
projective.Random()
affine.FromProjective(&projective)
var projective2 G2Point
projective2.FromAffine(&affine)
assert.True(t, projective.IsOnCurve())
assert.True(t, projective2.IsOnCurve())
assert.True(t, projective.Eq(&projective2))
}
func TestG2Eqg2NotEqual(t *testing.T) {
var point G2Point
point.Random()
var point2 G2Point
point2.Random()
assert.False(t, point.Eq(&point2))
}
func TestG2ToBytes(t *testing.T) {
element := G2Element{0x6546098ea84b6298, 0x4a384533d1f68aca, 0xaa0666972d771336, 0x1569e4a34321993}
bytes := element.ToBytesLe()
assert.Equal(t, bytes, []byte{0x98, 0x62, 0x4b, 0xa8, 0x8e, 0x9, 0x46, 0x65, 0xca, 0x8a, 0xf6, 0xd1, 0x33, 0x45, 0x38, 0x4a, 0x36, 0x13, 0x77, 0x2d, 0x97, 0x66, 0x6, 0xaa, 0x93, 0x19, 0x32, 0x34, 0x4a, 0x9e, 0x56, 0x1})
}
func TestG2ShouldConvertToProjective(t *testing.T) {
var pointProjective G2Point
var pointAffine G2PointAffine
pointProjective.Random()
pointAffine.FromProjective(&pointProjective)
proj := pointAffine.ToProjective()
assert.True(t, proj.IsOnCurve())
assert.True(t, pointProjective.Eq(&proj))
}


@@ -1,84 +0,0 @@
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdbool.h>
// msm.h
#ifndef _{{.CurveNameUpperCase}}_MSM_H
#define _{{.CurveNameUpperCase}}_MSM_H
#ifdef __cplusplus
extern "C" {
#endif
// Incomplete declaration of {{.CurveNameUpperCase}} projective and affine structs
typedef struct {{.CurveNameUpperCase}}_projective_t {{.CurveNameUpperCase}}_projective_t;
typedef struct {{.CurveNameUpperCase}}_g2_projective_t {{.CurveNameUpperCase}}_g2_projective_t;
typedef struct {{.CurveNameUpperCase}}_affine_t {{.CurveNameUpperCase}}_affine_t;
typedef struct {{.CurveNameUpperCase}}_g2_affine_t {{.CurveNameUpperCase}}_g2_affine_t;
typedef struct {{.CurveNameUpperCase}}_scalar_t {{.CurveNameUpperCase}}_scalar_t;
typedef cudaStream_t CudaStream_t;
int msm_cuda_{{.CurveNameLowerCase}}(
{{.CurveNameUpperCase}}_projective_t* out, {{.CurveNameUpperCase}}_affine_t* points, {{.CurveNameUpperCase}}_scalar_t* scalars, size_t count, size_t device_id);
int msm_batch_cuda_{{.CurveNameLowerCase}}(
{{.CurveNameUpperCase}}_projective_t* out,
{{.CurveNameUpperCase}}_affine_t* points,
{{.CurveNameUpperCase}}_scalar_t* scalars,
size_t batch_size,
size_t msm_size,
size_t device_id);
int commit_cuda_{{.CurveNameLowerCase}}(
{{.CurveNameUpperCase}}_projective_t* d_out,
{{.CurveNameUpperCase}}_scalar_t* d_scalars,
{{.CurveNameUpperCase}}_affine_t* d_points,
size_t count,
unsigned large_bucket_factor,
size_t device_id);
int commit_batch_cuda_{{.CurveNameLowerCase}}(
{{.CurveNameUpperCase}}_projective_t* d_out,
{{.CurveNameUpperCase}}_scalar_t* d_scalars,
{{.CurveNameUpperCase}}_affine_t* d_points,
size_t count,
size_t batch_size,
size_t device_id);
int msm_g2_cuda_{{.CurveNameLowerCase}}(
{{.CurveNameUpperCase}}_g2_projective_t* out,
{{.CurveNameUpperCase}}_g2_affine_t* points,
{{.CurveNameUpperCase}}_scalar_t* scalars,
size_t count,
size_t device_id);
int msm_batch_g2_cuda_{{.CurveNameLowerCase}}(
{{.CurveNameUpperCase}}_g2_projective_t* out,
{{.CurveNameUpperCase}}_g2_affine_t* points,
{{.CurveNameUpperCase}}_scalar_t* scalars,
size_t batch_size,
size_t msm_size,
size_t device_id);
int commit_g2_cuda_{{.CurveNameLowerCase}}(
{{.CurveNameUpperCase}}_g2_projective_t* d_out,
{{.CurveNameUpperCase}}_scalar_t* d_scalars,
{{.CurveNameUpperCase}}_g2_affine_t* d_points,
size_t count,
unsigned large_bucket_factor,
size_t device_id);
int commit_batch_g2_cuda_{{.CurveNameLowerCase}}(
{{.CurveNameUpperCase}}_g2_projective_t* d_out,
{{.CurveNameUpperCase}}_scalar_t* d_scalars,
{{.CurveNameUpperCase}}_g2_affine_t* d_points,
size_t count,
size_t batch_size,
size_t device_id,
cudaStream_t stream);
#ifdef __cplusplus
}
#endif
#endif /* _{{.CurveNameUpperCase}}_MSM_H */

Some files were not shown because too many files have changed in this diff.