mirror of
https://github.com/pseXperiments/icicle.git
synced 2026-01-06 22:24:06 -05:00
ICICLE V2 Release (#492)
This PR introduces major updates for ICICLE Core, Rust and Golang bindings --------- Co-authored-by: Yuval Shekel <yshekel@gmail.com> Co-authored-by: DmytroTym <dmytrotym1@gmail.com> Co-authored-by: Otsar <122266060+Otsar-Raikou@users.noreply.github.com> Co-authored-by: VitaliiH <vhnatyk@gmail.com> Co-authored-by: release-bot <release-bot@ingonyama.com> Co-authored-by: Stas <spolonsky@icloud.com> Co-authored-by: Jeremy Felder <jeremy.felder1@gmail.com> Co-authored-by: ImmanuelSegol <3ditds@gmail.com> Co-authored-by: JimmyHongjichuan <45908291+JimmyHongjichuan@users.noreply.github.com> Co-authored-by: pierre <pierreuu@gmail.com> Co-authored-by: Leon Hibnik <107353745+LeonHibnik@users.noreply.github.com> Co-authored-by: nonam3e <timur@ingonyama.com> Co-authored-by: Vlad <88586482+vladfdp@users.noreply.github.com> Co-authored-by: LeonHibnik <leon@ingonyama.com> Co-authored-by: nonam3e <71525212+nonam3e@users.noreply.github.com> Co-authored-by: vladfdp <vlad.heintz@gmail.com>
This commit is contained in:
@@ -3,3 +3,4 @@ crate
|
||||
lmit
|
||||
mut
|
||||
uint
|
||||
dout
|
||||
2
.github/workflows/codespell.yml
vendored
2
.github/workflows/codespell.yml
vendored
@@ -4,7 +4,7 @@ on:
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
- dev
|
||||
- V2
|
||||
|
||||
jobs:
|
||||
spelling-checker:
|
||||
|
||||
40
.github/workflows/cpp_cuda.yml
vendored
40
.github/workflows/cpp_cuda.yml
vendored
@@ -4,11 +4,11 @@ on:
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
- dev
|
||||
- V2
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- dev
|
||||
- V2
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
@@ -29,7 +29,7 @@ jobs:
|
||||
if: needs.check-changed-files.outputs.cpp_cuda == 'true'
|
||||
run: if [[ $(find ./ \( -path ./icicle/build -prune -o -path ./**/target -prune -o -path ./examples -prune \) -iname *.h -or -iname *.cuh -or -iname *.cu -or -iname *.c -or -iname *.cpp | xargs clang-format --dry-run -ferror-limit=1 -style=file 2>&1) ]]; then echo "Please run clang-format"; exit 1; fi
|
||||
|
||||
test-linux:
|
||||
test-linux-curve:
|
||||
name: Test on Linux
|
||||
runs-on: [self-hosted, Linux, X64, icicle]
|
||||
needs: [check-changed-files, check-format]
|
||||
@@ -39,14 +39,36 @@ jobs:
|
||||
steps:
|
||||
- name: Checkout Repo
|
||||
uses: actions/checkout@v4
|
||||
- name: Build
|
||||
- name: Build curve
|
||||
working-directory: ./icicle
|
||||
if: needs.check-changed-files.outputs.cpp_cuda == 'true'
|
||||
run: |
|
||||
mkdir -p build
|
||||
cmake -DBUILD_TESTS=ON -DCMAKE_BUILD_TYPE=Release -DCURVE=${{ matrix.curve }} -DG2_DEFINED=ON -S . -B build
|
||||
cmake --build build
|
||||
- name: Run C++ Tests
|
||||
working-directory: ./icicle/build
|
||||
mkdir -p build && rm -rf build/*
|
||||
cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=ON -DCURVE=${{ matrix.curve }} -DG2=ON -S . -B build
|
||||
cmake --build build -j
|
||||
- name: Run C++ curve Tests
|
||||
working-directory: ./icicle/build/tests
|
||||
if: needs.check-changed-files.outputs.cpp_cuda == 'true'
|
||||
run: ctest
|
||||
|
||||
test-linux-field:
|
||||
name: Test on Linux
|
||||
runs-on: [self-hosted, Linux, X64, icicle]
|
||||
needs: [check-changed-files, check-format]
|
||||
strategy:
|
||||
matrix:
|
||||
field: [babybear]
|
||||
steps:
|
||||
- name: Checkout Repo
|
||||
uses: actions/checkout@v4
|
||||
- name: Build field
|
||||
working-directory: ./icicle
|
||||
if: needs.check-changed-files.outputs.cpp_cuda == 'true'
|
||||
run: |
|
||||
mkdir -p build && rm -rf build/*
|
||||
cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=ON -DFIELD=${{ matrix.field }} -DEXT_FIELD=ON -S . -B build
|
||||
cmake --build build -j
|
||||
- name: Run C++ field Tests
|
||||
working-directory: ./icicle/build/tests
|
||||
if: needs.check-changed-files.outputs.cpp_cuda == 'true'
|
||||
run: ctest
|
||||
4
.github/workflows/examples.yml
vendored
4
.github/workflows/examples.yml
vendored
@@ -11,11 +11,11 @@ on:
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
- dev
|
||||
- V2
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- dev
|
||||
- V2
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
|
||||
63
.github/workflows/golang.yml
vendored
63
.github/workflows/golang.yml
vendored
@@ -4,11 +4,11 @@ on:
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
- dev
|
||||
- V2
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- dev
|
||||
- V2
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
@@ -33,13 +33,23 @@ jobs:
|
||||
if: needs.check-changed-files.outputs.golang == 'true'
|
||||
run: if [[ $(go list ./... | xargs go fmt) ]]; then echo "Please run go fmt"; exit 1; fi
|
||||
|
||||
build-linux:
|
||||
name: Build on Linux
|
||||
build-curves-linux:
|
||||
name: Build curves on Linux
|
||||
runs-on: [self-hosted, Linux, X64, icicle]
|
||||
needs: [check-changed-files, check-format]
|
||||
strategy:
|
||||
matrix:
|
||||
curve: [bn254, bls12_381, bls12_377, bw6_761]
|
||||
curve:
|
||||
- name: bn254
|
||||
build_args: -g2 -ecntt
|
||||
- name: bls12_381
|
||||
build_args: -g2 -ecntt
|
||||
- name: bls12_377
|
||||
build_args: -g2 -ecntt
|
||||
- name: bw6_761
|
||||
build_args: -g2 -ecntt
|
||||
- name: grumpkin
|
||||
build_args:
|
||||
steps:
|
||||
- name: Checkout Repo
|
||||
uses: actions/checkout@v4
|
||||
@@ -50,19 +60,50 @@ jobs:
|
||||
- name: Build
|
||||
working-directory: ./wrappers/golang
|
||||
if: needs.check-changed-files.outputs.golang == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
|
||||
run: ./build.sh ${{ matrix.curve }} ON ON # builds a single curve with G2 and ECNTT enabled
|
||||
run: ./build.sh -curve=${{ matrix.curve.name }} ${{ matrix.curve.build_args }} # builds a single curve with G2 and ECNTT enabled
|
||||
- name: Upload ICICLE lib artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
if: needs.check-changed-files.outputs.golang == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
|
||||
with:
|
||||
name: icicle-builds-${{ matrix.curve }}-${{ github.workflow }}-${{ github.sha }}
|
||||
path: icicle/build/libingo_${{ matrix.curve }}.a
|
||||
name: icicle-builds-${{ matrix.curve.name }}-${{ github.workflow }}-${{ github.sha }}
|
||||
path: |
|
||||
icicle/build/lib/libingo_curve_${{ matrix.curve.name }}.a
|
||||
icicle/build/lib/libingo_field_${{ matrix.curve.name }}.a
|
||||
retention-days: 1
|
||||
|
||||
build-fields-linux:
|
||||
name: Build fields on Linux
|
||||
runs-on: [self-hosted, Linux, X64, icicle]
|
||||
needs: [check-changed-files, check-format]
|
||||
strategy:
|
||||
matrix:
|
||||
field:
|
||||
- name: babybear
|
||||
build_args: -field-ext
|
||||
steps:
|
||||
- name: Checkout Repo
|
||||
uses: actions/checkout@v4
|
||||
- name: Setup go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.20.0'
|
||||
- name: Build
|
||||
working-directory: ./wrappers/golang
|
||||
if: needs.check-changed-files.outputs.golang == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
|
||||
run: ./build.sh -field=${{ matrix.field.name }} ${{ matrix.field.build_args }} # builds a single field with field-ext enabled
|
||||
- name: Upload ICICLE lib artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
if: needs.check-changed-files.outputs.golang == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
|
||||
with:
|
||||
name: icicle-builds-${{ matrix.field.name }}-${{ github.workflow }}-${{ github.sha }}
|
||||
path: |
|
||||
icicle/build/lib/libingo_field_${{ matrix.field.name }}.a
|
||||
retention-days: 1
|
||||
|
||||
test-linux:
|
||||
name: Test on Linux
|
||||
runs-on: [self-hosted, Linux, X64, icicle]
|
||||
needs: [check-changed-files, build-linux]
|
||||
needs: [check-changed-files, build-curves-linux, build-fields-linux]
|
||||
steps:
|
||||
- name: Checkout Repo
|
||||
uses: actions/checkout@v4
|
||||
@@ -74,7 +115,7 @@ jobs:
|
||||
uses: actions/download-artifact@v4
|
||||
if: needs.check-changed-files.outputs.golang == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
|
||||
with:
|
||||
path: ./icicle/build/
|
||||
path: ./icicle/build/lib
|
||||
merge-multiple: true
|
||||
- name: Run Tests
|
||||
working-directory: ./wrappers/golang
|
||||
@@ -83,7 +124,7 @@ jobs:
|
||||
# -p controls the number of programs that can be run in parallel
|
||||
run: |
|
||||
export CPATH=$CPATH:/usr/local/cuda/include
|
||||
go test --tags=g2 ./... -count=1 -failfast -p 2 -timeout 60m
|
||||
go test ./... -count=1 -failfast -p 2 -timeout 60m
|
||||
|
||||
# TODO: bw6 on windows requires more memory than the standard runner has
|
||||
# Add a large runner and then enable this job
|
||||
|
||||
16
.github/workflows/rust.yml
vendored
16
.github/workflows/rust.yml
vendored
@@ -4,11 +4,11 @@ on:
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
- dev
|
||||
- V2
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- dev
|
||||
- V2
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
@@ -60,7 +60,17 @@ jobs:
|
||||
if: needs.check-changed-files.outputs.rust == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
|
||||
# Running tests from the root workspace will run all workspace members' tests by default
|
||||
# We need to limit the number of threads to avoid running out of memory on weaker machines
|
||||
run: cargo test --release --verbose --features=g2 -- --test-threads=2
|
||||
# ignored tests are polynomial tests. Since they conflict with NTT tests, they are executed sperately
|
||||
run: |
|
||||
cargo test --workspace --exclude icicle-babybear --release --verbose --features=g2 -- --test-threads=2 --ignored
|
||||
cargo test --workspace --exclude icicle-babybear --release --verbose --features=g2 -- --test-threads=2
|
||||
|
||||
- name: Run baby bear tests
|
||||
working-directory: ./wrappers/rust/icicle-fields/icicle-babybear
|
||||
if: needs.check-changed-files.outputs.rust == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
|
||||
run: |
|
||||
cargo test --release --verbose -- --ignored
|
||||
cargo test --release --verbose
|
||||
|
||||
build-windows:
|
||||
name: Build on Windows
|
||||
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -16,6 +16,6 @@
|
||||
**/Cargo.lock
|
||||
**/icicle/build/
|
||||
**/wrappers/rust/icicle-cuda-runtime/src/bindings.rs
|
||||
**/build
|
||||
**/build*
|
||||
**/icicle/appUtils/large_ntt/work
|
||||
icicle/appUtils/large_ntt/work/test_ntt
|
||||
|
||||
181
docs/docs/icicle/core.md
Normal file
181
docs/docs/icicle/core.md
Normal file
@@ -0,0 +1,181 @@
|
||||
# ICICLE Core
|
||||
|
||||
ICICLE Core is a library written in C++/CUDA. All the ICICLE primitives are implemented within ICICLE Core.
|
||||
|
||||
The Core is split into logical modules that can be compiled into static libraries using different [strategies](#compilation-strategies). You can then [link](#linking) these libraries with your C++ project or write your own [bindings](#writing-new-bindings-for-icicle) for other programming languages. If you want to use ICICLE with existing bindings please refer to [Rust](/icicle/rust-bindings) / [Golang](/icicle/golang-bindings).
|
||||
|
||||
## Compilation strategies
|
||||
|
||||
Most of the codebase is curve/field agnostic, which means it can be compiled for different curves and fields. When you build ICICLE Core you choose a single curve or field. If you need multiple curves or fields - you just compile ICICLE into multiple static libraries. It's that simple. Currently, the following choices are supported:
|
||||
|
||||
- [Field mode](#compiling-for-a-field) - used for STARK fields like BabyBear / Mersenne / Goldilocks. Includes field arithmetic, NTT, Poseidon, Extension fields and other primitives.
|
||||
- [Curve mode](#compiling-for-a-curve) - used for SNARK curves like BN254/ BLS curves / Grumpkin / etc. Curve mode is built upon field mode, so it includes everything that field does. It also includes curve operations / MSM / ECNTT / G2 and other curve-related primitives.
|
||||
|
||||
:::info
|
||||
|
||||
If you only want to use curve's scalar/base field, you still need to go with a curve mode. You can disable MSM with [options](#compilation-options)
|
||||
|
||||
:::
|
||||
|
||||
### Compiling for a field
|
||||
|
||||
ICICLE supports the following STARK fields:
|
||||
- [BabyBear](https://eprint.iacr.org/2023/824.pdf)
|
||||
|
||||
Field mode includes:
|
||||
- [Field arithmetic](https://github.com/ingonyama-zk/icicle/blob/main/icicle/include/fields/field.cuh) - field multiplication, addition, subtraction
|
||||
- [NTT](icicle/primitives/ntt) - FFT / iFFT
|
||||
- [Poseidon Hash](icicle/primitives/poseidon)
|
||||
- [Vector operations](https://github.com/ingonyama-zk/icicle/blob/main/icicle/include/vec_ops/vec_ops.cuh)
|
||||
- [Polynomial](#) - structs and methods to work with polynomials
|
||||
|
||||
You can compile ICICLE for a STARK field using this command:
|
||||
|
||||
```sh
|
||||
cd icicle
|
||||
mkdir -p build
|
||||
cmake -DFIELD=<FIELD> -S . -B build
|
||||
cmake --build build -j
|
||||
```
|
||||
|
||||
Icicle Supports the following `<FIELD>` FIELDS:
|
||||
- `babybear`
|
||||
|
||||
This command will output `libingo_field_<FIELD>.a` into `build/lib`.
|
||||
|
||||
### Compiling for a curve
|
||||
|
||||
ICICLE supports the following SNARK curves:
|
||||
- [BN254](https://neuromancer.sk/std/bn/bn254)
|
||||
- [BLS12-377](https://neuromancer.sk/std/bls/BLS12-377)
|
||||
- [BLS12-381](https://neuromancer.sk/std/bls/BLS12-381)
|
||||
- [BW6-761](https://eprint.iacr.org/2020/351)
|
||||
- Grumpkin
|
||||
|
||||
Curve mode includes everything you can find in field mode with addition of:
|
||||
- [MSM](icicle/primitives/msm) - MSM / Batched MSM
|
||||
- [ECNTT](#)
|
||||
|
||||
:::note
|
||||
|
||||
Field related primitives will be compiled for the scalar field of the curve
|
||||
|
||||
:::
|
||||
|
||||
You can compile ICICLE for a SNARK curve using this command:
|
||||
|
||||
```sh
|
||||
cd icicle
|
||||
mkdir -p build
|
||||
cmake -DCURVE=<CURVE> -S . -B build
|
||||
cmake --build build -j
|
||||
```
|
||||
|
||||
Where `<CURVE>` can be one of `bn254`/`bls12_377`/`bls12_381`/`bw6_761`/`grumpkin`.
|
||||
|
||||
This command will output both `libingo_curve_<CURVE>.a` and `libingo_field_<CURVE>.a` into `build/lib`.
|
||||
|
||||
### Compilation options
|
||||
|
||||
There exist multiple options that allow you to customize your build or enable additional functionality.
|
||||
|
||||
#### EXT_FIELD
|
||||
|
||||
Used only in a [field mode](#compiling-for-a-field) to add Extension field into a build. Adds NTT for the extension field.
|
||||
|
||||
Default: `OFF`
|
||||
|
||||
Usage: `-DEXT_FIELD=ON`
|
||||
|
||||
#### G2
|
||||
|
||||
Used only in a [curve mode](#compiling-for-a-curve) to add G2 definitions into a build. Also adds G2 MSM.
|
||||
|
||||
Default: `OFF`
|
||||
|
||||
Usage: `-DG2=ON`
|
||||
|
||||
#### ECNTT
|
||||
|
||||
Used only in a [curve mode](#compiling-for-a-curve) to add ECNTT function into a build.
|
||||
|
||||
Default: `OFF`
|
||||
|
||||
Usage: `-DECNTT=ON`
|
||||
|
||||
#### MSM
|
||||
|
||||
Used only in a [curve mode](#compiling-for-a-curve) to add MSM function into a build. As MSM takes a lot of time to build, you can disable it with this option to reduce compilation time.
|
||||
|
||||
Default: `ON`
|
||||
|
||||
Usage: `-DMSM=OFF`
|
||||
|
||||
#### BUILD_HASH
|
||||
|
||||
Can be used in any mode to build a hash library. Currently it only includes Keccak hash function, but more are coming.
|
||||
|
||||
Default: `OFF`
|
||||
|
||||
Usage: `-DBUILD_HASH=ON`
|
||||
|
||||
#### BUILD_TESTS
|
||||
|
||||
Can be used in any mode to include tests runner binary.
|
||||
|
||||
Default: `OFF`
|
||||
|
||||
USAGE: `-DBUILD_TESTS=ON`
|
||||
|
||||
#### BUILD_BENCHMARKS
|
||||
|
||||
Can be used in any mode to include benchmarks runner binary.
|
||||
|
||||
Default: `OFF`
|
||||
|
||||
USAGE: `-DBUILD_BENCHMARKS=ON`
|
||||
|
||||
#### DEVMODE
|
||||
|
||||
Can be used in any mode to include debug symbols in the build.
|
||||
|
||||
Default: `OFF`
|
||||
|
||||
USAGE: `-DEVMODE=ON`
|
||||
|
||||
## Linking
|
||||
|
||||
To link ICICLE with your project you first need to compile ICICLE with options of your choice. After that you can use CMake `target_link_libraries` to link with the generated static libraries and `target_include_directories` to include ICICLE headers (located in `icicle/include`).
|
||||
|
||||
Refer to our [c++ examples](https://github.com/ingonyama-zk/icicle/tree/main/examples/c%2B%2B) for more info. Take a look at this [CMakeLists.txt](https://github.com/ingonyama-zk/icicle/blob/main/examples/c%2B%2B/msm/CMakeLists.txt#L22)
|
||||
|
||||
|
||||
## Writing new bindings for ICICLE
|
||||
|
||||
Since ICICLE Core is written in CUDA / C++ its really simple to generate static libraries. These static libraries can be installed on any system and called by higher level languages such as Golang.
|
||||
|
||||
Static libraries can be loaded into memory once and used by multiple programs, reducing memory usage and potentially improving performance. They also allow you to separate functionality into distinct modules so your static library may need to compile only specific features that you want to use.
|
||||
|
||||
Let's review the [Golang bindings](golang-bindings.md) since its a pretty verbose example (compared to rust which hides it pretty well) of using static libraries. Golang has a library named `CGO` which can be used to link static libraries. Here's a basic example on how you can use cgo to link these libraries:
|
||||
|
||||
```go
|
||||
/*
|
||||
#cgo LDFLAGS: -L/path/to/shared/libs -lbn254 -lbls12_381 -lbls12_377 -lbw6_671
|
||||
#include "icicle.h" // make sure you use the correct header file(s)
|
||||
*/
|
||||
import "C"
|
||||
|
||||
func main() {
|
||||
// Now you can call the C functions from the ICICLE libraries.
|
||||
// Note that C function calls are prefixed with 'C.' in Go code.
|
||||
|
||||
out := (*C.BN254_projective_t)(unsafe.Pointer(p))
|
||||
in := (*C.BN254_affine_t)(unsafe.Pointer(affine))
|
||||
|
||||
C.projective_from_affine_bn254(out, in)
|
||||
}
|
||||
```
|
||||
|
||||
The comments on the first line tell `CGO` which libraries to import as well as which header files to include. You can then call methods which are part of the static library and defined in the header file, `C.projective_from_affine_bn254` is an example.
|
||||
|
||||
If you wish to create your own bindings for a language of your choice we suggest you start by investigating how you can call static libraries.
|
||||
@@ -33,28 +33,31 @@ go get github.com/ingonyama-zk/icicle@<commit_id>
|
||||
|
||||
To build the shared libraries you can run this script:
|
||||
|
||||
```bash
|
||||
./build.sh [-curve=<curve> | -field=<field>] [-cuda_version=<version>] [-g2] [-ecntt] [-devmode]
|
||||
```
|
||||
./build <curve> [G2_enabled]
|
||||
- **`curve`** - The name of the curve to build or "all" to build all curves
|
||||
- **`field`** - The name of the field to build or "all" to build all fields
|
||||
- **`g2`** - Optional - build with G2 enabled
|
||||
- **`ecntt`** - Optional - build with ECNTT enabled
|
||||
- **`devmode`** - Optional - build in devmode
|
||||
- Usage can be displayed with the flag `-help`
|
||||
|
||||
curve - The name of the curve to build or "all" to build all curves
|
||||
G2_enabled - Optional - To build with G2 enabled
|
||||
```
|
||||
|
||||
For example if you want to build all curves with G2 enabled you would run:
|
||||
To build ICICLE libraries for all supported curves with G2 and ECNTT enabled.
|
||||
|
||||
```bash
|
||||
./build.sh all ON
|
||||
./build.sh all -g2 -ecntt
|
||||
```
|
||||
|
||||
If you are interested in building a specific curve you would run:
|
||||
If you wish to build for a specific curve, for example bn254, without G2 or ECNTT enabled.
|
||||
|
||||
```bash
|
||||
./build.sh bls12_381 ON
|
||||
``` bash
|
||||
./build.sh -curve=bn254
|
||||
```
|
||||
|
||||
Now you can import ICICLE into your project
|
||||
|
||||
```golang
|
||||
```go
|
||||
import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"testing"
|
||||
@@ -85,13 +88,13 @@ go test <path_to_curve> -count=1
|
||||
|
||||
The libraries produced from the CUDA code compilation are used to bind Golang to ICICLE's CUDA code.
|
||||
|
||||
1. These libraries (named `libingo_<curve>.a`) can be imported in your Go project to leverage the GPU accelerated functionalities provided by ICICLE.
|
||||
1. These libraries (named `libingo_curve_<curve>.a` and `libingo_field_<curve>.a`) can be imported in your Go project to leverage the GPU accelerated functionalities provided by ICICLE.
|
||||
|
||||
2. In your Go project, you can use `cgo` to link these libraries. Here's a basic example on how you can use `cgo` to link these libraries:
|
||||
|
||||
```go
|
||||
/*
|
||||
#cgo LDFLAGS: -L/path/to/shared/libs -lingo_bn254
|
||||
#cgo LDFLAGS: -L/path/to/shared/libs -lingo_curve_bn254 -L$/path/to/shared/libs -lingo_field_bn254 -lstdc++ -lm
|
||||
#include "icicle.h" // make sure you use the correct header file(s)
|
||||
*/
|
||||
import "C"
|
||||
|
||||
97
docs/docs/icicle/golang-bindings/ecntt.md
Normal file
97
docs/docs/icicle/golang-bindings/ecntt.md
Normal file
@@ -0,0 +1,97 @@
|
||||
# ECNTT
|
||||
|
||||
### Supported curves
|
||||
|
||||
`bls12-377`, `bls12-381`, `bn254`
|
||||
|
||||
## ECNTT Method
|
||||
|
||||
The `ECNtt[T any]()` function performs the Elliptic Curve Number Theoretic Transform (EC-NTT) on the input points slice, using the provided dir (direction), cfg (configuration), and stores the results in the results slice.
|
||||
|
||||
```go
|
||||
func ECNtt[T any](points core.HostOrDeviceSlice, dir core.NTTDir, cfg *core.NTTConfig[T], results core.HostOrDeviceSlice) core.IcicleError
|
||||
```
|
||||
|
||||
### Parameters:
|
||||
|
||||
- **`points`**: A slice of elliptic curve points (in projective coordinates) that will be transformed. The slice can be stored on the host or the device, as indicated by the `core.HostOrDeviceSlice` type.
|
||||
- **`dir`**: The direction of the EC-NTT transform, either `core.KForward` or `core.KInverse`.
|
||||
- **`cfg`**: A pointer to an `NTTConfig` object, containing configuration options for the NTT operation.
|
||||
- **`results`**: A slice that will store the transformed elliptic curve points (in projective coordinates). The slice can be stored on the host or the device, as indicated by the `core.HostOrDeviceSlice` type.
|
||||
|
||||
|
||||
### Return Value
|
||||
|
||||
- **`CudaError`**: A `core.IcicleError` value, which will be `core.IcicleErrorCode(0)` if the EC-NTT operation was successful, or an error if something went wrong.
|
||||
|
||||
## NTT Configuration (NTTConfig)
|
||||
|
||||
The `NTTConfig` structure holds configuration parameters for the NTT operation, allowing customization of its behavior to optimize performance based on the specifics of your protocol.
|
||||
|
||||
```go
|
||||
type NTTConfig[T any] struct {
|
||||
Ctx cr.DeviceContext
|
||||
CosetGen T
|
||||
BatchSize int32
|
||||
ColumnsBatch bool
|
||||
Ordering Ordering
|
||||
areInputsOnDevice bool
|
||||
areOutputsOnDevice bool
|
||||
IsAsync bool
|
||||
NttAlgorithm NttAlgorithm
|
||||
}
|
||||
```
|
||||
|
||||
### Fields
|
||||
|
||||
- **`Ctx`**: Device context containing details like device ID and stream ID.
|
||||
- **`CosetGen`**: Coset generator used for coset (i)NTTs, defaulting to no coset being used.
|
||||
- **`BatchSize`**: The number of NTTs to compute in one operation, defaulting to 1.
|
||||
- **`ColumnsBatch`**: If true the function will compute the NTTs over the columns of the input matrix and not over the rows. Defaults to `false`.
|
||||
- **`Ordering`**: Ordering of inputs and outputs (`KNN`, `KNR`, `KRN`, `KRR`), affecting how data is arranged.
|
||||
- **`areInputsOnDevice`**: Indicates if input scalars are located on the device.
|
||||
- **`areOutputsOnDevice`**: Indicates if results are stored on the device.
|
||||
- **`IsAsync`**: Controls whether the NTT operation runs asynchronously.
|
||||
- **`NttAlgorithm`**: Explicitly select the NTT algorithm. ECNTT supports running on `Radix2` algoruithm.
|
||||
|
||||
### Default Configuration
|
||||
|
||||
Use `GetDefaultNTTConfig` to obtain a default configuration, customizable as needed.
|
||||
|
||||
```go
|
||||
func GetDefaultNTTConfig[T any](cosetGen T) NTTConfig[T]
|
||||
```
|
||||
|
||||
## ECNTT Example
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/ingonyama-zk/icicle/wrappers/golang/core"
|
||||
cr "github.com/ingonyama-zk/icicle/wrappers/golang/cuda_runtime"
|
||||
)
|
||||
|
||||
func Main() {
|
||||
// Obtain the default NTT configuration with a predefined coset generator.
|
||||
cfg := GetDefaultNttConfig()
|
||||
|
||||
// Define the size of the input scalars.
|
||||
size := 1 << 18
|
||||
|
||||
// Generate Points for the ECNTT operation.
|
||||
points := GenerateProjectivePoints(size)
|
||||
|
||||
// Set the direction of the NTT (forward or inverse).
|
||||
dir := core.KForward
|
||||
|
||||
// Allocate memory for the results of the NTT operation.
|
||||
results := make(core.HostSlice[Projective], size)
|
||||
|
||||
// Perform the NTT operation.
|
||||
err := ECNtt(points, dir, &cfg, results)
|
||||
if err != cr.CudaSuccess {
|
||||
panic("ECNTT operation failed")
|
||||
}
|
||||
}
|
||||
```
|
||||
@@ -4,7 +4,7 @@ To understand the theory behind MSM pre computation technique refer to Niall Emm
|
||||
|
||||
### Supported curves
|
||||
|
||||
`bls12-377`, `bls12-381`, `bn254`, `bw6-761`
|
||||
`bls12-377`, `bls12-381`, `bn254`, `bw6-761`, `grumpkin`
|
||||
|
||||
## Core package
|
||||
|
||||
@@ -37,15 +37,27 @@ func PrecomputeBases(points core.HostOrDeviceSlice, precomputeFactor int32, c in
|
||||
##### Example
|
||||
|
||||
```go
|
||||
cfg := GetDefaultMSMConfig()
|
||||
points := GenerateAffinePoints(1024)
|
||||
precomputeFactor := 8
|
||||
var precomputeOut core.DeviceSlice
|
||||
_, e := precomputeOut.Malloc(points[0].Size()*points.Len()*int(precomputeFactor), points[0].Size())
|
||||
package main
|
||||
|
||||
err := PrecomputeBases(points, precomputeFactor, 0, &cfg.Ctx, precomputeOut)
|
||||
if err != cr.CudaSuccess {
|
||||
log.Fatalf("PrecomputeBases failed: %v", err)
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/ingonyama-zk/icicle/wrappers/golang/core"
|
||||
cr "github.com/ingonyama-zk/icicle/wrappers/golang/cuda_runtime"
|
||||
bn254 "github.com/ingonyama-zk/icicle/wrappers/golang/curves/bn254"
|
||||
)
|
||||
|
||||
func main() {
|
||||
cfg := bn254.GetDefaultMSMConfig()
|
||||
points := bn254.GenerateAffinePoints(1024)
|
||||
var precomputeFactor int32 = 8
|
||||
var precomputeOut core.DeviceSlice
|
||||
precomputeOut.Malloc(points[0].Size()*points.Len()*int(precomputeFactor), points[0].Size())
|
||||
|
||||
err := bn254.PrecomputeBases(points, precomputeFactor, 0, &cfg.Ctx, precomputeOut)
|
||||
if err != cr.CudaSuccess {
|
||||
log.Fatalf("PrecomputeBases failed: %v", err)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
@@ -68,15 +80,27 @@ func G2PrecomputeBases(points core.HostOrDeviceSlice, precomputeFactor int32, c
|
||||
##### Example
|
||||
|
||||
```go
|
||||
cfg := G2GetDefaultMSMConfig()
|
||||
points := G2GenerateAffinePoints(1024)
|
||||
precomputeFactor := 8
|
||||
var precomputeOut core.DeviceSlice
|
||||
_, e := precomputeOut.Malloc(points[0].Size()*points.Len()*int(precomputeFactor), points[0].Size())
|
||||
package main
|
||||
|
||||
err := G2PrecomputeBases(points, precomputeFactor, 0, &cfg.Ctx, precomputeOut)
|
||||
if err != cr.CudaSuccess {
|
||||
log.Fatalf("G2PrecomputeBases failed: %v", err)
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/ingonyama-zk/icicle/wrappers/golang/core"
|
||||
cr "github.com/ingonyama-zk/icicle/wrappers/golang/cuda_runtime"
|
||||
g2 "github.com/ingonyama-zk/icicle/wrappers/golang/curves/bn254/g2"
|
||||
)
|
||||
|
||||
func main() {
|
||||
cfg := g2.G2GetDefaultMSMConfig()
|
||||
points := g2.G2GenerateAffinePoints(1024)
|
||||
var precomputeFactor int32 = 8
|
||||
var precomputeOut core.DeviceSlice
|
||||
precomputeOut.Malloc(points[0].Size()*points.Len()*int(precomputeFactor), points[0].Size())
|
||||
|
||||
err := g2.G2PrecomputeBases(points, precomputeFactor, 0, &cfg.Ctx, precomputeOut)
|
||||
if err != cr.CudaSuccess {
|
||||
log.Fatalf("PrecomputeBases failed: %v", err)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
|
||||
### Supported curves
|
||||
|
||||
`bls12-377`, `bls12-381`, `bn254`, `bw6-761`
|
||||
`bls12-377`, `bls12-381`, `bn254`, `bw6-761`, `grumpkin`
|
||||
|
||||
## MSM Example
|
||||
|
||||
@@ -11,52 +11,54 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/ingonyama-zk/icicle/wrappers/golang/core"
|
||||
cr "github.com/ingonyama-zk/icicle/wrappers/golang/cuda_runtime"
|
||||
"github.com/ingonyama-zk/icicle/wrappers/golang/core"
|
||||
cr "github.com/ingonyama-zk/icicle/wrappers/golang/cuda_runtime"
|
||||
bn254 "github.com/ingonyama-zk/icicle/wrappers/golang/curves/bn254"
|
||||
)
|
||||
|
||||
func Main() {
|
||||
// Obtain the default MSM configuration.
|
||||
cfg := GetDefaultMSMConfig()
|
||||
|
||||
// Define the size of the problem, here 2^18.
|
||||
size := 1 << 18
|
||||
func main() {
|
||||
// Obtain the default MSM configuration.
|
||||
cfg := bn254.GetDefaultMSMConfig()
|
||||
|
||||
// Generate scalars and points for the MSM operation.
|
||||
scalars := GenerateScalars(size)
|
||||
points := GenerateAffinePoints(size)
|
||||
// Define the size of the problem, here 2^18.
|
||||
size := 1 << 18
|
||||
|
||||
// Create a CUDA stream for asynchronous operations.
|
||||
stream, _ := cr.CreateStream()
|
||||
var p Projective
|
||||
|
||||
// Allocate memory on the device for the result of the MSM operation.
|
||||
var out core.DeviceSlice
|
||||
_, e := out.MallocAsync(p.Size(), p.Size(), stream)
|
||||
// Generate scalars and points for the MSM operation.
|
||||
scalars := bn254.GenerateScalars(size)
|
||||
points := bn254.GenerateAffinePoints(size)
|
||||
|
||||
if e != cr.CudaSuccess {
|
||||
panic(e)
|
||||
}
|
||||
|
||||
// Set the CUDA stream in the MSM configuration.
|
||||
cfg.Ctx.Stream = &stream
|
||||
cfg.IsAsync = true
|
||||
|
||||
// Perform the MSM operation.
|
||||
e = Msm(scalars, points, &cfg, out)
|
||||
|
||||
if e != cr.CudaSuccess {
|
||||
panic(e)
|
||||
}
|
||||
|
||||
// Allocate host memory for the results and copy the results from the device.
|
||||
outHost := make(core.HostSlice[Projective], 1)
|
||||
cr.SynchronizeStream(&stream)
|
||||
outHost.CopyFromDevice(&out)
|
||||
|
||||
// Free the device memory allocated for the results.
|
||||
out.Free()
|
||||
// Create a CUDA stream for asynchronous operations.
|
||||
stream, _ := cr.CreateStream()
|
||||
var p bn254.Projective
|
||||
|
||||
// Allocate memory on the device for the result of the MSM operation.
|
||||
var out core.DeviceSlice
|
||||
_, e := out.MallocAsync(p.Size(), p.Size(), stream)
|
||||
|
||||
if e != cr.CudaSuccess {
|
||||
panic(e)
|
||||
}
|
||||
|
||||
// Set the CUDA stream in the MSM configuration.
|
||||
cfg.Ctx.Stream = &stream
|
||||
cfg.IsAsync = true
|
||||
|
||||
// Perform the MSM operation.
|
||||
e = bn254.Msm(scalars, points, &cfg, out)
|
||||
|
||||
if e != cr.CudaSuccess {
|
||||
panic(e)
|
||||
}
|
||||
|
||||
// Allocate host memory for the results and copy the results from the device.
|
||||
outHost := make(core.HostSlice[bn254.Projective], 1)
|
||||
cr.SynchronizeStream(&stream)
|
||||
outHost.CopyFromDevice(&out)
|
||||
|
||||
// Free the device memory allocated for the results.
|
||||
out.Free()
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
## MSM Method
|
||||
@@ -67,14 +69,14 @@ func Msm(scalars core.HostOrDeviceSlice, points core.HostOrDeviceSlice, cfg *cor
|
||||
|
||||
### Parameters
|
||||
|
||||
- **scalars**: A slice containing the scalars for multiplication. It can reside either in host memory or device memory.
|
||||
- **points**: A slice containing the points to be multiplied with scalars. Like scalars, these can also be in host or device memory.
|
||||
- **cfg**: A pointer to an `MSMConfig` object, which contains various configuration options for the MSM operation.
|
||||
- **results**: A slice where the results of the MSM operation will be stored. This slice can be in host or device memory.
|
||||
- **`scalars`**: A slice containing the scalars for multiplication. It can reside either in host memory or device memory.
|
||||
- **`points`**: A slice containing the points to be multiplied with scalars. Like scalars, these can also be in host or device memory.
|
||||
- **`cfg`**: A pointer to an `MSMConfig` object, which contains various configuration options for the MSM operation.
|
||||
- **`results`**: A slice where the results of the MSM operation will be stored. This slice can be in host or device memory.
|
||||
|
||||
### Return Value
|
||||
|
||||
- **CudaError**: Returns a CUDA error code indicating the success or failure of the MSM operation.
|
||||
- **`CudaError`**: Returns a CUDA error code indicating the success or failure of the MSM operation.
|
||||
|
||||
## MSMConfig
|
||||
|
||||
@@ -100,19 +102,19 @@ type MSMConfig struct {
|
||||
|
||||
### Fields
|
||||
|
||||
- **Ctx**: Device context containing details like device id and stream.
|
||||
- **PrecomputeFactor**: Controls the number of extra points to pre-compute.
|
||||
- **C**: Window bitsize, a key parameter in the "bucket method" for MSM.
|
||||
- **Bitsize**: Number of bits of the largest scalar.
|
||||
- **LargeBucketFactor**: Sensitivity to frequently occurring buckets.
|
||||
- **batchSize**: Number of results to compute in one batch.
|
||||
- **areScalarsOnDevice**: Indicates if scalars are located on the device.
|
||||
- **AreScalarsMontgomeryForm**: True if scalars are in Montgomery form.
|
||||
- **arePointsOnDevice**: Indicates if points are located on the device.
|
||||
- **ArePointsMontgomeryForm**: True if point coordinates are in Montgomery form.
|
||||
- **areResultsOnDevice**: Indicates if results are stored on the device.
|
||||
- **IsBigTriangle**: If `true` MSM will run in Large triangle accumulation if `false` Bucket accumulation will be chosen. Default value: false.
|
||||
- **IsAsync**: If true, runs MSM asynchronously.
|
||||
- **`Ctx`**: Device context containing details like device id and stream.
|
||||
- **`PrecomputeFactor`**: Controls the number of extra points to pre-compute.
|
||||
- **`C`**: Window bitsize, a key parameter in the "bucket method" for MSM.
|
||||
- **`Bitsize`**: Number of bits of the largest scalar.
|
||||
- **`LargeBucketFactor`**: Sensitivity to frequently occurring buckets.
|
||||
- **`batchSize`**: Number of results to compute in one batch.
|
||||
- **`areScalarsOnDevice`**: Indicates if scalars are located on the device.
|
||||
- **`AreScalarsMontgomeryForm`**: True if scalars are in Montgomery form.
|
||||
- **`arePointsOnDevice`**: Indicates if points are located on the device.
|
||||
- **`ArePointsMontgomeryForm`**: True if point coordinates are in Montgomery form.
|
||||
- **`areResultsOnDevice`**: Indicates if results are stored on the device.
|
||||
- **`IsBigTriangle`**: If `true` MSM will run in Large triangle accumulation if `false` Bucket accumulation will be chosen. Default value: false.
|
||||
- **`IsAsync`**: If true, runs MSM asynchronously.
|
||||
|
||||
### Default Configuration
|
||||
|
||||
@@ -157,44 +159,43 @@ out.Malloc(batchSize*p.Size(), p.Size())
|
||||
|
||||
## Support for G2 group
|
||||
|
||||
To activate G2 support first you must make sure you are building the static libraries with G2 feature enabled.
|
||||
To activate G2 support first you must make sure you are building the static libraries with G2 feature enabled as described in the [Golang building instructions](../golang-bindings.md#using-icicle-golang-bindings-in-your-project).
|
||||
|
||||
```bash
|
||||
./build.sh bls12_381 ON
|
||||
```
|
||||
|
||||
Now when importing `icicle`, you should have access to G2 features.
|
||||
|
||||
Now you may import `g2` package of the specified curve.
|
||||
|
||||
```go
|
||||
import (
|
||||
"github.com/ingonyama-zk/icicle/wrappers/golang/core"
|
||||
"github.com/ingonyama-zk/icicle/wrappers/golang/curves/bls254/g2"
|
||||
)
|
||||
```
|
||||
|
||||
These features include `G2Projective` and `G2Affine` points as well as a `G2Msm` method.
|
||||
This package include `G2Projective` and `G2Affine` points as well as a `G2Msm` method.
|
||||
|
||||
```go
|
||||
...
|
||||
package main
|
||||
|
||||
cfg := GetDefaultMSMConfig()
|
||||
size := 1 << 12
|
||||
batchSize := 3
|
||||
totalSize := size * batchSize
|
||||
scalars := GenerateScalars(totalSize)
|
||||
points := G2GenerateAffinePoints(totalSize)
|
||||
import (
|
||||
"github.com/ingonyama-zk/icicle/wrappers/golang/core"
|
||||
bn254 "github.com/ingonyama-zk/icicle/wrappers/golang/curves/bn254"
|
||||
g2 "github.com/ingonyama-zk/icicle/wrappers/golang/curves/bn254/g2"
|
||||
)
|
||||
|
||||
var p G2Projective
|
||||
var out core.DeviceSlice
|
||||
out.Malloc(batchSize*p.Size(), p.Size())
|
||||
G2Msm(scalars, points, &cfg, out)
|
||||
func main() {
|
||||
cfg := bn254.GetDefaultMSMConfig()
|
||||
size := 1 << 12
|
||||
batchSize := 3
|
||||
totalSize := size * batchSize
|
||||
scalars := bn254.GenerateScalars(totalSize)
|
||||
points := g2.G2GenerateAffinePoints(totalSize)
|
||||
|
||||
var p g2.G2Projective
|
||||
var out core.DeviceSlice
|
||||
out.Malloc(batchSize*p.Size(), p.Size())
|
||||
g2.G2Msm(scalars, points, &cfg, out)
|
||||
}
|
||||
|
||||
...
|
||||
```
|
||||
|
||||
`G2Msm` works the same way as normal MSM, the difference is that it uses G2 Points.
|
||||
|
||||
Additionally when you are building your application make sure to use the g2 feature flag
|
||||
|
||||
```bash
|
||||
go build -tags=g2
|
||||
```
|
||||
|
||||
@@ -15,41 +15,52 @@ In this example we will display how you can
|
||||
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/ingonyama-zk/icicle/wrappers/golang/core"
|
||||
cr "github.com/ingonyama-zk/icicle/wrappers/golang/cuda_runtime"
|
||||
bn254 "github.com/ingonyama-zk/icicle/wrappers/golang/curves/bn254"
|
||||
)
|
||||
|
||||
func main() {
|
||||
numDevices, _ := cuda_runtime.GetDeviceCount()
|
||||
numDevices, _ := cr.GetDeviceCount()
|
||||
fmt.Println("There are ", numDevices, " devices available")
|
||||
wg := sync.WaitGroup{}
|
||||
|
||||
for i := 0; i < numDevices; i++ {
|
||||
wg.Add(1)
|
||||
// RunOnDevice makes sure each MSM runs on a single thread
|
||||
cuda_runtime.RunOnDevice(i, func(args ...any) {
|
||||
// RunOnDevice makes sure each MSM runs on a single thread
|
||||
cr.RunOnDevice(i, func(args ...any) {
|
||||
defer wg.Done()
|
||||
cfg := GetDefaultMSMConfig()
|
||||
cfg := bn254.GetDefaultMSMConfig()
|
||||
cfg.IsAsync = true
|
||||
for _, power := range []int{10, 18} {
|
||||
size := 1 << power // 2^pwr
|
||||
|
||||
// generate random scalars
|
||||
scalars := GenerateScalars(size)
|
||||
points := GenerateAffinePoints(size)
|
||||
// generate random scalars
|
||||
scalars := bn254.GenerateScalars(size)
|
||||
points := bn254.GenerateAffinePoints(size)
|
||||
|
||||
// create a stream and allocate result pointer
|
||||
stream, _ := cuda_runtime.CreateStream()
|
||||
var p Projective
|
||||
// create a stream and allocate result pointer
|
||||
stream, _ := cr.CreateStream()
|
||||
var p bn254.Projective
|
||||
var out core.DeviceSlice
|
||||
_, e := out.MallocAsync(p.Size(), p.Size(), stream)
|
||||
// assign stream to device context
|
||||
out.MallocAsync(p.Size(), p.Size(), stream)
|
||||
// assign stream to device context
|
||||
cfg.Ctx.Stream = &stream
|
||||
|
||||
// execute MSM
|
||||
e = Msm(scalars, points, &cfg, out)
|
||||
// read result from device
|
||||
outHost := make(core.HostSlice[Projective], 1)
|
||||
// execute MSM
|
||||
bn254.Msm(scalars, points, &cfg, out)
|
||||
// read result from device
|
||||
outHost := make(core.HostSlice[bn254.Projective], 1)
|
||||
outHost.CopyFromDeviceAsync(&out, stream)
|
||||
out.FreeAsync(stream)
|
||||
|
||||
// sync the stream
|
||||
// sync the stream
|
||||
cr.SynchronizeStream(&stream)
|
||||
}
|
||||
})
|
||||
@@ -78,9 +89,9 @@ While the goroutine is locked to the host thread, the Go runtime will not assign
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- `deviceId int`: The ID of the device on which to run the provided function. Device IDs start from 0.
|
||||
- `funcToRun func(args ...any)`: The function to be executed on the specified device.
|
||||
- `args ...any`: Arguments to be passed to `funcToRun`.
|
||||
- **`deviceId int`**: The ID of the device on which to run the provided function. Device IDs start from 0.
|
||||
- **`funcToRun func(args ...any)`**: The function to be executed on the specified device.
|
||||
- **`args ...any`**: Arguments to be passed to `funcToRun`.
|
||||
|
||||
**Behavior:**
|
||||
|
||||
@@ -102,11 +113,11 @@ Sets the active device for the current host thread. All subsequent CUDA calls ma
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- `device int`: The ID of the device to set as the current device.
|
||||
- **`device int`**: The ID of the device to set as the current device.
|
||||
|
||||
**Returns:**
|
||||
|
||||
- `CudaError`: Error code indicating the success or failure of the operation.
|
||||
- **`CudaError`**: Error code indicating the success or failure of the operation.
|
||||
|
||||
### `GetDeviceCount`
|
||||
|
||||
@@ -114,7 +125,7 @@ Retrieves the number of CUDA-capable devices available on the host.
|
||||
|
||||
**Returns:**
|
||||
|
||||
- `(int, CudaError)`: The number of devices and an error code indicating the success or failure of the operation.
|
||||
- **`(int, CudaError)`**: The number of devices and an error code indicating the success or failure of the operation.
|
||||
|
||||
### `GetDevice`
|
||||
|
||||
@@ -122,7 +133,7 @@ Gets the ID of the currently active device for the calling host thread.
|
||||
|
||||
**Returns:**
|
||||
|
||||
- `(int, CudaError)`: The ID of the current device and an error code indicating the success or failure of the operation.
|
||||
- **`(int, CudaError)`**: The ID of the current device and an error code indicating the success or failure of the operation.
|
||||
|
||||
### `GetDeviceFromPointer`
|
||||
|
||||
@@ -130,10 +141,10 @@ Retrieves the device associated with a given pointer.
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- `ptr unsafe.Pointer`: Pointer to query.
|
||||
- **`ptr unsafe.Pointer`**: Pointer to query.
|
||||
|
||||
**Returns:**
|
||||
|
||||
- `int`: The device ID associated with the memory pointed to by `ptr`.
|
||||
- **`int`**: The device ID associated with the memory pointed to by `ptr`.
|
||||
|
||||
This documentation should provide a clear understanding of how to effectively manage multiple GPUs in Go applications using CUDA, with a particular emphasis on the `RunOnDevice` function for executing tasks on specific GPUs.
|
||||
@@ -10,31 +10,49 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/ingonyama-zk/icicle/wrappers/golang/core"
|
||||
cr "github.com/ingonyama-zk/icicle/wrappers/golang/cuda_runtime"
|
||||
"github.com/ingonyama-zk/icicle/wrappers/golang/core"
|
||||
cr "github.com/ingonyama-zk/icicle/wrappers/golang/cuda_runtime"
|
||||
bn254 "github.com/ingonyama-zk/icicle/wrappers/golang/curves/bn254"
|
||||
|
||||
"github.com/consensys/gnark-crypto/ecc/bn254/fr/fft"
|
||||
)
|
||||
|
||||
func Main() {
|
||||
// Obtain the default NTT configuration with a predefined coset generator.
|
||||
cfg := GetDefaultNttConfig()
|
||||
|
||||
// Define the size of the input scalars.
|
||||
size := 1 << 18
|
||||
func init() {
|
||||
cfg := bn254.GetDefaultNttConfig()
|
||||
initDomain(18, cfg)
|
||||
}
|
||||
|
||||
// Generate scalars for the NTT operation.
|
||||
scalars := GenerateScalars(size)
|
||||
func initDomain[T any](largestTestSize int, cfg core.NTTConfig[T]) core.IcicleError {
|
||||
rouMont, _ := fft.Generator(uint64(1 << largestTestSize))
|
||||
rou := rouMont.Bits()
|
||||
rouIcicle := bn254.ScalarField{}
|
||||
|
||||
// Set the direction of the NTT (forward or inverse).
|
||||
dir := core.KForward
|
||||
rouIcicle.FromLimbs(rou[:])
|
||||
e := bn254.InitDomain(rouIcicle, cfg.Ctx, false)
|
||||
return e
|
||||
}
|
||||
|
||||
// Allocate memory for the results of the NTT operation.
|
||||
results := make(core.HostSlice[ScalarField], size)
|
||||
func main() {
|
||||
// Obtain the default NTT configuration with a predefined coset generator.
|
||||
cfg := bn254.GetDefaultNttConfig()
|
||||
|
||||
// Perform the NTT operation.
|
||||
err := Ntt(scalars, dir, &cfg, results)
|
||||
if err != cr.CudaSuccess {
|
||||
panic("NTT operation failed")
|
||||
}
|
||||
// Define the size of the input scalars.
|
||||
size := 1 << 18
|
||||
|
||||
// Generate scalars for the NTT operation.
|
||||
scalars := bn254.GenerateScalars(size)
|
||||
|
||||
// Set the direction of the NTT (forward or inverse).
|
||||
dir := core.KForward
|
||||
|
||||
// Allocate memory for the results of the NTT operation.
|
||||
results := make(core.HostSlice[bn254.ScalarField], size)
|
||||
|
||||
// Perform the NTT operation.
|
||||
err := bn254.Ntt(scalars, dir, &cfg, results)
|
||||
if err.CudaErrorCode != cr.CudaSuccess {
|
||||
panic("NTT operation failed")
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
@@ -46,14 +64,14 @@ func Ntt[T any](scalars core.HostOrDeviceSlice, dir core.NTTDir, cfg *core.NTTCo
|
||||
|
||||
### Parameters
|
||||
|
||||
- **scalars**: A slice containing the input scalars for the transform. It can reside either in host memory or device memory.
|
||||
- **dir**: The direction of the NTT operation (`KForward` or `KInverse`).
|
||||
- **cfg**: A pointer to an `NTTConfig` object, containing configuration options for the NTT operation.
|
||||
- **results**: A slice where the results of the NTT operation will be stored. This slice can be in host or device memory.
|
||||
- **`scalars`**: A slice containing the input scalars for the transform. It can reside either in host memory or device memory.
|
||||
- **`dir`**: The direction of the NTT operation (`KForward` or `KInverse`).
|
||||
- **`cfg`**: A pointer to an `NTTConfig` object, containing configuration options for the NTT operation.
|
||||
- **`results`**: A slice where the results of the NTT operation will be stored. This slice can be in host or device memory.
|
||||
|
||||
### Return Value
|
||||
|
||||
- **CudaError**: Returns a CUDA error code indicating the success or failure of the NTT operation.
|
||||
- **`CudaError`**: Returns a CUDA error code indicating the success or failure of the NTT operation.
|
||||
|
||||
## NTT Configuration (NTTConfig)
|
||||
|
||||
@@ -75,15 +93,15 @@ type NTTConfig[T any] struct {
|
||||
|
||||
### Fields
|
||||
|
||||
- **Ctx**: Device context containing details like device ID and stream ID.
|
||||
- **CosetGen**: Coset generator used for coset (i)NTTs, defaulting to no coset being used.
|
||||
- **BatchSize**: The number of NTTs to compute in one operation, defaulting to 1.
|
||||
- **ColumnsBatch**: If true the function will compute the NTTs over the columns of the input matrix and not over the rows. Defaults to `false`.
|
||||
- **Ordering**: Ordering of inputs and outputs (`KNN`, `KNR`, `KRN`, `KRR`, `KMN`, `KNM`), affecting how data is arranged.
|
||||
- **areInputsOnDevice**: Indicates if input scalars are located on the device.
|
||||
- **areOutputsOnDevice**: Indicates if results are stored on the device.
|
||||
- **IsAsync**: Controls whether the NTT operation runs asynchronously.
|
||||
- **NttAlgorithm**: Explicitly select the NTT algorithm. Default value: Auto (the implementation selects radix-2 or mixed-radix algorithm based on heuristics).
|
||||
- **`Ctx`**: Device context containing details like device ID and stream ID.
|
||||
- **`CosetGen`**: Coset generator used for coset (i)NTTs, defaulting to no coset being used.
|
||||
- **`BatchSize`**: The number of NTTs to compute in one operation, defaulting to 1.
|
||||
- **`ColumnsBatch`**: If true the function will compute the NTTs over the columns of the input matrix and not over the rows. Defaults to `false`.
|
||||
- **`Ordering`**: Ordering of inputs and outputs (`KNN`, `KNR`, `KRN`, `KRR`, `KMN`, `KNM`), affecting how data is arranged.
|
||||
- **`areInputsOnDevice`**: Indicates if input scalars are located on the device.
|
||||
- **`areOutputsOnDevice`**: Indicates if results are stored on the device.
|
||||
- **`IsAsync`**: Controls whether the NTT operation runs asynchronously.
|
||||
- **`NttAlgorithm`**: Explicitly select the NTT algorithm. Default value: Auto (the implementation selects radix-2 or mixed-radix algorithm based on heuristics).
|
||||
|
||||
### Default Configuration
|
||||
|
||||
@@ -102,3 +120,36 @@ func InitDomain(primitiveRoot ScalarField, ctx cr.DeviceContext, fastTwiddles bo
|
||||
```
|
||||
|
||||
This function initializes the domain with a given primitive root, optionally using fast twiddle factors to optimize the computation.
|
||||
|
||||
### Releasing the domain
|
||||
|
||||
The `ReleaseDomain` function is responsible for releasing the resources associated with a specific domain in the CUDA device context.
|
||||
|
||||
```go
|
||||
func ReleaseDomain(ctx cr.DeviceContext) core.IcicleError
|
||||
```
|
||||
|
||||
### Parameters
|
||||
|
||||
- **`ctx`**: a reference to the `DeviceContext` object, which represents the CUDA device context.
|
||||
|
||||
### Return Value
|
||||
|
||||
The function returns a `core.IcicleError`, which represents the result of the operation. If the operation is successful, the function returns `core.IcicleErrorCode(0)`.
|
||||
|
||||
### Example
|
||||
|
||||
```go
|
||||
import (
|
||||
"github.com/icicle-crypto/icicle-core/cr"
|
||||
"github.com/icicle-crypto/icicle-core/core"
|
||||
)
|
||||
|
||||
func example() {
|
||||
cfg := GetDefaultNttConfig()
|
||||
err := ReleaseDomain(cfg.Ctx)
|
||||
if err != nil {
|
||||
// Handle the error
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
@@ -1,105 +1,111 @@
|
||||
# Vector Operations
|
||||
|
||||
## Overview
|
||||
Icicle is exposing a number of vector operations which a user can control:
|
||||
* The VecOps API provides efficient vector operations such as addition, subtraction, and multiplication.
|
||||
* MatrixTranspose API allows a user to perform a transpose on a vector representation of a matrix
|
||||
|
||||
The VecOps API provides efficient vector operations such as addition, subtraction, and multiplication.
|
||||
|
||||
## Example
|
||||
## VecOps API Documentation
|
||||
### Example
|
||||
|
||||
### Vector addition
|
||||
#### Vector addition
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/ingonyama-zk/icicle/wrappers/golang/core"
|
||||
cr "github.com/ingonyama-zk/icicle/wrappers/golang/cuda_runtime"
|
||||
"github.com/ingonyama-zk/icicle/wrappers/golang/core"
|
||||
cr "github.com/ingonyama-zk/icicle/wrappers/golang/cuda_runtime"
|
||||
bn254 "github.com/ingonyama-zk/icicle/wrappers/golang/curves/bn254"
|
||||
)
|
||||
|
||||
func main() {
|
||||
testSize := 1 << 12
|
||||
a := GenerateScalars(testSize)
|
||||
b := GenerateScalars(testSize)
|
||||
out := make(core.HostSlice[ScalarField], testSize)
|
||||
cfg := core.DefaultVecOpsConfig()
|
||||
testSize := 1 << 12
|
||||
a := bn254.GenerateScalars(testSize)
|
||||
b := bn254.GenerateScalars(testSize)
|
||||
out := make(core.HostSlice[bn254.ScalarField], testSize)
|
||||
cfg := core.DefaultVecOpsConfig()
|
||||
|
||||
// Perform vector addition
|
||||
err := VecOp(a, b, out, cfg, core.Add)
|
||||
if err != cr.CudaSuccess {
|
||||
panic("Vector addition failed")
|
||||
}
|
||||
// Perform vector multiplication
|
||||
err := bn254.VecOp(a, b, out, cfg, core.Add)
|
||||
if err != cr.CudaSuccess {
|
||||
panic("Vector addition failed")
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Vector Subtraction
|
||||
#### Vector Subtraction
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/ingonyama-zk/icicle/wrappers/golang/core"
|
||||
cr "github.com/ingonyama-zk/icicle/wrappers/golang/cuda_runtime"
|
||||
"github.com/ingonyama-zk/icicle/wrappers/golang/core"
|
||||
cr "github.com/ingonyama-zk/icicle/wrappers/golang/cuda_runtime"
|
||||
bn254 "github.com/ingonyama-zk/icicle/wrappers/golang/curves/bn254"
|
||||
)
|
||||
|
||||
func main() {
|
||||
testSize := 1 << 12
|
||||
a := GenerateScalars(testSize)
|
||||
b := GenerateScalars(testSize)
|
||||
out := make(core.HostSlice[ScalarField], testSize)
|
||||
cfg := core.DefaultVecOpsConfig()
|
||||
testSize := 1 << 12
|
||||
a := bn254.GenerateScalars(testSize)
|
||||
b := bn254.GenerateScalars(testSize)
|
||||
out := make(core.HostSlice[bn254.ScalarField], testSize)
|
||||
cfg := core.DefaultVecOpsConfig()
|
||||
|
||||
// Perform vector subtraction
|
||||
err := VecOp(a, b, out, cfg, core.Sub)
|
||||
if err != cr.CudaSuccess {
|
||||
panic("Vector subtraction failed")
|
||||
}
|
||||
// Perform vector multiplication
|
||||
err := bn254.VecOp(a, b, out, cfg, core.Sub)
|
||||
if err != cr.CudaSuccess {
|
||||
panic("Vector subtraction failed")
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Vector Multiplication
|
||||
#### Vector Multiplication
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/ingonyama-zk/icicle/wrappers/golang/core"
|
||||
cr "github.com/ingonyama-zk/icicle/wrappers/golang/cuda_runtime"
|
||||
"github.com/ingonyama-zk/icicle/wrappers/golang/core"
|
||||
cr "github.com/ingonyama-zk/icicle/wrappers/golang/cuda_runtime"
|
||||
bn254 "github.com/ingonyama-zk/icicle/wrappers/golang/curves/bn254"
|
||||
)
|
||||
|
||||
func main() {
|
||||
testSize := 1 << 12
|
||||
a := GenerateScalars(testSize)
|
||||
b := GenerateScalars(testSize)
|
||||
out := make(core.HostSlice[ScalarField], testSize)
|
||||
cfg := core.DefaultVecOpsConfig()
|
||||
testSize := 1 << 12
|
||||
a := bn254.GenerateScalars(testSize)
|
||||
b := bn254.GenerateScalars(testSize)
|
||||
out := make(core.HostSlice[bn254.ScalarField], testSize)
|
||||
cfg := core.DefaultVecOpsConfig()
|
||||
|
||||
// Perform vector multiplication
|
||||
err := VecOp(a, b, out, cfg, core.Mul)
|
||||
if err != cr.CudaSuccess {
|
||||
panic("Vector multiplication failed")
|
||||
}
|
||||
// Perform vector multiplication
|
||||
err := bn254.VecOp(a, b, out, cfg, core.Mul)
|
||||
if err != cr.CudaSuccess {
|
||||
panic("Vector multiplication failed")
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## VecOps Method
|
||||
### VecOps Method
|
||||
|
||||
```go
|
||||
func VecOp(a, b, out core.HostOrDeviceSlice, config core.VecOpsConfig, op core.VecOps) (ret cr.CudaError)
|
||||
```
|
||||
|
||||
### Parameters
|
||||
#### Parameters
|
||||
|
||||
- **a**: The first input vector.
|
||||
- **b**: The second input vector.
|
||||
- **out**: The output vector where the result of the operation will be stored.
|
||||
- **config**: A `VecOpsConfig` object containing various configuration options for the vector operations.
|
||||
- **op**: The operation to perform, specified as one of the constants (`Sub`, `Add`, `Mul`) from the `VecOps` type.
|
||||
- **`a`**: The first input vector.
|
||||
- **`b`**: The second input vector.
|
||||
- **`out`**: The output vector where the result of the operation will be stored.
|
||||
- **`config`**: A `VecOpsConfig` object containing various configuration options for the vector operations.
|
||||
- **`op`**: The operation to perform, specified as one of the constants (`Sub`, `Add`, `Mul`) from the `VecOps` type.
|
||||
|
||||
### Return Value
|
||||
#### Return Value
|
||||
|
||||
- **CudaError**: Returns a CUDA error code indicating the success or failure of the vector operation.
|
||||
- **`CudaError`**: Returns a CUDA error code indicating the success or failure of the vector operation.
|
||||
|
||||
## VecOpsConfig
|
||||
### VecOpsConfig
|
||||
|
||||
The `VecOpsConfig` structure holds configuration parameters for the vector operations, allowing customization of its behavior.
|
||||
|
||||
@@ -109,24 +115,72 @@ type VecOpsConfig struct {
|
||||
isAOnDevice bool
|
||||
isBOnDevice bool
|
||||
isResultOnDevice bool
|
||||
IsResultMontgomeryForm bool
|
||||
IsAsync bool
|
||||
}
|
||||
```
|
||||
|
||||
### Fields
|
||||
#### Fields
|
||||
|
||||
- **Ctx**: Device context containing details like device ID and stream ID.
|
||||
- **isAOnDevice**: Indicates if vector `a` is located on the device.
|
||||
- **isBOnDevice**: Indicates if vector `b` is located on the device.
|
||||
- **isResultOnDevice**: Specifies where the result vector should be stored (device or host memory).
|
||||
- **IsResultMontgomeryForm**: Determines if the result vector should be in Montgomery form.
|
||||
- **IsAsync**: Controls whether the vector operation runs asynchronously.
|
||||
|
||||
### Default Configuration
|
||||
#### Default Configuration
|
||||
|
||||
Use `DefaultVecOpsConfig` to obtain a default configuration, customizable as needed.
|
||||
|
||||
```go
|
||||
func DefaultVecOpsConfig() VecOpsConfig
|
||||
```
|
||||
|
||||
## MatrixTranspose API Documentation
|
||||
|
||||
This section describes the functionality of the `TransposeMatrix` function used for matrix transposition.
|
||||
|
||||
The function takes a matrix represented as a 1D slice and transposes it, storing the result in another 1D slice.
|
||||
|
||||
### Function
|
||||
|
||||
```go
|
||||
func TransposeMatrix(in, out core.HostOrDeviceSlice, columnSize, rowSize int, ctx cr.DeviceContext, onDevice, isAsync bool) (ret core.IcicleError)
|
||||
```
|
||||
|
||||
## Parameters
|
||||
|
||||
- **`in`**: The input matrix is a `core.HostOrDeviceSlice`, stored as a 1D slice.
|
||||
- **`out`**: The output matrix is a `core.HostOrDeviceSlice`, which will be the transpose of the input matrix, stored as a 1D slice.
|
||||
- **`columnSize`**: The number of columns in the input matrix.
|
||||
- **`rowSize`**: The number of rows in the input matrix.
|
||||
- **`ctx`**: The device context `cr.DeviceContext` to be used for the matrix transpose operation.
|
||||
- **`onDevice`**: Indicates whether the input and output slices are stored on the device (GPU) or the host (CPU).
|
||||
- **`isAsync`**: Indicates whether the matrix transpose operation should be executed asynchronously.
|
||||
|
||||
## Return Value
|
||||
|
||||
The function returns a `core.IcicleError` value, which represents the result of the matrix transpose operation. If the operation is successful, the returned value will be `0`.
|
||||
|
||||
## Example Usage
|
||||
|
||||
```go
|
||||
var input = make(core.HostSlice[ScalarField], 20)
|
||||
var output = make(core.HostSlice[ScalarField], 20)
|
||||
|
||||
// Populate the input matrix
|
||||
// ...
|
||||
|
||||
// Get device context
|
||||
ctx, _ := cr.GetDefaultDeviceContext()
|
||||
|
||||
// Transpose the matrix
|
||||
err := TransposeMatrix(input, output, 5, 4, ctx, false, false)
|
||||
if err.IcicleErrorCode != core.IcicleErrorCode(0) {
|
||||
// Handle the error
|
||||
}
|
||||
|
||||
// Use the transposed matrix
|
||||
// ...
|
||||
```
|
||||
|
||||
In this example, the `TransposeMatrix` function is used to transpose a 5x4 matrix stored in a 1D slice. The input and output slices are stored on the host (CPU), and the operation is executed synchronously.
|
||||
@@ -1,6 +1,6 @@
|
||||
# ICICLE integrated provers
|
||||
|
||||
ICICLE has been used by companies and projects such as [Celer Network](https://github.com/celer-network), [Consensys Gnark](https://github.com/Consensys/gnark), [EZKL](https://blog.ezkl.xyz/post/acceleration/) and others to accelerate their ZK proving pipeline.
|
||||
ICICLE has been used by companies and projects such as [Celer Network](https://github.com/celer-network), [Consensys Gnark](https://github.com/Consensys/gnark), [EZKL](https://blog.ezkl.xyz/post/acceleration/), [ZKWASM](https://twitter.com/DelphinusLab/status/1762604988797513915) and others to accelerate their ZK proving pipeline.
|
||||
|
||||
Many of these integrations have been a collaboration between Ingonyama and the integrating company. We have learned a lot about designing GPU based ZK provers.
|
||||
|
||||
|
||||
@@ -8,24 +8,24 @@ This guide is oriented towards developers who want to start writing code with th
|
||||
|
||||
The diagram above displays the general architecture of ICICLE and the API layers that exist. The CUDA API, which we also call ICICLE Core, is the lowest level and is comprised of CUDA kernels which implement all primitives such as MSM as well as C++ wrappers which expose these methods for different curves.
|
||||
|
||||
ICICLE Core compiles into a static library. This library can be used with our official Golang and Rust wrappers or you can implement a wrapper for it in any language.
|
||||
ICICLE Core compiles into a static library. This library can be used with our official Golang and Rust wrappers or linked with your C++ project. You can also implement a wrapper for it in any other language.
|
||||
|
||||
Based on this dependency architecture, the ICICLE repository has three main sections, each of which is independent from the other.
|
||||
Based on this dependency architecture, the ICICLE repository has three main sections:
|
||||
|
||||
- ICICLE core
|
||||
- ICICLE Rust bindings
|
||||
- ICICLE Golang bindings
|
||||
- [ICICLE Core](#icicle-core)
|
||||
- [ICICLE Rust bindings](#icicle-rust-and-golang-bindings)
|
||||
- [ICICLE Golang bindings](#icicle-rust-and-golang-bindings)
|
||||
|
||||
### ICICLE Core
|
||||
|
||||
[ICICLE core](https://github.com/ingonyama-zk/icicle/tree/main/icicle) contains all the low level CUDA code implementing primitives such as [points](https://github.com/ingonyama-zk/icicle/tree/main/icicle/primitives) and [MSM](https://github.com/ingonyama-zk/icicle/tree/main/icicle/appUtils/msm). There also exists higher level C++ wrappers to expose the low level CUDA primitives ([example](https://github.com/ingonyama-zk/icicle/blob/c1a32a9879a7612916e05aa3098f76144de4109e/icicle/appUtils/msm/msm.cu#L1)).
|
||||
[ICICLE Core](/icicle/core) is a library that directly works with GPU by defining CUDA kernels and algorithms that invoke them. It contains code for [fast field arithmetic](https://github.com/ingonyama-zk/icicle/tree/main/icicle/include/field/field.cuh), cryptographic primitives used in ZK such as [NTT](https://github.com/ingonyama-zk/icicle/tree/main/icicle/src/ntt/), [MSM](https://github.com/ingonyama-zk/icicle/tree/main/icicle/src/msm/), [Poseidon Hash](https://github.com/ingonyama-zk/icicle/tree/main/icicle/src/poseidon/), [Polynomials](https://github.com/ingonyama-zk/icicle/tree/main/icicle/src/polynomials/) and others.
|
||||
|
||||
ICICLE Core would typically be compiled into a static library and used in a third party language such as Rust or Golang.
|
||||
ICICLE Core would typically be compiled into a static library and either used in a third party language such as Rust or Golang, or linked with your own C++ project.
|
||||
|
||||
### ICICLE Rust and Golang bindings
|
||||
|
||||
- [ICICLE Rust bindings](https://github.com/ingonyama-zk/icicle/tree/main/wrappers/rust)
|
||||
- [ICICLE Golang bindings](https://github.com/ingonyama-zk/icicle/tree/main/goicicle)
|
||||
- [ICICLE Rust bindings](/icicle/rust-bindings)
|
||||
- [ICICLE Golang bindings](/icicle/golang-bindings)
|
||||
|
||||
These bindings allow you to easily use ICICLE in a Rust or Golang project. Setting up Golang bindings requires a bit of extra steps compared to the Rust bindings which utilize the `cargo build` tool.
|
||||
|
||||
@@ -33,6 +33,12 @@ These bindings allow you to easily use ICICLE in a Rust or Golang project. Setti
|
||||
|
||||
This guide assumes that you have a Linux or Windows machine with an Nvidia GPU installed. If you don't have access to an Nvidia GPU you can access one for free on [Google Colab](https://colab.google/).
|
||||
|
||||
:::info note
|
||||
|
||||
ICICLE can only run on Linux or Windows. **MacOS is not supported**.
|
||||
|
||||
:::
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- NVCC (version 12.0 or newer)
|
||||
@@ -50,9 +56,9 @@ If you don't wish to install these prerequisites you can follow this tutorial us
|
||||
|
||||
### Setting up ICICLE and running tests
|
||||
|
||||
The objective of this guide is to make sure you can run the ICICLE Core, Rust and Golang tests. Achieving this will ensure you know how to setup ICICLE and run a ICICLE program. For simplicity, we will be using the ICICLE docker container as our environment, however, you may install the prerequisites on your machine and follow the same commands in your terminal.
|
||||
The objective of this guide is to make sure you can run the ICICLE Core, Rust and Golang tests. Achieving this will ensure you know how to setup ICICLE and run an ICICLE program. For simplicity, we will be using the ICICLE docker container as our environment, however, you may install the prerequisites on your machine and [skip](#icicle-core-1) the docker section.
|
||||
|
||||
#### Setting up our environment
|
||||
#### Setting up environment with Docker
|
||||
|
||||
Lets begin by cloning the ICICLE repository:
|
||||
|
||||
@@ -105,29 +111,23 @@ ICICLE Core is found under [`<project_root>/icicle`](https://github.com/ingonyam
|
||||
cd icicle
|
||||
```
|
||||
|
||||
We are going to compile ICICLE for a specific curve
|
||||
For this example, we are going to compile ICICLE for a `bn254` curve. However other compilation strategies are supported.
|
||||
|
||||
```sh
|
||||
mkdir -p build
|
||||
cmake -S . -B build -DCURVE=bn254 -DBUILD_TESTS=ON
|
||||
cmake --build build
|
||||
cmake --build build -j
|
||||
```
|
||||
|
||||
`-DBUILD_TESTS=ON` compiles the tests, without this flag `ctest` won't work.
|
||||
`-DCURVE=bn254` tells the compiler which curve to build. You can find a list of supported curves [here](https://github.com/ingonyama-zk/icicle/tree/main/icicle/curves).
|
||||
`-DBUILD_TESTS` option compiles the tests, without this flag `ctest` won't work.
|
||||
`-DCURVE` option tells the compiler which curve to build. You can find a list of supported curves [here](https://github.com/ingonyama-zk/icicle/tree/main/icicle/cmake/CurvesCommon.cmake#L2).
|
||||
|
||||
The output in `build` folder should include the static libraries for the compiled curve.
|
||||
|
||||
:::info
|
||||
|
||||
Make sure to only use `-DBUILD_TESTS=ON` for running tests as the archive output will only be available when `-DBUILD_TESTS=ON` is not supplied.
|
||||
|
||||
:::
|
||||
|
||||
To run the test
|
||||
|
||||
```sh
|
||||
cd build
|
||||
cd build/tests
|
||||
ctest
|
||||
```
|
||||
|
||||
@@ -169,8 +169,24 @@ Golang is WIP in v1, coming soon. Please checkout a previous [release v0.1.0](ht
|
||||
|
||||
### Running ICICLE examples
|
||||
|
||||
ICICLE examples can be found [here](https://github.com/ingonyama-zk/icicle-examples) these examples cover some simple use cases using C++, rust and golang.
|
||||
ICICLE examples can be found [here](https://github.com/ingonyama-zk/icicle/tree/main/examples). These examples cover some simple use cases using C++, Rust and Golang.
|
||||
|
||||
Lets run one of our C++ examples, in this case the [MSM example](https://github.com/ingonyama-zk/icicle/blob/main/examples/c%2B%2B/msm/example.cu).
|
||||
|
||||
```sh
|
||||
cd examples/c++/msm
|
||||
./compile.sh
|
||||
./run.sh
|
||||
```
|
||||
|
||||
:::tip
|
||||
|
||||
Read through the compile.sh and CMakeLists.txt to understand how to link your own C++ project with ICICLE
|
||||
|
||||
:::
|
||||
|
||||
|
||||
#### Running with Docker
|
||||
In each example directory, ZK-container files are located in a subdirectory `.devcontainer`.
|
||||
|
||||
```sh
|
||||
@@ -180,21 +196,6 @@ msm/
|
||||
└── Dockerfile
|
||||
```
|
||||
|
||||
Lets run one of our C++ examples, in this case the [MSM example](https://github.com/ingonyama-zk/icicle-examples/blob/main/c%2B%2B/msm/example.cu).
|
||||
|
||||
Clone the repository
|
||||
|
||||
```sh
|
||||
git clone https://github.com/ingonyama-zk/icicle-examples.git
|
||||
cd icicle-examples
|
||||
```
|
||||
|
||||
Enter the test directory
|
||||
|
||||
```sh
|
||||
cd c++/msm
|
||||
```
|
||||
|
||||
Now lets build our docker file and run the test inside it. Make sure you have installed the [optional prerequisites](#optional-prerequisites).
|
||||
|
||||
```sh
|
||||
@@ -207,54 +208,11 @@ Lets start and enter the container
|
||||
docker run -it --rm --gpus all -v .:/icicle-example icicle-example-msm
|
||||
```
|
||||
|
||||
to run the example
|
||||
Inside the container you can run the same commands:
|
||||
|
||||
```sh
|
||||
rm -rf build
|
||||
mkdir -p build
|
||||
cmake -S . -B build
|
||||
cmake --build build
|
||||
./build/example
|
||||
./compile.sh
|
||||
./run.sh
|
||||
```
|
||||
|
||||
You can now experiment with our other examples, perhaps try to run a rust or golang example next.
|
||||
|
||||
## Writing new bindings for ICICLE
|
||||
|
||||
Since ICICLE Core is written in CUDA / C++ it's really simple to generate static libraries. These static libraries can be installed on any system and called by higher level languages such as Golang.
|
||||
|
||||
Static libraries can be loaded into memory once and used by multiple programs, reducing memory usage and potentially improving performance. They also allow you to separate functionality into distinct modules, so your static library may need to compile only the specific features that you want to use.
|
||||
|
||||
Let's review the Golang bindings, since they are a pretty verbose example (compared to Rust, which hides it pretty well) of using static libraries. Golang has a library named `CGO` which can be used to link static libraries. Here's a basic example on how you can use cgo to link these libraries:
|
||||
|
||||
```go
|
||||
/*
|
||||
#cgo LDFLAGS: -L/path/to/shared/libs -lbn254 -lbls12_381 -lbls12_377 -lbw6_671
|
||||
#include "icicle.h" // make sure you use the correct header file(s)
|
||||
*/
|
||||
import "C"
|
||||
|
||||
func main() {
|
||||
// Now you can call the C functions from the ICICLE libraries.
|
||||
// Note that C function calls are prefixed with 'C.' in Go code.
|
||||
|
||||
out := (*C.BN254_projective_t)(unsafe.Pointer(p))
|
||||
in := (*C.BN254_affine_t)(unsafe.Pointer(affine))
|
||||
|
||||
C.projective_from_affine_bn254(out, in)
|
||||
}
|
||||
```
|
||||
|
||||
The comments on the first line tell `CGO` which libraries to import as well as which header files to include. You can then call methods which are part of the static library and defined in the header file, `C.projective_from_affine_bn254` is an example.
|
||||
|
||||
If you wish to create your own bindings for a language of your choice we suggest you start by investigating how you can call static libraries.
|
||||
|
||||
### ICICLE Adapters
|
||||
|
||||
One of the core ideas behind ICICLE is that developers can gradually accelerate their provers. Many protocols are written using other cryptographic libraries and completely replacing them may be complex and time consuming.
|
||||
|
||||
Therefore we offer adapters for various popular libraries, these adapters allow us to convert points and scalars between different formats defined by various libraries. Here is a list:
|
||||
|
||||
Golang adapters:
|
||||
|
||||
- [Gnark crypto adapter](https://github.com/ingonyama-zk/iciclegnark)
|
||||
You can now experiment with our other examples, perhaps try to run a rust or golang example next.
|
||||
27
docs/docs/icicle/polynomials/ffi.uml
Normal file
27
docs/docs/icicle/polynomials/ffi.uml
Normal file
@@ -0,0 +1,27 @@
|
||||
@startuml
|
||||
skinparam componentStyle uml2
|
||||
|
||||
' Define Components
|
||||
component "C++ Template\nComponent" as CppTemplate {
|
||||
[Parameterizable Interface]
|
||||
}
|
||||
component "C API Wrapper\nComponent" as CApiWrapper {
|
||||
[C API Interface]
|
||||
}
|
||||
component "Rust Code\nComponent" as RustCode {
|
||||
[Macro Interface\n(Template Instantiation)]
|
||||
}
|
||||
|
||||
' Define Artifact
|
||||
artifact "Static Library\n«artifact»" as StaticLib
|
||||
|
||||
' Connections
|
||||
CppTemplate -down-> CApiWrapper : Instantiates
|
||||
CApiWrapper .down.> StaticLib : Compiles into
|
||||
RustCode -left-> StaticLib : Links against\nand calls via FFI
|
||||
|
||||
' Notes
|
||||
note right of CppTemplate : Generic C++\ntemplate implementation
|
||||
note right of CApiWrapper : Exposes C API for FFI\nto Rust/Go
|
||||
note right of RustCode : Uses macros to\ninstantiate templates
|
||||
@enduml
|
||||
86
docs/docs/icicle/polynomials/hw_backends.uml
Normal file
86
docs/docs/icicle/polynomials/hw_backends.uml
Normal file
@@ -0,0 +1,86 @@
|
||||
@startuml
|
||||
|
||||
' Define Interface for Polynomial Backend Operations
|
||||
interface IPolynomialBackend {
|
||||
+add()
|
||||
+subtract()
|
||||
+multiply()
|
||||
+divide()
|
||||
+evaluate()
|
||||
}
|
||||
|
||||
' Define Interface for Polynomial Context (State Management)
|
||||
interface IPolynomialContext {
|
||||
+initFromCoeffs()
|
||||
+initFromEvals()
|
||||
+getCoeffs()
|
||||
+getEvals()
|
||||
}
|
||||
|
||||
' PolynomialAPI now uses two strategies: Backend and Context
|
||||
class PolynomialAPI {
|
||||
-backendStrategy: IPolynomialBackend
|
||||
-contextStrategy: IPolynomialContext
|
||||
-setBackendStrategy(IPolynomialBackend)
|
||||
-setContextStrategy(IPolynomialContext)
|
||||
+add()
|
||||
+subtract()
|
||||
+multiply()
|
||||
+divide()
|
||||
+evaluate()
|
||||
}
|
||||
|
||||
' Backend Implementations
|
||||
class GPUPolynomialBackend implements IPolynomialBackend {
|
||||
#gpuResources: Resource
|
||||
+add()
|
||||
+subtract()
|
||||
+multiply()
|
||||
+divide()
|
||||
+evaluate()
|
||||
}
|
||||
|
||||
class ZPUPolynomialBackend implements IPolynomialBackend {
|
||||
#zpuResources: Resource
|
||||
+add()
|
||||
+subtract()
|
||||
+multiply()
|
||||
+divide()
|
||||
+evaluate()
|
||||
}
|
||||
|
||||
class TracerPolynomialBackend implements IPolynomialBackend {
|
||||
#traceData: Data
|
||||
+add()
|
||||
+subtract()
|
||||
+multiply()
|
||||
+divide()
|
||||
+evaluate()
|
||||
}
|
||||
|
||||
' Context Implementations (Placeholder for actual implementation)
|
||||
class GPUContext implements IPolynomialContext {
|
||||
+initFromCoeffs()
|
||||
+initFromEvals()
|
||||
+getCoeffs()
|
||||
+getEvals()
|
||||
}
|
||||
|
||||
class ZPUContext implements IPolynomialContext {
|
||||
+initFromCoeffs()
|
||||
+initFromEvals()
|
||||
+getCoeffs()
|
||||
+getEvals()
|
||||
}
|
||||
|
||||
class TracerContext implements IPolynomialContext {
|
||||
+initFromCoeffs()
|
||||
+initFromEvals()
|
||||
+getCoeffs()
|
||||
+getEvals()
|
||||
}
|
||||
|
||||
' Relationships
|
||||
PolynomialAPI o-- IPolynomialBackend : uses
|
||||
PolynomialAPI o-- IPolynomialContext : uses
|
||||
@enduml
|
||||
373
docs/docs/icicle/polynomials/overview.md
Normal file
373
docs/docs/icicle/polynomials/overview.md
Normal file
@@ -0,0 +1,373 @@
|
||||
# Polynomial API Overview
|
||||
|
||||
## Introduction
|
||||
|
||||
The Polynomial API offers a robust framework for polynomial operations within a computational environment. It's designed for flexibility and efficiency, supporting a broad range of operations like arithmetic, evaluation, and manipulation, all while abstracting from the computation and storage specifics. This enables adaptability to various backend technologies, employing modern C++ practices.
|
||||
|
||||
## Key Features
|
||||
|
||||
### Backend Agnostic Architecture
|
||||
Our API is structured to be independent of any specific computational backend. While a CUDA backend is currently implemented, the architecture facilitates easy integration of additional backends. This capability allows users to perform polynomial operations without the need to tailor their code to specific hardware, enhancing code portability and scalability.
|
||||
|
||||
### Templating in the Polynomial API
|
||||
|
||||
The Polynomial API is designed with a templated structure to accommodate different data types for coefficients, the domain, and images. This flexibility allows the API to be adapted for various computational needs and types of data.
|
||||
|
||||
```cpp
|
||||
template <typename Coeff, typename Domain = Coeff, typename Image = Coeff>
|
||||
class Polynomial {
|
||||
// Polynomial class definition
|
||||
}
|
||||
```
|
||||
|
||||
In this template:
|
||||
|
||||
- **`Coeff`**: Represents the type of the coefficients of the polynomial.
|
||||
- **`Domain`**: Specifies the type for the input values over which the polynomial is evaluated. By default, it is the same as the type of the coefficients but can be specified separately to accommodate different computational contexts.
|
||||
- **`Image`**: Defines the type of the output values of the polynomial. This is typically the same as the coefficients.
|
||||
|
||||
#### Default instantiation
|
||||
```cpp
|
||||
extern template class Polynomial<scalar_t>;
|
||||
```
|
||||
|
||||
#### Extended use cases
|
||||
The templated nature of the Polynomial API also supports more complex scenarios. For example, coefficients and images could be points on an elliptic curve (EC points), which are useful in cryptographic applications and advanced algebraic structures. This approach allows the API to be extended easily to support new algebraic constructions without modifying the core implementation.
|
||||
|
||||
### Supported Operations
|
||||
The Polynomial class encapsulates a polynomial, providing a variety of operations:
|
||||
- **Construction**: Create polynomials from coefficients or evaluations on roots-of-unity domains.
|
||||
- **Arithmetic Operations**: Perform addition, subtraction, multiplication, and division.
|
||||
- **Evaluation**: Directly evaluate polynomials at specific points or across a domain.
|
||||
- **Manipulation**: Features like slicing polynomials, adding or subtracting monomials inplace, and computing polynomial degrees.
|
||||
- **Memory Access**: Access internal states or obtain device-memory views of polynomials.
|
||||
|
||||
## Usage
|
||||
|
||||
This section outlines how to use the Polynomial API in C++. Bindings for Rust and Go are detailed under the Bindings sections.
|
||||
|
||||
### Backend Initialization
|
||||
Initialization with an appropriate factory is required to configure the computational context and backend.
|
||||
|
||||
```cpp
|
||||
#include "polynomials/polynomials.h"
|
||||
#include "polynomials/cuda_backend/polynomial_cuda_backend.cuh"
|
||||
|
||||
// Initialize with a CUDA backend
|
||||
Polynomial::initialize(std::make_shared<CUDAPolynomialFactory>());
|
||||
```
|
||||
|
||||
:::note Icicle is built to a library per field/curve. Initialization must be done per library. That is, applications linking to multiple curves/fields should do it per curve/field.
|
||||
:::
|
||||
|
||||
### Construction
|
||||
Polynomials can be constructed from coefficients, from evaluations on roots-of-unity domains, or by cloning existing polynomials.
|
||||
|
||||
```cpp
|
||||
// Construction
|
||||
static Polynomial from_coefficients(const Coeff* coefficients, uint64_t nof_coefficients);
|
||||
static Polynomial from_rou_evaluations(const Image* evaluations, uint64_t nof_evaluations);
|
||||
// Clone the polynomial
|
||||
Polynomial clone() const;
|
||||
```
|
||||
|
||||
Example:
|
||||
|
||||
```cpp
|
||||
auto p_from_coeffs = Polynomial_t::from_coefficients(coeff /* :scalar_t* */, nof_coeffs);
|
||||
auto p_from_rou_evals = Polynomial_t::from_rou_evaluations(rou_evals /* :scalar_t* */, nof_evals);
|
||||
auto p_cloned = p.clone(); // p_cloned and p do not share memory
|
||||
```
|
||||
|
||||
:::note
|
||||
The coefficients or evaluations may be allocated either on host or device memory. In both cases the memory is copied to backend device.
|
||||
:::
|
||||
|
||||
### Arithmetic
|
||||
Constructed polynomials can be used for various arithmetic operations:
|
||||
|
||||
```cpp
|
||||
// Addition
|
||||
Polynomial operator+(const Polynomial& rhs) const;
|
||||
Polynomial& operator+=(const Polynomial& rhs); // inplace addition
|
||||
|
||||
// Subtraction
|
||||
Polynomial operator-(const Polynomial& rhs) const;
|
||||
|
||||
// Multiplication
|
||||
Polynomial operator*(const Polynomial& rhs) const;
|
||||
Polynomial operator*(const Domain& scalar) const; // scalar multiplication
|
||||
|
||||
// Division A(x) = B(x)Q(x) + R(x)
|
||||
std::pair<Polynomial, Polynomial> divide(const Polynomial& rhs) const; // returns (Q(x), R(x))
|
||||
Polynomial operator/(const Polynomial& rhs) const; // returns quotient Q(x)
|
||||
Polynomial operator%(const Polynomial& rhs) const; // returns remainder R(x)
|
||||
Polynomial divide_by_vanishing_polynomial(uint64_t degree) const; // division by the vanishing polynomial V(x)=X^N-1
|
||||
```
|
||||
|
||||
#### Example:
|
||||
Given polynomials A(x),B(x),C(x) and V(x) the vanishing polynomial.
|
||||
|
||||
$$
|
||||
H(x)=\frac{A(x) \cdot B(x) - C(x)}{V(x)} \space where \space V(x) = X^{N}-1
|
||||
$$
|
||||
|
||||
```cpp
|
||||
auto H = (A*B-C).divide_by_vanishing_polynomial(N);
|
||||
```
|
||||
|
||||
### Evaluation
|
||||
Evaluate polynomials at arbitrary domain points or across a domain.
|
||||
|
||||
```cpp
|
||||
Image operator()(const Domain& x) const; // evaluate f(x)
|
||||
void evaluate(const Domain* x, Image* evals /*OUT*/) const;
|
||||
void evaluate_on_domain(Domain* domain, uint64_t size, Image* evals /*OUT*/) const; // caller allocates memory
|
||||
```
|
||||
|
||||
Example:
|
||||
|
||||
```cpp
|
||||
Coeff x = rand();
|
||||
Image f_x = f(x); // evaluate f at x
|
||||
|
||||
// evaluate f(x) on a domain
|
||||
uint64_t domain_size = ...;
|
||||
auto domain = /*build domain*/; // host or device memory
|
||||
auto evaluations = std::make_unique<scalar_t[]>(domain_size); // can be device memory too
|
||||
f.evaluate_on_domain(domain, domain_size, evaluations);
|
||||
```
|
||||
|
||||
:::note For special domains such as roots of unity this method is not the most efficient for two reasons:
|
||||
- Need to build the domain of size N.
|
||||
- The implementation is not trying to identify this special domain.
|
||||
|
||||
Therefore the computation is typically $O(n^2)$ rather than $O(n \log n)$.
|
||||
See the 'device views' section for more details.
|
||||
:::
|
||||
|
||||
|
||||
### Manipulations
|
||||
Beyond arithmetic, the API supports efficient polynomial manipulations:
|
||||
|
||||
#### Monomials
|
||||
```cpp
|
||||
// Monomial operations
|
||||
Polynomial& add_monomial_inplace(Coeff monomial_coeff, uint64_t monomial = 0);
|
||||
Polynomial& sub_monomial_inplace(Coeff monomial_coeff, uint64_t monomial = 0);
|
||||
```
|
||||
|
||||
The ability to add or subtract monomials directly and in-place is an efficient way to manipulate polynomials.
|
||||
|
||||
Example:
|
||||
```cpp
|
||||
f.add_monomial_inplace(scalar_t::from(5)); // f(x) += 5
|
||||
f.sub_monomial_inplace(scalar_t::from(3), 8); // f(x) -= 3x^8
|
||||
```
|
||||
|
||||
#### Computing the degree of a Polynomial
|
||||
```cpp
|
||||
// Degree computation
|
||||
int64_t degree();
|
||||
```
|
||||
|
||||
The degree of a polynomial is a fundamental characteristic that describes the highest power of the variable in the polynomial expression with a non-zero coefficient.
|
||||
The `degree()` function in the API returns the degree of the polynomial, corresponding to the highest exponent with a non-zero coefficient.
|
||||
|
||||
- For the polynomial $f(x) = x^5 + 2x^3 + 4$, the degree is 5 because the highest power of $x$ with a non-zero coefficient is 5.
|
||||
- For a scalar value such as a constant term (e.g., $f(x) = 7$), the degree is considered 0, as it corresponds to $x^0$.
|
||||
- The degree of the zero polynomial, $f(x) = 0$, where there are no non-zero coefficients, is defined as -1. This special case often represents an "empty" or undefined state in many mathematical contexts.
|
||||
|
||||
Example:
|
||||
```cpp
|
||||
auto f = /*some expression*/;
|
||||
auto degree_of_f = f.degree();
|
||||
```
|
||||
|
||||
#### Slicing
|
||||
```cpp
|
||||
// Slicing and selecting even or odd components.
|
||||
Polynomial slice(uint64_t offset, uint64_t stride, uint64_t size = 0 /*0 means take all elements*/);
|
||||
Polynomial even();
|
||||
Polynomial odd();
|
||||
```
|
||||
|
||||
The Polynomial API provides methods for slicing polynomials and selecting specific components, such as even or odd indexed terms. Slicing allows extracting specific sections of a polynomial based on an offset, stride, and size.
|
||||
|
||||
The following examples demonstrate folding a polynomial's even and odd parts and arbitrary slicing:
|
||||
```cpp
|
||||
// folding a polynomial's even and odd parts with randomness
|
||||
auto x = rand();
|
||||
auto even = f.even();
|
||||
auto odd = f.odd();
|
||||
auto fold_poly = even + odd * x;
|
||||
|
||||
// arbitrary slicing (first quarter)
|
||||
auto first_quarter = f.slice(0 /*offset*/, 1 /*stride*/, f.degree()/4 /*size*/);
|
||||
```
|
||||
|
||||
### Memory access (copy/view)
|
||||
Access to the polynomial's internal state can be vital for operations like commitment schemes or when more efficient custom operations are necessary. This can be done in one of two ways:
|
||||
- **Copy** the coefficients or evaluations to user allocated memory or
|
||||
- **View** into the device memory without copying.
|
||||
|
||||
#### Copy
|
||||
Copy the polynomial coefficients to either host or device allocated memory.
|
||||
:::note copying to host memory is backend agnostic while copying to device memory requires the memory to be allocated on the corresponding backend.
|
||||
:::
|
||||
|
||||
```cpp
|
||||
Coeff get_coeff(uint64_t idx) const; // copy single coefficient to host
|
||||
uint64_t copy_coeffs(Coeff* coeffs, uint64_t start_idx, uint64_t end_idx) const;
|
||||
```
|
||||
|
||||
Example:
|
||||
```cpp
|
||||
auto coeffs_device = /*allocate CUDA or host memory*/
|
||||
f.copy_coeffs(coeffs_device, 0/*start*/, f.degree());
|
||||
|
||||
MSMConfig cfg = msm::defaultMSMConfig();
|
||||
cfg.are_points_on_device = true; // assuming copy to device memory
|
||||
auto rv = msm::MSM(coeffs_device, points, msm_size, cfg, results);
|
||||
```
|
||||
|
||||
#### Views
|
||||
The Polynomial API supports efficient data handling through the use of memory views. These views provide direct access to the polynomial's internal state, such as coefficients or evaluations, without the need to copy data. This feature is particularly useful for operations that require direct access to device memory, enhancing both performance and memory efficiency.
|
||||
|
||||
##### What is a Memory View?
|
||||
|
||||
A memory view is essentially a pointer to data stored in device memory. By providing a direct access pathway to the data, it eliminates the need for data duplication, thus conserving both time and system resources. This is especially beneficial in high-performance computing environments where data size and operation speed are critical factors.
|
||||
|
||||
##### Applications of Memory Views
|
||||
|
||||
Memory views are extremely versatile and can be employed in various computational contexts such as:
|
||||
|
||||
- **Commitments**: Views can be used to commit polynomial states in cryptographic schemes, such as Multi-Scalar Multiplications (MSM), or for constructing Merkle trees without duplicating the underlying data.
|
||||
- **External Computations**: They allow external functions or algorithms to utilize the polynomial's data directly, facilitating operations outside the core polynomial API. This is useful for custom operations that are not covered by the API.
|
||||
|
||||
##### Obtaining and Using Views
|
||||
|
||||
To create and use views within the Polynomial API, functions are provided to obtain pointers to both coefficients and evaluation data. Here’s how they are generally structured:
|
||||
|
||||
```cpp
|
||||
// Obtain a view of the polynomial's coefficients
|
||||
std::tuple<IntegrityPointer<Coeff>, uint64_t /*size*/, uint64_t /*device_id*/> get_coefficients_view();
|
||||
// obtain a view of the evaluations. Can specify the domain size and whether to compute reversed evaluations.
|
||||
std::tuple<IntegrityPointer<Image>, uint64_t /*size*/, uint64_t /*device_id*/>
|
||||
get_rou_evaluations_view(uint64_t nof_evaluations = 0, bool is_reversed = false);
|
||||
```
|
||||
|
||||
Example usage:
|
||||
|
||||
```cpp
|
||||
auto [coeffs_view, size, device_id] = polynomial.get_coefficients_view();
|
||||
|
||||
// Use coeffs_view in a computational routine that requires direct access to polynomial coefficients
|
||||
// Example: Passing the view to a GPU-accelerated function
|
||||
gpu_accelerated_function(coeffs_view.get(),...);
|
||||
```
|
||||
|
||||
##### Integrity-Pointer: Managing Memory Views
|
||||
Within the Polynomial API, memory views are managed through a specialized tool called the Integrity-Pointer. This pointer type is designed to safeguard operations by monitoring the validity of the memory it points to. It can detect if the memory has been modified or released, thereby preventing unsafe access to stale or non-existent data.
|
||||
The Integrity-Pointer not only acts as a regular pointer but also provides additional functionality to ensure the integrity of the data it references. Here are its key features:
|
||||
|
||||
```cpp
|
||||
// Checks whether the pointer is still considered valid
|
||||
bool isValid() const;
|
||||
|
||||
// Retrieves the raw pointer or nullptr if pointer is invalid
|
||||
const T* get() const;
|
||||
|
||||
// Dereferences the pointer. Throws exception if the pointer is invalid.
|
||||
const T& operator*() const;
|
||||
|
||||
//Provides access to the member of the pointed-to object. Throws exception if the pointer is invalid.
|
||||
const T* operator->() const;
|
||||
```
|
||||
|
||||
Consider the Following case:
|
||||
|
||||
```cpp
|
||||
auto [coeff_view, size, device] = f.get_coefficients_view();
|
||||
|
||||
// Use the coefficients view to perform external operations
|
||||
commit_to_polynomial(coeff_view.get(), size);
|
||||
|
||||
// Modification of the original polynomial
|
||||
f += g; // Any operation that modifies 'f' potentially invalidates 'coeff_view'
|
||||
|
||||
// Check if the view is still valid before using it further
|
||||
if (coeff_view.isValid()) {
|
||||
perform_additional_computation(coeff_view.get(), size);
|
||||
} else {
|
||||
handle_invalid_data();
|
||||
}
|
||||
```
|
||||
|
||||
#### Evaluations View: Accessing Polynomial Evaluations Efficiently
|
||||
The Polynomial API offers a specialized method, `get_rou_evaluations_view(...)`, which facilitates direct access to the evaluations of a polynomial. This method is particularly useful for scenarios where polynomial evaluations need to be accessed frequently or manipulated externally without the overhead of copying data.
|
||||
This method provides a memory view into the device memory where polynomial evaluations are stored. It allows for efficient interpolation on larger domains, leveraging the raw evaluations directly from memory.
|
||||
:::warning
|
||||
Invalid request: requesting evaluations on a domain smaller than the degree of the polynomial is not supported and is considered invalid.
|
||||
:::
|
||||
|
||||
```cpp
|
||||
// Assume a polynomial `p` of degree N
|
||||
auto [evals_view, size, device_id] = p.get_rou_evaluations_view(4*N); // expanding the evaluation domain
|
||||
|
||||
// Use the evaluations view to perform further computations or visualizations
|
||||
process_polynomial_evaluations(evals_view.get(), size, device_id);
|
||||
```
|
||||
|
||||
## Multi-GPU Support with CUDA Backend
|
||||
|
||||
The Polynomial API includes comprehensive support for multi-GPU environments, a crucial feature for leveraging the full computational power of systems equipped with multiple NVIDIA GPUs. This capability is part of the API's CUDA backend, which is designed to efficiently manage polynomial computations across different GPUs.
|
||||
|
||||
### Setting the CUDA Device
|
||||
|
||||
Like other components of the icicle framework, the Polynomial API allows explicit setting of the current CUDA device:
|
||||
|
||||
```cpp
|
||||
cudaSetDevice(int deviceID);
|
||||
```
|
||||
|
||||
This function sets the active CUDA device. All subsequent operations that allocate or deal with polynomial data will be performed on this device.
|
||||
|
||||
### Allocation Consistency
|
||||
Polynomials are always allocated on the current CUDA device at the time of their creation. It is crucial to ensure that the device context is correctly set before initiating any operation that involves memory allocation:
|
||||
```cpp
|
||||
// Set the device before creating polynomials
|
||||
cudaSetDevice(0);
|
||||
Polynomial p1 = Polynomial::from_coefficients(coeffs, size);
|
||||
|
||||
cudaSetDevice(1);
|
||||
Polynomial p2 = Polynomial::from_coefficients(coeffs, size);
|
||||
```
|
||||
|
||||
### Matching Devices for Operations
|
||||
When performing operations that result in the creation of new polynomials (such as addition or multiplication), it is imperative that both operands are on the same CUDA device. If the operands reside on different devices, an exception is thrown:
|
||||
|
||||
```cpp
|
||||
// Ensure both operands are on the same device
|
||||
cudaSetDevice(0);
|
||||
auto p3 = p1 + p2; // Throws an exception if p1 and p2 are not on the same device
|
||||
```
|
||||
|
||||
### Device-Agnostic Operations
|
||||
Operations that do not involve the creation of new polynomials, such as computing the degree of a polynomial or performing in-place modifications, can be executed regardless of the current device setting:
|
||||
```cpp
|
||||
// 'degree' and in-place operations do not require device matching
|
||||
int deg = p1.degree();
|
||||
p1 += p2; // Valid if p1 and p2 are on the same device, throws otherwise
|
||||
```
|
||||
|
||||
### Error Handling
|
||||
The API is designed to throw exceptions if operations are attempted across polynomials that are not located on the same GPU. This ensures that all polynomial operations are performed consistently and without data integrity issues due to device mismatches.
|
||||
|
||||
### Best Practices
|
||||
To maximize the performance and avoid runtime errors in a multi-GPU setup, always ensure that:
|
||||
|
||||
- The CUDA device is set correctly before polynomial allocation.
|
||||
- Operations involving new polynomial creation are performed with operands on the same device.
|
||||
|
||||
By adhering to these guidelines, developers can effectively harness the power of multiple GPUs to handle large-scale polynomial computations efficiently.
|
||||
35
docs/docs/icicle/rust-bindings/ecntt.md
Normal file
35
docs/docs/icicle/rust-bindings/ecntt.md
Normal file
@@ -0,0 +1,35 @@
|
||||
# ECNTT
|
||||
|
||||
### Supported curves
|
||||
|
||||
`bls12-377`, `bls12-381`, `bn254`
|
||||
|
||||
## ECNTT Method
|
||||
|
||||
The `ecntt` function computes the Elliptic Curve Number Theoretic Transform (EC-NTT) or its inverse on a batch of points of a curve.
|
||||
|
||||
```rust
|
||||
pub fn ecntt<C: Curve>(
|
||||
input: &(impl HostOrDeviceSlice<Projective<C>> + ?Sized),
|
||||
dir: NTTDir,
|
||||
cfg: &NTTConfig<C::ScalarField>,
|
||||
output: &mut (impl HostOrDeviceSlice<Projective<C>> + ?Sized),
|
||||
) -> IcicleResult<()>
|
||||
where
|
||||
C::ScalarField: FieldImpl,
|
||||
<C::ScalarField as FieldImpl>::Config: ECNTT<C>,
|
||||
{
|
||||
// ... function implementation ...
|
||||
}
|
||||
```
|
||||
|
||||
## Parameters
|
||||
|
||||
- **`input`**: The input data as a slice of `Projective<C>`. This represents points on a specific elliptic curve `C`.
|
||||
- **`dir`**: The direction of the NTT. It can be `NTTDir::kForward` for forward NTT or `NTTDir::kInverse` for inverse NTT.
|
||||
- **`cfg`**: The NTT configuration object of type `NTTConfig<C::ScalarField>`. This object specifies parameters for the NTT computation, such as the batch size and algorithm to use.
|
||||
- **`output`**: The output buffer to write the results into. This should be a slice of `Projective<C>` with the same size as the input.
|
||||
|
||||
## Return Value
|
||||
|
||||
- **`IcicleResult<()>`**: This function returns an `IcicleResult` which is a wrapper type that indicates success or failure of the NTT computation. On success, it contains `Ok(())`.
|
||||
@@ -62,11 +62,11 @@ Sets the current CUDA device by its ID, when calling `set_device` it will set th
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- `device_id: usize`: The ID of the device to set as the current device. Device IDs start from 0.
|
||||
- **`device_id: usize`**: The ID of the device to set as the current device. Device IDs start from 0.
|
||||
|
||||
**Returns:**
|
||||
|
||||
- `CudaResult<()>`: An empty result indicating success if the device is set successfully. In case of failure, returns a `CudaError`.
|
||||
- **`CudaResult<()>`**: An empty result indicating success if the device is set successfully. In case of failure, returns a `CudaError`.
|
||||
|
||||
**Errors:**
|
||||
|
||||
@@ -88,7 +88,7 @@ Retrieves the number of CUDA devices available on the machine.
|
||||
|
||||
**Returns:**
|
||||
|
||||
- `CudaResult<usize>`: The number of available CUDA devices. On success, contains the count of CUDA devices. On failure, returns a `CudaError`.
|
||||
- **`CudaResult<usize>`**: The number of available CUDA devices. On success, contains the count of CUDA devices. On failure, returns a `CudaError`.
|
||||
|
||||
**Errors:**
|
||||
|
||||
@@ -109,7 +109,7 @@ Retrieves the ID of the current CUDA device.
|
||||
|
||||
**Returns:**
|
||||
|
||||
- `CudaResult<usize>`: The ID of the current CUDA device. On success, contains the device ID. On failure, returns a `CudaError`.
|
||||
- **`CudaResult<usize>`**: The ID of the current CUDA device. On success, contains the device ID. On failure, returns a `CudaError`.
|
||||
|
||||
**Errors:**
|
||||
|
||||
@@ -191,7 +191,7 @@ Validates that the specified `device_id` matches the ID of the currently active
|
||||
|
||||
#### Behavior
|
||||
|
||||
- **Panics** if the `device_id` does not match the active device's ID, preventing cross-device operation errors.
|
||||
- **`Panics`** if the `device_id` does not match the active device's ID, preventing cross-device operation errors.
|
||||
|
||||
#### Example
|
||||
|
||||
|
||||
@@ -29,7 +29,7 @@ fn main() {
|
||||
// Create a CUDA stream
|
||||
let stream = CudaStream::create().expect("Failed to create CUDA stream");
|
||||
let ctx = DeviceContext::default(); // Assuming default device context
|
||||
ScalarCfg::initialize_domain(ScalarField::from_ark(icicle_omega), &ctx).unwrap();
|
||||
ScalarCfg::initialize_domain(ScalarField::from_ark(icicle_omega), &ctx, true).unwrap();
|
||||
|
||||
// Configure NTT
|
||||
let mut cfg = ntt::NTTConfig::default();
|
||||
@@ -61,10 +61,10 @@ pub fn ntt<F>(
|
||||
|
||||
`ntt::ntt` expects:
|
||||
|
||||
`input` - buffer to read the inputs of the NTT from. <br/>
|
||||
`dir` - whether to compute forward or inverse NTT. <br/>
|
||||
`cfg` - config used to specify extra arguments of the NTT. <br/>
|
||||
`output` - buffer to write the NTT outputs into. Must be of the same size as input.
|
||||
- **`input`** - buffer to read the inputs of the NTT from. <br/>
|
||||
- **`dir`** - whether to compute forward or inverse NTT. <br/>
|
||||
- **`cfg`** - config used to specify extra arguments of the NTT. <br/>
|
||||
- **`output`** - buffer to write the NTT outputs into. Must be of the same size as input.
|
||||
|
||||
The `input` and `output` buffers can be on device or on host. Being on host means that they will be transferred to device during runtime.
|
||||
|
||||
@@ -155,13 +155,13 @@ Deciding weather to use `batch NTT` vs `single NTT` is highly dependent on your
|
||||
Before performing NTT operations, it's necessary to initialize the NTT domain; it only needs to be called once per GPU since the twiddles are cached.
|
||||
|
||||
```rust
|
||||
ScalarCfg::initialize_domain(ScalarField::from_ark(icicle_omega), &ctx).unwrap();
|
||||
ScalarCfg::initialize_domain(ScalarField::from_ark(icicle_omega), &ctx, true).unwrap();
|
||||
```
|
||||
|
||||
### `initialize_domain`
|
||||
|
||||
```rust
|
||||
pub fn initialize_domain<F>(primitive_root: F, ctx: &DeviceContext) -> IcicleResult<()>
|
||||
pub fn initialize_domain<F>(primitive_root: F, ctx: &DeviceContext, fast_twiddles: bool) -> IcicleResult<()>
|
||||
where
|
||||
F: FieldImpl,
|
||||
<F as FieldImpl>::Config: NTT<F>;
|
||||
@@ -177,23 +177,32 @@ where
|
||||
|
||||
- **`IcicleResult<()>`**: Will return an error if the operation fails.
|
||||
|
||||
### `initialize_domain_fast_twiddles_mode`
|
||||
#### Parameters
|
||||
|
||||
Similar to `initialize_domain`, `initialize_domain_fast_twiddles_mode` is a faster implementation and can be used for larger NTTs.
|
||||
- **`primitive_root`**: The primitive root of unity, chosen based on the maximum NTT size required for the computations. It must be of an order that is a power of two. This root is used to generate twiddle factors that are essential for the NTT operations.
|
||||
|
||||
- **`ctx`**: A reference to a `DeviceContext` specifying which device and stream the computation should be executed on.
|
||||
|
||||
#### Returns
|
||||
|
||||
- **`IcicleResult<()>`**: Will return an error if the operation fails.
|
||||
|
||||
### Releasing the domain
|
||||
|
||||
The `release_domain` function is responsible for releasing the resources associated with a specific domain in the CUDA device context.
|
||||
|
||||
```rust
|
||||
pub fn initialize_domain_fast_twiddles_mode<F>(primitive_root: F, ctx: &DeviceContext) -> IcicleResult<()>
|
||||
pub fn release_domain<F>(ctx: &DeviceContext) -> IcicleResult<()>
|
||||
where
|
||||
F: FieldImpl,
|
||||
<F as FieldImpl>::Config: NTT<F>;
|
||||
<F as FieldImpl>::Config: NTT<F>
|
||||
```
|
||||
|
||||
#### Parameters
|
||||
|
||||
- **`primitive_root`**: The primitive root of unity, chosen based on the maximum NTT size required for the computations. It must be of an order that is a power of two. This root is used to generate twiddle factors that are essential for the NTT operations.
|
||||
|
||||
- **`ctx`**: A reference to a `DeviceContext` specifying which device and stream the computation should be executed on.
|
||||
|
||||
#### Returns
|
||||
|
||||
- **`IcicleResult<()>`**: Will return an error if the operation fails.
|
||||
The function returns an `IcicleResult<()>`, which represents the result of the operation. If the operation is successful, the function returns `Ok(())`, otherwise it returns an error.
|
||||
|
||||
|
||||
261
docs/docs/icicle/rust-bindings/polynomials.md
Normal file
261
docs/docs/icicle/rust-bindings/polynomials.md
Normal file
@@ -0,0 +1,261 @@
|
||||
:::note Please refer to the Polynomials overview page for a deep overview. This section is a brief description of the Rust FFI bindings.
|
||||
:::
|
||||
|
||||
# Rust FFI Bindings for Univariate Polynomial
|
||||
This documentation is designed to provide developers with a clear understanding of how to utilize the Rust bindings for polynomial operations efficiently and effectively, leveraging the robust capabilities of both Rust and C++ in their applications.
|
||||
|
||||
## Introduction
|
||||
The Rust FFI bindings for the Univariate Polynomial serve as a "shallow wrapper" around the underlying C++ implementation. These bindings provide a straightforward Rust interface that directly calls functions from a C++ library, effectively bridging Rust and C++ operations. The Rust layer handles simple interface translations without delving into complex logic or data structures, which are managed on the C++ side. This design ensures efficient data handling, memory management, and execution of polynomial operations directly via C++.
|
||||
Currently, these bindings are tailored specifically for polynomials where the coefficients, domain, and images are represented as scalar fields.
|
||||
|
||||
|
||||
## Initialization Requirements
|
||||
|
||||
Before utilizing any functions from the polynomial API, it is mandatory to initialize the appropriate polynomial backend (e.g., CUDA). Additionally, the NTT (Number Theoretic Transform) domain must also be initialized, as the CUDA backend relies on this for certain operations. Failing to properly initialize these components can result in errors.
|
||||
|
||||
:::note
|
||||
**Field-Specific Initialization Requirement**
|
||||
|
||||
The ICICLE library is structured such that each field or curve has its dedicated library implementation. As a result, initialization must be performed individually for each field or curve to ensure the correct setup and functionality of the library.
|
||||
:::
|
||||
|
||||
|
||||
## Core Trait: `UnivariatePolynomial`
|
||||
|
||||
The `UnivariatePolynomial` trait encapsulates the essential functionalities required for managing univariate polynomials in the Rust ecosystem. This trait standardizes the operations that can be performed on polynomials, regardless of the underlying implementation details. It allows for a unified approach to polynomial manipulation, providing a suite of methods that are fundamental to polynomial arithmetic.
|
||||
|
||||
### Trait Definition
|
||||
```rust
|
||||
pub trait UnivariatePolynomial
|
||||
where
|
||||
Self::Field: FieldImpl,
|
||||
Self::FieldConfig: FieldConfig,
|
||||
{
|
||||
type Field: FieldImpl;
|
||||
type FieldConfig: FieldConfig;
|
||||
|
||||
// Methods to create polynomials from coefficients or roots-of-unity evaluations.
|
||||
fn from_coeffs<S: HostOrDeviceSlice<Self::Field> + ?Sized>(coeffs: &S, size: usize) -> Self;
|
||||
fn from_rou_evals<S: HostOrDeviceSlice<Self::Field> + ?Sized>(evals: &S, size: usize) -> Self;
|
||||
|
||||
// Method to divide this polynomial by another, returning quotient and remainder.
|
||||
fn divide(&self, denominator: &Self) -> (Self, Self) where Self: Sized;
|
||||
|
||||
// Method to divide this polynomial by the vanishing polynomial 'X^N-1'.
|
||||
fn div_by_vanishing(&self, degree: u64) -> Self;
|
||||
|
||||
// Methods to add or subtract a monomial in-place.
|
||||
fn add_monomial_inplace(&mut self, monomial_coeff: &Self::Field, monomial: u64);
|
||||
fn sub_monomial_inplace(&mut self, monomial_coeff: &Self::Field, monomial: u64);
|
||||
|
||||
// Method to slice the polynomial, creating a sub-polynomial.
|
||||
fn slice(&self, offset: u64, stride: u64, size: u64) -> Self;
|
||||
|
||||
// Methods to return new polynomials containing only the even or odd terms.
|
||||
fn even(&self) -> Self;
|
||||
fn odd(&self) -> Self;
|
||||
|
||||
// Method to evaluate the polynomial at a given domain point.
|
||||
fn eval(&self, x: &Self::Field) -> Self::Field;
|
||||
|
||||
// Method to evaluate the polynomial over a domain and store the results.
|
||||
fn eval_on_domain<D: HostOrDeviceSlice<Self::Field> + ?Sized, E: HostOrDeviceSlice<Self::Field> + ?Sized>(
|
||||
&self,
|
||||
domain: &D,
|
||||
evals: &mut E,
|
||||
);
|
||||
|
||||
// Method to retrieve a coefficient at a specific index.
|
||||
fn get_coeff(&self, idx: u64) -> Self::Field;
|
||||
|
||||
// Method to copy coefficients into a provided slice.
|
||||
fn copy_coeffs<S: HostOrDeviceSlice<Self::Field> + ?Sized>(&self, start_idx: u64, coeffs: &mut S);
|
||||
|
||||
// Method to get the degree of the polynomial.
|
||||
fn degree(&self) -> i64;
|
||||
}
|
||||
```
|
||||
|
||||
## `DensePolynomial` Struct
|
||||
The DensePolynomial struct represents a dense univariate polynomial in Rust, leveraging a handle to manage its underlying memory within the CUDA device context. This struct acts as a high-level abstraction over complex C++ memory management practices, facilitating the integration of high-performance polynomial operations through Rust's Foreign Function Interface (FFI) bindings.
|
||||
|
||||
```rust
|
||||
pub struct DensePolynomial {
|
||||
handle: PolynomialHandle,
|
||||
}
|
||||
```
|
||||
|
||||
### Traits implementation and methods
|
||||
|
||||
#### `Drop`
|
||||
Ensures proper resource management by releasing the CUDA memory when a DensePolynomial instance goes out of scope. This prevents memory leaks and ensures that resources are cleaned up correctly, adhering to Rust's RAII (Resource Acquisition Is Initialization) principles.
|
||||
|
||||
#### `Clone`
|
||||
Provides a way to create a new instance of a DensePolynomial with its own unique handle, thus duplicating the polynomial data in the CUDA context. Cloning is essential since the DensePolynomial manages external resources, which cannot be safely shared across instances without explicit duplication.
|
||||
|
||||
#### Operator Overloading: `Add`, `Sub`, `Mul`, `Rem`, `Div`
|
||||
These traits are implemented for references to DensePolynomial (i.e., &DensePolynomial), enabling natural mathematical operations such as addition (+), subtraction (-), multiplication (*), division (/), and remainder (%). This syntactic convenience allows users to compose complex polynomial expressions in a way that is both readable and expressive.
|
||||
|
||||
#### Key Methods
|
||||
In addition to the traits, the following methods are implemented:
|
||||
|
||||
```rust
|
||||
impl DensePolynomial {
|
||||
pub fn init_cuda_backend() -> bool {...}
|
||||
// Returns a mutable slice of the polynomial coefficients on the device
|
||||
pub fn coeffs_mut_slice(&mut self) -> &mut DeviceSlice<F> {...}
|
||||
}
|
||||
```
|
||||
|
||||
:::note Might be consolidated with `UnivariatePolynomial` trait
|
||||
:::
|
||||
|
||||
## Flexible Memory Handling With `HostOrDeviceSlice`
|
||||
The DensePolynomial API is designed to accommodate a wide range of computational environments by supporting both host and device memory through the `HostOrDeviceSlice` trait. This approach ensures that polynomial operations can be seamlessly executed regardless of where the data resides, making the API highly adaptable and efficient for various hardware configurations.
|
||||
|
||||
### Overview of `HostOrDeviceSlice`
|
||||
The HostOrDeviceSlice is a Rust trait that abstracts over slices of memory that can either be on the host (CPU) or the device (GPU), as managed by CUDA. This abstraction is crucial for high-performance computing scenarios where data might need to be moved between different memory spaces depending on the operations being performed and the specific hardware capabilities available.
|
||||
|
||||
### Usage in API Functions
|
||||
Functions within the DensePolynomial API that deal with polynomial coefficients or evaluations use the HostOrDeviceSlice trait to accept inputs. This design allows the functions to be agnostic of the actual memory location of the data, whether it's in standard system RAM accessible by the CPU or in GPU memory accessible by CUDA cores.
|
||||
|
||||
```rust
|
||||
// Assume `coeffs` could either be in host memory or CUDA device memory
|
||||
let coeffs: DeviceSlice<F> = DeviceVec::<F>::cuda_malloc(coeffs_len).unwrap();
|
||||
let p_from_coeffs = PolynomialBabyBear::from_coeffs(&coeffs, coeffs.len());
|
||||
|
||||
// Similarly for evaluations from roots of unity
|
||||
let evals: HostSlice<F> = HostSlice::from_slice(&host_memory_evals);
|
||||
let p_from_evals = PolynomialBabyBear::from_rou_evals(&evals, evals.len());
|
||||
|
||||
// Same applies for any API that accepts HostOrDeviceSlice
|
||||
```
|
||||
|
||||
## Usage
|
||||
This section outlines practical examples demonstrating how to utilize the `DensePolynomial` Rust API. The API is flexible, supporting multiple scalar fields. Below are examples showing how to use polynomials defined over different fields and perform a variety of operations.
|
||||
|
||||
### Initialization and Basic Operations
|
||||
First, choose the appropriate field implementation for your polynomial operations, initializing the CUDA backend if necessary
|
||||
```rust
|
||||
use icicle_babybear::polynomials::DensePolynomial as PolynomialBabyBear;
|
||||
|
||||
// Initialize the CUDA backend for polynomial operations
|
||||
PolynomialBabyBear::init_cuda_backend();
|
||||
let f = PolynomialBabyBear::from_coeffs(...);
|
||||
|
||||
// now use f by calling the implemented traits
|
||||
|
||||
// For operations over another field, such as BN254
|
||||
use icicle_bn254::polynomials::DensePolynomial as PolynomialBn254;
|
||||
// Use PolynomialBn254 similarly
|
||||
```
|
||||
|
||||
### Creation
|
||||
Polynomials can be created from coefficients or evaluations:
|
||||
|
||||
```rust
|
||||
// Assume F is the field type (e.g. icicle_bn254::curve::ScalarField or a type parameter)
|
||||
let coeffs = ...;
|
||||
let p_from_coeffs = PolynomialBabyBear::from_coeffs(HostSlice::from_slice(&coeffs), size);
|
||||
|
||||
let evals = ...;
|
||||
let p_from_evals = PolynomialBabyBear::from_rou_evals(HostSlice::from_slice(&evals), size);
|
||||
|
||||
```
|
||||
|
||||
### Arithmetic Operations
|
||||
Utilize overloaded operators for intuitive mathematical expressions:
|
||||
|
||||
```rust
|
||||
let add = &f + &g; // Addition
|
||||
let sub = &f - &g; // Subtraction
|
||||
let mul = &f * &g; // Multiplication
|
||||
let mul_scalar = &f * &scalar; // Scalar multiplication
|
||||
```
|
||||
|
||||
### Division and Remainder
|
||||
Compute quotient and remainder or perform division by a vanishing polynomial:
|
||||
|
||||
```rust
|
||||
let (q, r) = f.divide(&g); // Compute both quotient and remainder
|
||||
let q = &f / &g; // Quotient
|
||||
let r = &f % &g; // Remainder
|
||||
|
||||
let h = f.div_by_vanishing(N); // Division by V(x) = X^N - 1
|
||||
|
||||
```
|
||||
|
||||
### Monomial Operations
|
||||
Add or subtract monomials in-place for efficient polynomial manipulation:
|
||||
|
||||
```rust
|
||||
f.add_monomial_inplace(&three, 1 /*monomial*/); // Adds 3*x to f
|
||||
f.sub_monomial_inplace(&one, 0 /*monomial*/); // Subtracts 1 from f
|
||||
```
|
||||
|
||||
### Slicing
|
||||
Extract specific components:
|
||||
|
||||
```rust
|
||||
let even = f.even(); // Polynomial of even-indexed terms
|
||||
let odd = f.odd(); // Polynomial of odd-indexed terms
|
||||
let arbitrary_slice = f.slice(offset, stride, size);
|
||||
```
|
||||
|
||||
### Evaluate
|
||||
Evaluate the polynomial:
|
||||
|
||||
```rust
|
||||
let x = rand(); // Random field element
|
||||
let f_x = f.eval(&x); // Evaluate f at x
|
||||
|
||||
// Evaluate on a predefined domain
|
||||
let domain = [one, two, three];
|
||||
let mut host_evals = vec![ScalarField::zero(); domain.len()];
|
||||
f.eval_on_domain(HostSlice::from_slice(&domain), HostSlice::from_mut_slice(&mut host_evals));
|
||||
```
|
||||
|
||||
### Read coefficients
|
||||
Read or copy polynomial coefficients for further processing:
|
||||
|
||||
```rust
|
||||
let x_squared_coeff = f.get_coeff(2); // Coefficient of x^2
|
||||
|
||||
// Copy coefficients to a device-specific memory space
|
||||
let mut device_mem = DeviceVec::<Field>::cuda_malloc(coeffs.len()).unwrap();
|
||||
f.copy_coeffs(0, &mut device_mem[..]);
|
||||
```
|
||||
|
||||
### Polynomial Degree
|
||||
Determine the highest power of the variable with a non-zero coefficient:
|
||||
|
||||
```rust
|
||||
let deg = f.degree(); // Degree of the polynomial
|
||||
```
|
||||
|
||||
### Memory Management: Views (rust slices)
|
||||
Rust enforces correct usage of views at compile time, eliminating the need for runtime checks:
|
||||
|
||||
```rust
|
||||
let mut f = Poly::from_coeffs(HostSlice::from_slice(&coeffs), size);
|
||||
|
||||
// Obtain a mutable slice of coefficients as a DeviceSlice
|
||||
let coeffs_slice_dev = f.coeffs_mut_slice();
|
||||
|
||||
// Operations on f are restricted here due to mutable borrow of coeffs_slice_dev
|
||||
|
||||
// Compute evaluations or perform other operations directly using the slice
|
||||
// example: evaluate f on a coset of roots-of-unity. Computing from GPU to HOST/GPU
|
||||
let mut config: NTTConfig<'_, F> = NTTConfig::default();
|
||||
config.coset_gen = /*some coset gen*/;
|
||||
let mut coset_evals = vec![F::zero(); coeffs_slice_dev.len()];
|
||||
ntt(
|
||||
coeffs_slice_dev,
|
||||
NTTDir::kForward,
|
||||
&config,
|
||||
HostSlice::from_mut_slice(&mut coset_evals),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// now f can be borrowed once again
|
||||
```
|
||||
@@ -74,7 +74,6 @@ pub struct VecOpsConfig<'a> {
|
||||
is_a_on_device: bool,
|
||||
is_b_on_device: bool,
|
||||
is_result_on_device: bool,
|
||||
is_result_montgomery_form: bool,
|
||||
pub is_async: bool,
|
||||
}
|
||||
```
|
||||
@@ -85,7 +84,6 @@ pub struct VecOpsConfig<'a> {
|
||||
- **`is_a_on_device`**: Indicates if the first operand vector resides in device memory.
|
||||
- **`is_b_on_device`**: Indicates if the second operand vector resides in device memory.
|
||||
- **`is_result_on_device`**: Specifies if the result vector should be stored in device memory.
|
||||
- **`is_result_montgomery_form`**: Determines if the result should be in Montgomery form.
|
||||
- **`is_async`**: Enables asynchronous operation. If `true`, operations are non-blocking; otherwise, they block the current thread.
|
||||
|
||||
### Default Configuration
|
||||
@@ -112,7 +110,6 @@ impl<'a> VecOpsConfig<'a> {
|
||||
is_a_on_device: false,
|
||||
is_b_on_device: false,
|
||||
is_result_on_device: false,
|
||||
is_result_montgomery_form: false,
|
||||
is_async: false,
|
||||
}
|
||||
}
|
||||
@@ -157,3 +154,64 @@ All operations are element-wise operations, and the results placed into the `res
|
||||
- **`add`**: Computes the element-wise sum of two vectors.
|
||||
- **`sub`**: Computes the element-wise difference between two vectors.
|
||||
- **`mul`**: Performs element-wise multiplication of two vectors.
|
||||
|
||||
|
||||
## MatrixTranspose API Documentation
|
||||
|
||||
This section describes the functionality of the `TransposeMatrix` function used for matrix transposition.
|
||||
|
||||
The function takes a matrix represented as a 1D slice and transposes it, storing the result in another 1D slice.
|
||||
|
||||
### Function
|
||||
|
||||
```rust
|
||||
pub fn transpose_matrix<F>(
|
||||
input: &HostOrDeviceSlice<F>,
|
||||
row_size: u32,
|
||||
column_size: u32,
|
||||
output: &mut HostOrDeviceSlice<F>,
|
||||
ctx: &DeviceContext,
|
||||
on_device: bool,
|
||||
is_async: bool,
|
||||
) -> IcicleResult<()>
|
||||
where
|
||||
F: FieldImpl,
|
||||
<F as FieldImpl>::Config: VecOps<F>
|
||||
```
|
||||
|
||||
### Parameters
|
||||
|
||||
- **`input`**: A slice representing the input matrix. The slice can be stored on either the host or the device.
|
||||
- **`row_size`**: The number of rows in the input matrix.
|
||||
- **`column_size`**: The number of columns in the input matrix.
|
||||
- **`output`**: A mutable slice to store the transposed matrix. The slice can be stored on either the host or the device.
|
||||
- **`ctx`**: A reference to the `DeviceContext`, which provides information about the device where the operation will be performed.
|
||||
- **`on_device`**: A boolean flag indicating whether the inputs and outputs are on the device.
|
||||
- **`is_async`**: A boolean flag indicating whether the operation should be performed asynchronously.
|
||||
|
||||
### Return Value
|
||||
|
||||
`Ok(())` if the operation is successful, or an `IcicleResult` error otherwise.
|
||||
|
||||
### Example
|
||||
|
||||
```rust
|
||||
use icicle::HostOrDeviceSlice;
|
||||
use icicle::DeviceContext;
|
||||
use icicle::FieldImpl;
|
||||
use icicle::VecOps;
|
||||
|
||||
let input: HostOrDeviceSlice<i32> = // ...;
|
||||
let mut output: HostOrDeviceSlice<i32> = // ...;
|
||||
let ctx: DeviceContext = // ...;
|
||||
|
||||
transpose_matrix(&input, 5, 4, &mut output, &ctx, true, false)
|
||||
.expect("Failed to transpose matrix");
|
||||
```
|
||||
|
||||
|
||||
The function takes a matrix represented as a 1D slice, transposes it, and stores the result in another 1D slice. The input and output slices can be stored on either the host or the device, and the operation can be performed synchronously or asynchronously.
|
||||
|
||||
The function is generic and can work with any type `F` that implements the `FieldImpl` trait. The `<F as FieldImpl>::Config` type must also implement the `VecOps<F>` trait, which provides the `transpose` method used to perform the actual transposition.
|
||||
|
||||
The function returns an `IcicleResult<()>`, indicating whether the operation was successful or not.
|
||||
@@ -1,117 +0,0 @@
|
||||
# Supporting Additional Curves
|
||||
|
||||
We understand the need for ZK developers to use different curves, some common some more exotic. For this reason we designed ICICLE to allow developers to add any curve they desire.
|
||||
|
||||
## ICICLE Core
|
||||
|
||||
ICICLE core is very generic by design so all algorithms and primitives are designed to work based on configuration files [selected during compile](https://github.com/ingonyama-zk/icicle/blob/main/icicle/curves/curve_config.cuh) time. This is why we compile ICICLE Core per curve.
|
||||
|
||||
To add support for a new curve you must create a new file under [`icicle/curves`](https://github.com/ingonyama-zk/icicle/tree/main/icicle/curves). The file should be named `<curve_name>_params.cuh`.
|
||||
|
||||
### Adding curve_name_params.cuh
|
||||
|
||||
Start by copying `bn254_params.cuh` contents in your params file. Params should include:
|
||||
- **fq_config** - parameters of the Base field.
|
||||
- **limbs_count** - `ceil(field_byte_size / 4)`.
|
||||
- **modulus_bit_count** - bit-size of the modulus.
|
||||
- **num_of_reductions** - the number of times to reduce in reduce function. Use 2 if not sure.
|
||||
- **modulus** - modulus of the field.
|
||||
- **modulus_2** - modulus * 2.
|
||||
- **modulus_4** - modulus * 4.
|
||||
- **neg_modulus** - negated modulus.
|
||||
- **modulus_wide** - modulus represented as a double-sized integer.
|
||||
- **modulus_squared** - modulus**2 represented as a double-sized integer.
|
||||
- **modulus_squared_2** - 2 * modulus**2 represented as a double-sized integer.
|
||||
- **modulus_squared_4** - 4 * modulus**2 represented as a double-sized integer.
|
||||
- **m** - value used in multiplication. Can be computed as `2**(2*modulus_bit_count) // modulus`.
|
||||
- **one** - multiplicative identity.
|
||||
- **zero** - additive identity.
|
||||
- **montgomery_r** - `2 ** M % modulus` where M is a closest (larger than) bitsize multiple of 32. E.g. 384 or 768 for bls and bw curves respectively
|
||||
- **montgomery_r_inv** - `2 ** (-M) % modulus`
|
||||
- **fp_config** - parameters of the Scalar field.
|
||||
Same as fq_config, but with additional arguments:
|
||||
- **omegas_count** - [two-adicity](https://cryptologie.net/article/559/whats-two-adicity/) of the field. And thus the maximum size of NTT.
|
||||
- **omegas** - an array of omegas for NTTs. An array of size `omegas_count`. The ith element is equal to `1.nth_root(2**(2**(omegas_count-i)))`.
|
||||
- **inv** - an array of inverses of powers of two in a field. The ith element is equal to `(2 ** (i+1)) ** -1`.
|
||||
- **G1 generators points** - affine coordinates of the generator point.
|
||||
- **G2 generators points** - affine coordinates of the extension generator. Remove these if `G2` is not supported.
|
||||
- **Weierstrass b value** - base field element equal to value of `b` in the curve equation.
|
||||
- **Weierstrass b value G2** - base field element equal to value of `b` for the extension. Remove this if `G2` is not supported.
|
||||
|
||||
:::note
|
||||
|
||||
All the params are not in Montgomery form.
|
||||
|
||||
:::
|
||||
|
||||
:::note
|
||||
|
||||
To convert number values into `storage` type you can use the following python function
|
||||
|
||||
```python
|
||||
import struct
|
||||
|
||||
def unpack(x, field_size):
|
||||
    return ', '.join(["0x" + format(x, '08x') for x in struct.unpack('I' * (field_size // 4), int(x).to_bytes(field_size, 'little'))])
|
||||
```
|
||||
|
||||
:::
|
||||
|
||||
We also require some changes to [`curve_config.cuh`](https://github.com/ingonyama-zk/icicle/blob/main/icicle/curves/curve_config.cuh#L16-L29), we need to add a new curve id.
|
||||
|
||||
```
|
||||
...
|
||||
|
||||
#define BN254 1
|
||||
#define BLS12_381 2
|
||||
#define BLS12_377 3
|
||||
#define BW6_761 4
|
||||
#define GRUMPKIN 5
|
||||
#define <curve_name> 6
|
||||
|
||||
...
|
||||
```
|
||||
|
||||
Make sure to modify the [rest of the file](https://github.com/ingonyama-zk/icicle/blob/4beda3a900eda961f39af3a496f8184c52bf3b41/icicle/curves/curve_config.cuh#L16-L29) accordingly.
|
||||
|
||||
Finally we must modify the [`make` file](https://github.com/ingonyama-zk/icicle/blob/main/icicle/CMakeLists.txt#L64) to make sure we can compile our new curve.
|
||||
|
||||
```
|
||||
set(SUPPORTED_CURVES bn254;bls12_381;bls12_377;bw6_761;grumpkin;<curve_name>)
|
||||
```
|
||||
|
||||
### Adding Poseidon support
|
||||
|
||||
If you want your curve to implement a Poseidon hash function or a tree builder, you will need to pre-calculate its optimized parameters.
|
||||
Copy [constants_template.h](https://github.com/ingonyama-zk/icicle/blob/main/icicle/appUtils/poseidon/constants/constants_template.h) into `icicle/appUtils/poseidon/constants/<CURVE>_poseidon.h`. Run the [constants generation script](https://dev.ingonyama.com/icicle/primitives/poseidon#constants). The script will print the number of partial rounds and generate a `constants.bin` file. Use `xxd -i constants.bin` to parse the file into C declarations. Copy the `unsigned char constants_bin[]` contents inside your new file. Repeat this process for arities 2, 4, 8 and 11.
|
||||
|
||||
After you've generated the constants, add your curve in this [SUPPORTED_CURVES_WITH_POSEIDON](https://github.com/ingonyama-zk/icicle/blob/main/icicle/CMakeLists.txt#L72) in the `CMakeLists.txt`.
|
||||
|
||||
## Bindings
|
||||
|
||||
In order to support a new curve in the binding libraries you first must support it in ICICLE core.
|
||||
|
||||
### Rust
|
||||
|
||||
Go to [rust curves folder](https://github.com/ingonyama-zk/icicle/tree/main/wrappers/rust/icicle-curves) and copy `icicle-curve-template` to a new folder named `icicle-<curve_name>`.
|
||||
|
||||
Find all the occurrences of `<CURVE>` placeholder inside the crate. (You can use `Ctrl+Shift+F` in VS Code or `grep -nr "<CURVE>"` in bash). You will then need to replace each occurrence with your new curve name.
|
||||
|
||||
#### Limbs
|
||||
|
||||
Go to your curve's `curve.rs` file and set `SCALAR_LIMBS`, `BASE_LIMBS` and `G2_BASE_LIMBS` (if G2 is needed) to a minimum number of `u64` required to store a single scalar field / base field element respectively.
|
||||
e.g. for bn254, scalar field is 254 bit so `SCALAR_LIMBS` is set to 4.
|
||||
|
||||
#### Primitives
|
||||
|
||||
If your curve doesn't support some of the primitives (ntt/msm/poseidon/merkle tree/), or you simply don't want to include it, just remove a corresponding module from `src` and then from `lib.rs`
|
||||
|
||||
#### G2
|
||||
|
||||
If your curve doesn't support G2 - remove all the code under `#[cfg(feature = "g2")]` and remove the feature from [Cargo.toml](https://github.com/ingonyama-zk/icicle/blob/main/wrappers/rust/icicle-curves/icicle-bn254/Cargo.toml#L29) and [build.rs](https://github.com/ingonyama-zk/icicle/blob/main/wrappers/rust/icicle-curves/icicle-bn254/build.rs#L15).
|
||||
|
||||
After this is done, add your new crate in the [global Cargo.toml](https://github.com/ingonyama-zk/icicle/tree/main/wrappers/rust/Cargo.toml).
|
||||
|
||||
### Golang
|
||||
|
||||
Golang is WIP in v1, coming soon. Please checkout a previous [release v0.1.0](https://github.com/ingonyama-zk/icicle/releases/tag/v0.1.0) for golang bindings.
|
||||
9761
docs/package-lock.json
generated
9761
docs/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -21,8 +21,8 @@ module.exports = {
|
||||
},
|
||||
{
|
||||
type: "doc",
|
||||
label: "ICICLE Provers",
|
||||
id: "icicle/integrations"
|
||||
label: "ICICLE Core",
|
||||
id: "icicle/core",
|
||||
},
|
||||
{
|
||||
type: "category",
|
||||
@@ -54,15 +54,20 @@ module.exports = {
|
||||
label: "NTT",
|
||||
id: "icicle/golang-bindings/ntt",
|
||||
},
|
||||
{
|
||||
type: "doc",
|
||||
label: "EC-NTT",
|
||||
id: "icicle/golang-bindings/ecntt",
|
||||
},
|
||||
{
|
||||
type: "doc",
|
||||
label: "Vector operations",
|
||||
id: "icicle/golang-bindings/vec-ops",
|
||||
},
|
||||
{
|
||||
type: "doc",
|
||||
label: "Multi GPU Support",
|
||||
id: "icicle/golang-bindings/multi-gpu",
|
||||
type: "doc",
|
||||
label: "Multi GPU Support",
|
||||
id: "icicle/golang-bindings/multi-gpu",
|
||||
},
|
||||
]
|
||||
},
|
||||
@@ -96,6 +101,11 @@ module.exports = {
|
||||
label: "NTT",
|
||||
id: "icicle/rust-bindings/ntt",
|
||||
},
|
||||
{
|
||||
type: "doc",
|
||||
label: "EC-NTT",
|
||||
id: "icicle/rust-bindings/ecntt",
|
||||
},
|
||||
{
|
||||
type: "doc",
|
||||
label: "Vector operations",
|
||||
@@ -106,6 +116,11 @@ module.exports = {
|
||||
label: "Multi GPU Support",
|
||||
id: "icicle/rust-bindings/multi-gpu",
|
||||
},
|
||||
{
|
||||
type: "doc",
|
||||
label: "Polynomials",
|
||||
id: "icicle/rust-bindings/polynomials",
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
@@ -134,6 +149,11 @@ module.exports = {
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
type: "doc",
|
||||
label: "Polynomials",
|
||||
id: "icicle/polynomials/overview",
|
||||
},
|
||||
{
|
||||
type: "doc",
|
||||
label: "Multi GPU Support",
|
||||
@@ -141,13 +161,13 @@ module.exports = {
|
||||
},
|
||||
{
|
||||
type: "doc",
|
||||
label: "Supporting additional curves",
|
||||
id: "icicle/supporting-additional-curves",
|
||||
label: "Google Colab Instructions",
|
||||
id: "icicle/colab-instructions",
|
||||
},
|
||||
{
|
||||
type: "doc",
|
||||
label: "Google Colab Instructions",
|
||||
id: "icicle/colab-instructions",
|
||||
label: "ICICLE Provers",
|
||||
id: "icicle/integrations"
|
||||
},
|
||||
]
|
||||
},
|
||||
|
||||
@@ -8,18 +8,16 @@ if (${CMAKE_VERSION} VERSION_LESS "3.24.0")
|
||||
else()
|
||||
set(CMAKE_CUDA_ARCHITECTURES native) # on 3.24+, on earlier it is ignored, and the target is not passed
|
||||
endif ()
|
||||
project(icicle LANGUAGES CUDA CXX)
|
||||
project(example LANGUAGES CUDA CXX)
|
||||
|
||||
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr")
|
||||
set(CMAKE_CUDA_FLAGS_RELEASE "")
|
||||
set(CMAKE_CUDA_FLAGS_DEBUG "${CMAKE_CUDA_FLAGS_DEBUG} -g -G -O0")
|
||||
# change the path to your Icicle location
|
||||
include_directories("../../../icicle")
|
||||
|
||||
add_executable(
|
||||
example
|
||||
example.cu
|
||||
)
|
||||
|
||||
find_library(NVML_LIBRARY nvidia-ml PATHS /usr/local/cuda-12.0/targets/x86_64-linux/lib/stubs/ )
|
||||
target_link_libraries(example ${NVML_LIBRARY})
|
||||
target_include_directories(example PRIVATE "../../../icicle/include")
|
||||
target_link_libraries(example ${CMAKE_SOURCE_DIR}/build/icicle/lib/libingo_curve_bn254.a)
|
||||
set_target_properties(example PROPERTIES CUDA_SEPARABLE_COMPILATION ON)
|
||||
|
||||
@@ -3,7 +3,13 @@
|
||||
# Exit immediately on error
|
||||
set -e
|
||||
|
||||
rm -rf build
|
||||
mkdir -p build
|
||||
cmake -S . -B build
|
||||
cmake --build build
|
||||
mkdir -p build/example
|
||||
mkdir -p build/icicle
|
||||
|
||||
# Configure and build Icicle
|
||||
cmake -S ../../../icicle/ -B build/icicle -DCMAKE_BUILD_TYPE=Release -DCURVE=bn254 -DG2=ON
|
||||
cmake --build build/icicle
|
||||
|
||||
# Configure and build the example application
|
||||
cmake -S . -B build/example
|
||||
cmake --build build/example
|
||||
@@ -2,11 +2,8 @@
|
||||
#include <iostream>
|
||||
#include <iomanip>
|
||||
|
||||
#define G2_DEFINED
|
||||
#define CURVE_ID 1
|
||||
// include MSM template
|
||||
#include "appUtils/msm/msm.cu"
|
||||
using namespace curve_config;
|
||||
#include "api/bn254.h"
|
||||
using namespace bn254;
|
||||
|
||||
int main(int argc, char* argv[])
|
||||
{
|
||||
@@ -24,11 +21,10 @@ int main(int argc, char* argv[])
|
||||
scalar_t* scalars = new scalar_t[N];
|
||||
affine_t* points = new affine_t[N];
|
||||
projective_t result;
|
||||
scalar_t::RandHostMany(scalars, N);
|
||||
projective_t::RandHostManyAffine(points, N);
|
||||
scalar_t::rand_host_many(scalars, N);
|
||||
projective_t::rand_host_many_affine(points, N);
|
||||
|
||||
std::cout << "Using default MSM configuration with on-host inputs" << std::endl;
|
||||
// auto config = msm::DefaultMSMConfig();
|
||||
device_context::DeviceContext ctx = device_context::get_default_device_context();
|
||||
msm::MSMConfig config = {
|
||||
ctx, // ctx
|
||||
@@ -49,28 +45,9 @@ int main(int argc, char* argv[])
|
||||
config.batch_size = batch_size;
|
||||
|
||||
std::cout << "Running MSM kernel with on-host inputs" << std::endl;
|
||||
// Create two events to time the MSM kernel
|
||||
cudaStream_t stream = config.ctx.stream;
|
||||
cudaEvent_t start, stop;
|
||||
float time;
|
||||
cudaEventCreate(&start);
|
||||
cudaEventCreate(&stop);
|
||||
// Record the start event on the stream
|
||||
cudaEventRecord(start, stream);
|
||||
// Execute the MSM kernel
|
||||
msm::MSM<scalar_t, affine_t, projective_t>(scalars, points, msm_size, config, &result);
|
||||
// Record the stop event on the stream
|
||||
cudaEventRecord(stop, stream);
|
||||
// Wait for the stop event to complete
|
||||
cudaEventSynchronize(stop);
|
||||
// Calculate the elapsed time between the start and stop events
|
||||
cudaEventElapsedTime(&time, start, stop);
|
||||
// Destroy the events
|
||||
cudaEventDestroy(start);
|
||||
cudaEventDestroy(stop);
|
||||
// Print the elapsed time
|
||||
std::cout << "Kernel runtime: " << std::fixed << std::setprecision(3) << time * 1e-3 << " sec." << std::endl;
|
||||
// Print the result
|
||||
bn254_msm_cuda(scalars, points, msm_size, config, &result);
|
||||
std::cout << projective_t::to_affine(result) << std::endl;
|
||||
|
||||
std::cout << "Copying inputs on-device" << std::endl;
|
||||
@@ -89,24 +66,9 @@ int main(int argc, char* argv[])
|
||||
config.are_points_on_device = true;
|
||||
|
||||
std::cout << "Running MSM kernel with on-device inputs" << std::endl;
|
||||
// Create two events to time the MSM kernel
|
||||
cudaEventCreate(&start);
|
||||
cudaEventCreate(&stop);
|
||||
// Record the start event on the stream
|
||||
cudaEventRecord(start, stream);
|
||||
// Execute the MSM kernel
|
||||
msm::MSM<scalar_t, affine_t, projective_t>(scalars_d, points_d, msm_size, config, result_d);
|
||||
// Record the stop event on the stream
|
||||
cudaEventRecord(stop, stream);
|
||||
// Wait for the stop event to complete
|
||||
cudaEventSynchronize(stop);
|
||||
// Calculate the elapsed time between the start and stop events
|
||||
cudaEventElapsedTime(&time, start, stop);
|
||||
// Destroy the events
|
||||
cudaEventDestroy(start);
|
||||
cudaEventDestroy(stop);
|
||||
// Print the elapsed time
|
||||
std::cout << "Kernel runtime: " << std::fixed << std::setprecision(3) << time * 1e-3 << " sec." << std::endl;
|
||||
bn254_msm_cuda(scalars_d, points_d, msm_size, config, result_d);
|
||||
|
||||
// Copy the result back to the host
|
||||
cudaMemcpy(&result, result_d, sizeof(projective_t), cudaMemcpyDeviceToHost);
|
||||
// Print the result
|
||||
@@ -123,23 +85,14 @@ int main(int argc, char* argv[])
|
||||
std::cout << "Generating random inputs on-host" << std::endl;
|
||||
// use the same scalars
|
||||
g2_affine_t* g2_points = new g2_affine_t[N];
|
||||
g2_projective_t::RandHostManyAffine(g2_points, N);
|
||||
g2_projective_t::rand_host_many_affine(g2_points, N);
|
||||
|
||||
std::cout << "Reconfiguring MSM to use on-host inputs" << std::endl;
|
||||
config.are_results_on_device = false;
|
||||
config.are_scalars_on_device = false;
|
||||
config.are_points_on_device = false;
|
||||
g2_projective_t g2_result;
|
||||
cudaEventCreate(&start);
|
||||
cudaEventCreate(&stop);
|
||||
cudaEventRecord(start, stream);
|
||||
msm::MSM<scalar_t, g2_affine_t, g2_projective_t>(scalars, g2_points, msm_size, config, &g2_result);
|
||||
cudaEventRecord(stop, stream);
|
||||
cudaEventSynchronize(stop);
|
||||
cudaEventElapsedTime(&time, start, stop);
|
||||
cudaEventDestroy(start);
|
||||
cudaEventDestroy(stop);
|
||||
std::cout << "Kernel runtime: " << std::fixed << std::setprecision(3) << time * 1e-3 << " sec." << std::endl;
|
||||
bn254_g2_msm_cuda(scalars, g2_points, msm_size, config, &g2_result);
|
||||
std::cout << g2_projective_t::to_affine(g2_result) << std::endl;
|
||||
|
||||
std::cout << "Copying inputs on-device" << std::endl;
|
||||
@@ -157,16 +110,7 @@ int main(int argc, char* argv[])
|
||||
config.are_points_on_device = true;
|
||||
|
||||
std::cout << "Running MSM kernel with on-device inputs" << std::endl;
|
||||
cudaEventCreate(&start);
|
||||
cudaEventCreate(&stop);
|
||||
cudaEventRecord(start, stream);
|
||||
msm::MSM<scalar_t, g2_affine_t, g2_projective_t>(scalars_d, g2_points_d, msm_size, config, g2_result_d);
|
||||
cudaEventRecord(stop, stream);
|
||||
cudaEventSynchronize(stop);
|
||||
cudaEventElapsedTime(&time, start, stop);
|
||||
cudaEventDestroy(start);
|
||||
cudaEventDestroy(stop);
|
||||
std::cout << "Kernel runtime: " << std::fixed << std::setprecision(3) << time * 1e-3 << " sec." << std::endl;
|
||||
bn254_g2_msm_cuda(scalars_d, g2_points_d, msm_size, config, g2_result_d);
|
||||
cudaMemcpy(&g2_result, g2_result_d, sizeof(g2_projective_t), cudaMemcpyDeviceToHost);
|
||||
std::cout << g2_projective_t::to_affine(g2_result) << std::endl;
|
||||
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
#!/bin/bash
|
||||
./build/example
|
||||
./build/example/example
|
||||
|
||||
@@ -14,11 +14,13 @@ set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr")
|
||||
set(CMAKE_CUDA_FLAGS_RELEASE "")
|
||||
set(CMAKE_CUDA_FLAGS_DEBUG "${CMAKE_CUDA_FLAGS_DEBUG} -g -G -O0")
|
||||
# change the path to your Icicle location
|
||||
include_directories("../../../icicle")
|
||||
add_executable(
|
||||
example
|
||||
example.cu
|
||||
)
|
||||
target_include_directories(example PRIVATE "../../../icicle/include")
|
||||
target_link_libraries(example ${CMAKE_SOURCE_DIR}/build/icicle/lib/libingo_curve_bn254.a)
|
||||
target_link_libraries(example ${CMAKE_SOURCE_DIR}/build/icicle/lib/libingo_field_bn254.a)
|
||||
find_library(NVML_LIBRARY nvidia-ml PATHS /usr/local/cuda/targets/x86_64-linux/lib/stubs/ )
|
||||
target_link_libraries(example ${NVML_LIBRARY})
|
||||
set_target_properties(example PROPERTIES CUDA_SEPARABLE_COMPILATION ON)
|
||||
|
||||
@@ -3,7 +3,13 @@
|
||||
# Exit immediately on error
|
||||
set -e
|
||||
|
||||
rm -rf build
|
||||
mkdir -p build
|
||||
cmake -S . -B build
|
||||
cmake --build build
|
||||
mkdir -p build/example
|
||||
mkdir -p build/icicle
|
||||
|
||||
# Configure and build Icicle
|
||||
cmake -S ../../../icicle/ -B build/icicle -DCMAKE_BUILD_TYPE=Release -DCURVE=bn254
|
||||
cmake --build build/icicle
|
||||
|
||||
# Configure and build the example application
|
||||
cmake -S . -B build/example
|
||||
cmake --build build/example
|
||||
@@ -1,16 +1,13 @@
|
||||
#include <iostream>
|
||||
#include <thread>
|
||||
#include <chrono>
|
||||
|
||||
#include <nvml.h>
|
||||
|
||||
// select the curve
|
||||
#define CURVE_ID 2
|
||||
#include "appUtils/poseidon/poseidon.cu"
|
||||
#include "utils/error_handler.cuh"
|
||||
#include "api/bn254.h"
|
||||
#include "gpu-utils/error_handler.cuh"
|
||||
|
||||
using namespace poseidon;
|
||||
using namespace curve_config;
|
||||
using namespace bn254;
|
||||
|
||||
void checkCudaError(cudaError_t error) {
|
||||
if (error != cudaSuccess) {
|
||||
@@ -39,7 +36,7 @@ void threadPoseidon(device_context::DeviceContext ctx, unsigned size_partition,
|
||||
false, // loop_state
|
||||
false, // is_async
|
||||
};
|
||||
cudaError_t err = poseidon_hash<scalar_t, size_col+1>(layers, column_hashes, (size_t) size_partition, *constants, column_config);
|
||||
cudaError_t err = bn254_poseidon_hash_cuda(layers, column_hashes, (size_t) size_partition, size_col, *constants, column_config);
|
||||
checkCudaError(err);
|
||||
}
|
||||
|
||||
@@ -109,13 +106,13 @@ int main() {
|
||||
CHECK_ALLOC(column_hash1);
|
||||
|
||||
PoseidonConstants<scalar_t> column_constants0, column_constants1;
|
||||
init_optimized_poseidon_constants<scalar_t>(size_col, ctx0, &column_constants0);
|
||||
bn254_init_optimized_poseidon_constants_cuda(size_col, ctx0, &column_constants0);
|
||||
cudaError_t err_result = CHK_STICKY(cudaSetDevice(ctx1.device_id));
|
||||
if (err_result != cudaSuccess) {
|
||||
std::cerr << "CUDA error: " << cudaGetErrorString(err_result) << std::endl;
|
||||
return;
|
||||
}
|
||||
init_optimized_poseidon_constants<scalar_t>(size_col, ctx1, &column_constants1);
|
||||
bn254_init_optimized_poseidon_constants_cuda(size_col, ctx1, &column_constants1);
|
||||
|
||||
std::cout << "Parallel execution of Poseidon threads" << std::endl;
|
||||
START_TIMER(parallel);
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
#!/bin/bash
|
||||
./build/example
|
||||
./build/example/example
|
||||
|
||||
@@ -8,17 +8,17 @@ if (${CMAKE_VERSION} VERSION_LESS "3.24.0")
|
||||
else()
|
||||
set(CMAKE_CUDA_ARCHITECTURES native) # on 3.24+, on earlier it is ignored, and the target is not passed
|
||||
endif ()
|
||||
project(icicle LANGUAGES CUDA CXX)
|
||||
project(example LANGUAGES CUDA CXX)
|
||||
|
||||
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr")
|
||||
set(CMAKE_CUDA_FLAGS_RELEASE "")
|
||||
set(CMAKE_CUDA_FLAGS_DEBUG "${CMAKE_CUDA_FLAGS_DEBUG} -g -G -O0")
|
||||
# change the path to your Icicle location
|
||||
include_directories("../../../icicle")
|
||||
add_executable(
|
||||
example
|
||||
example.cu
|
||||
)
|
||||
target_include_directories(example PRIVATE "../../../icicle/include")
|
||||
target_link_libraries(example ${CMAKE_SOURCE_DIR}/build/icicle/lib/libingo_field_bn254.a)
|
||||
find_library(NVML_LIBRARY nvidia-ml PATHS /usr/local/cuda/targets/x86_64-linux/lib/stubs/ )
|
||||
target_link_libraries(example ${NVML_LIBRARY})
|
||||
set_target_properties(example PROPERTIES CUDA_SEPARABLE_COMPILATION ON)
|
||||
|
||||
@@ -3,7 +3,13 @@
|
||||
# Exit immediately on error
|
||||
set -e
|
||||
|
||||
rm -rf build
|
||||
mkdir -p build
|
||||
cmake -S . -B build
|
||||
cmake --build build
|
||||
mkdir -p build/example
|
||||
mkdir -p build/icicle
|
||||
|
||||
# Configure and build Icicle
|
||||
cmake -S ../../../icicle/ -B build/icicle -DMSM=OFF -DCMAKE_BUILD_TYPE=Release -DCURVE=bn254
|
||||
cmake --build build/icicle
|
||||
|
||||
# Configure and build the example application
|
||||
cmake -S . -B build/example
|
||||
cmake --build build/example
|
||||
@@ -3,22 +3,21 @@
|
||||
#include <chrono>
|
||||
#include <nvml.h>
|
||||
|
||||
#define CURVE_ID 1
|
||||
#include "curves/curve_config.cuh"
|
||||
#include "utils/device_context.cuh"
|
||||
#include "utils/vec_ops.cu"
|
||||
#include "api/bn254.h"
|
||||
#include "vec_ops/vec_ops.cuh"
|
||||
|
||||
using namespace curve_config;
|
||||
using namespace vec_ops;
|
||||
using namespace bn254;
|
||||
|
||||
typedef scalar_t T;
|
||||
|
||||
int vector_mult(T* vec_b, T* vec_a, T* vec_result, size_t n_elments, device_context::DeviceContext ctx)
|
||||
{
|
||||
vec_ops::VecOpsConfig<scalar_t> config = vec_ops::DefaultVecOpsConfig<scalar_t>();
|
||||
vec_ops::VecOpsConfig config = vec_ops::DefaultVecOpsConfig();
|
||||
config.is_a_on_device = true;
|
||||
config.is_b_on_device = true;
|
||||
config.is_result_on_device = true;
|
||||
cudaError_t err = vec_ops::Mul<T>(vec_a, vec_b, n_elments, config, vec_result);
|
||||
cudaError_t err = bn254_mul_cuda(vec_a, vec_b, n_elments, config, vec_result);
|
||||
if (err != cudaSuccess) {
|
||||
std::cerr << "Failed to multiply vectors - " << cudaGetErrorString(err) << std::endl;
|
||||
return 0;
|
||||
@@ -63,8 +62,8 @@ int main(int argc, char** argv)
|
||||
T* host_in1 = (T*)malloc(vector_size * sizeof(T));
|
||||
T* host_in2 = (T*)malloc(vector_size * sizeof(T));
|
||||
std::cout << "Initializing vectors with random data" << std::endl;
|
||||
T::RandHostMany(host_in1, vector_size);
|
||||
T::RandHostMany(host_in2, vector_size);
|
||||
T::rand_host_many(host_in1, vector_size);
|
||||
T::rand_host_many(host_in2, vector_size);
|
||||
// device data
|
||||
device_context::DeviceContext ctx = device_context::get_default_device_context();
|
||||
T* device_in1;
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
#!/bin/bash
|
||||
./build/example
|
||||
./build/example/example
|
||||
|
||||
@@ -8,19 +8,16 @@ if (${CMAKE_VERSION} VERSION_LESS "3.24.0")
|
||||
else()
|
||||
set(CMAKE_CUDA_ARCHITECTURES native) # on 3.24+, on earlier it is ignored, and the target is not passed
|
||||
endif ()
|
||||
project(icicle LANGUAGES CUDA CXX)
|
||||
project(example LANGUAGES CUDA CXX)
|
||||
|
||||
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr")
|
||||
set(CMAKE_CUDA_FLAGS_RELEASE "")
|
||||
set(CMAKE_CUDA_FLAGS_DEBUG "${CMAKE_CUDA_FLAGS_DEBUG} -g -G -O0")
|
||||
# change the path to your Icicle location
|
||||
include_directories("../../../icicle")
|
||||
|
||||
add_executable(
|
||||
example
|
||||
example.cu
|
||||
)
|
||||
|
||||
find_library(NVML_LIBRARY nvidia-ml PATHS /usr/local/cuda-12.0/targets/x86_64-linux/lib/stubs/ )
|
||||
target_link_libraries(example ${NVML_LIBRARY})
|
||||
set_target_properties(example PROPERTIES CUDA_SEPARABLE_COMPILATION ON)
|
||||
|
||||
target_include_directories(example PRIVATE "../../../icicle/include")
|
||||
target_link_libraries(example ${CMAKE_SOURCE_DIR}/build/icicle/lib/libingo_field_bn254.a)
|
||||
set_target_properties(example PROPERTIES CUDA_SEPARABLE_COMPILATION ON)
|
||||
@@ -3,9 +3,13 @@
|
||||
# Exit immediately on error
|
||||
set -e
|
||||
|
||||
rm -rf build
|
||||
mkdir -p build
|
||||
cmake -S . -B build
|
||||
cmake --build build
|
||||
mkdir -p build/example
|
||||
mkdir -p build/icicle
|
||||
|
||||
# Configure and build Icicle
|
||||
cmake -S ../../../icicle/ -B build/icicle -DMSM=OFF -DCMAKE_BUILD_TYPE=Release -DCURVE=bn254
|
||||
cmake --build build/icicle
|
||||
|
||||
# Configure and build the example application
|
||||
cmake -S . -B build/example
|
||||
cmake --build build/example
|
||||
@@ -1,12 +1,11 @@
|
||||
#include <chrono>
|
||||
#include <iostream>
|
||||
|
||||
// select the curve
|
||||
#define CURVE_ID 1
|
||||
// include NTT template
|
||||
#include "appUtils/ntt/ntt.cu"
|
||||
#include "appUtils/ntt/kernel_ntt.cu"
|
||||
using namespace curve_config;
|
||||
|
||||
#include "curves/params/bn254.cuh"
|
||||
#include "api/bn254.h"
|
||||
using namespace bn254;
|
||||
using namespace ntt;
|
||||
|
||||
// Operate on scalars
|
||||
@@ -86,14 +85,14 @@ int main(int argc, char* argv[])
|
||||
std::cout << "Running NTT with on-host data" << std::endl;
|
||||
// Create a device context
|
||||
auto ctx = device_context::get_default_device_context();
|
||||
const S basic_root = S::omega(log_ntt_size /*NTT_LOG_SIZE*/);
|
||||
InitDomain(basic_root, ctx);
|
||||
S basic_root = S::omega(log_ntt_size /*NTT_LOG_SIZE*/);
|
||||
bn254_initialize_domain(&basic_root, ctx, true);
|
||||
// Create an NTTConfig instance
|
||||
NTTConfig<S> config = DefaultNTTConfig<S>();
|
||||
NTTConfig<S> config = default_ntt_config<S>();
|
||||
config.ntt_algorithm = NttAlgorithm::MixedRadix;
|
||||
config.batch_size = nof_ntts;
|
||||
START_TIMER(MixedRadix);
|
||||
cudaError_t err = NTT<S, E>(input, ntt_size, NTTDir::kForward, config, output);
|
||||
cudaError_t err = bn254_ntt_cuda(input, ntt_size, NTTDir::kForward, config, output);
|
||||
END_TIMER(MixedRadix, "MixedRadix NTT");
|
||||
|
||||
std::cout << "Validating output" << std::endl;
|
||||
@@ -101,7 +100,7 @@ int main(int argc, char* argv[])
|
||||
|
||||
config.ntt_algorithm = NttAlgorithm::Radix2;
|
||||
START_TIMER(Radix2);
|
||||
err = NTT<S, E>(input, ntt_size, NTTDir::kForward, config, output);
|
||||
err = bn254_ntt_cuda(input, ntt_size, NTTDir::kForward, config, output);
|
||||
END_TIMER(Radix2, "Radix2 NTT");
|
||||
|
||||
std::cout << "Validating output" << std::endl;
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
#!/bin/bash
|
||||
./build/example
|
||||
./build/example/example
|
||||
@@ -8,18 +8,19 @@ if (${CMAKE_VERSION} VERSION_LESS "3.24.0")
|
||||
else()
|
||||
set(CMAKE_CUDA_ARCHITECTURES native) # on 3.24+, on earlier it is ignored, and the target is not passed
|
||||
endif ()
|
||||
project(icicle LANGUAGES CUDA CXX)
|
||||
project(example LANGUAGES CUDA CXX)
|
||||
|
||||
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr")
|
||||
set(CMAKE_CUDA_FLAGS_RELEASE "")
|
||||
set(CMAKE_CUDA_FLAGS_DEBUG "${CMAKE_CUDA_FLAGS_DEBUG} -g -G -O0")
|
||||
# change the path to your Icicle location
|
||||
include_directories("../../../icicle")
|
||||
add_executable(
|
||||
example
|
||||
example.cu
|
||||
)
|
||||
|
||||
target_include_directories(example PRIVATE "../../../icicle/include")
|
||||
target_link_libraries(example ${CMAKE_SOURCE_DIR}/build/icicle/lib/libingo_curve_bn254.a)
|
||||
target_link_libraries(example ${CMAKE_SOURCE_DIR}/build/icicle/lib/libingo_field_bn254.a)
|
||||
find_library(NVML_LIBRARY nvidia-ml PATHS /usr/local/cuda/targets/x86_64-linux/lib/stubs/ )
|
||||
target_link_libraries(example ${NVML_LIBRARY})
|
||||
set_target_properties(example PROPERTIES CUDA_SEPARABLE_COMPILATION ON)
|
||||
|
||||
|
||||
@@ -3,7 +3,13 @@
|
||||
# Exit immediately on error
|
||||
set -e
|
||||
|
||||
rm -rf build
|
||||
mkdir -p build
|
||||
cmake -S . -B build
|
||||
cmake --build build
|
||||
mkdir -p build/example
|
||||
mkdir -p build/icicle
|
||||
|
||||
# Configure and build Icicle
|
||||
cmake -S ../../../icicle/ -B build/icicle -DCMAKE_BUILD_TYPE=Release -DCURVE=bn254
|
||||
cmake --build build/icicle
|
||||
|
||||
# Configure and build the example application
|
||||
cmake -S . -B build/example
|
||||
cmake --build build/example
|
||||
@@ -4,9 +4,9 @@
|
||||
#include <cassert>
|
||||
#include <nvml.h>
|
||||
|
||||
#define CURVE_ID BN254
|
||||
#include "appUtils/msm/msm.cu"
|
||||
using namespace curve_config;
|
||||
#include "api/bn254.h"
|
||||
#include "msm/msm.cuh"
|
||||
using namespace bn254;
|
||||
|
||||
typedef point_field_t T;
|
||||
|
||||
@@ -138,15 +138,15 @@ int main(int argc, char** argv)
|
||||
std::cout << "Generating commitment vector" << std::endl;
|
||||
projective_t result;
|
||||
scalar_t* scalars = new scalar_t[N+1];
|
||||
scalar_t::RandHostMany(scalars, N);
|
||||
scalar_t::rand_host_many(scalars, N);
|
||||
|
||||
std::cout << "Generating salt" << std::endl;
|
||||
scalars[N] = scalar_t::rand_host();
|
||||
|
||||
std::cout << "Executing MSM" << std::endl;
|
||||
auto config = msm::DefaultMSMConfig<scalar_t>();
|
||||
auto config = msm::default_msm_config();
|
||||
START_TIMER(msm);
|
||||
msm::MSM<scalar_t, affine_t, projective_t>(scalars, points, N+1, config, &result);
|
||||
bn254_msm_cuda(scalars, points, N+1, config, &result);
|
||||
END_TIMER(msm, "Time to execute MSM");
|
||||
|
||||
std::cout << "Computed commitment: " << result << std::endl;
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
#!/bin/bash
|
||||
./build/example
|
||||
./build/example/example
|
||||
|
||||
@@ -8,7 +8,7 @@ if (${CMAKE_VERSION} VERSION_LESS "3.24.0")
|
||||
else()
|
||||
set(CMAKE_CUDA_ARCHITECTURES native) # on 3.24+, on earlier it is ignored, and the target is not passed
|
||||
endif ()
|
||||
project(icicle LANGUAGES CUDA CXX)
|
||||
project(example LANGUAGES CUDA CXX)
|
||||
|
||||
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr")
|
||||
set(CMAKE_CUDA_FLAGS_RELEASE "")
|
||||
@@ -20,7 +20,8 @@ add_executable(
|
||||
example.cu
|
||||
)
|
||||
|
||||
find_library(NVML_LIBRARY nvidia-ml PATHS /usr/local/cuda-12.0/targets/x86_64-linux/lib/stubs/ )
|
||||
target_include_directories(example PRIVATE "../../../icicle/include")
|
||||
target_link_libraries(example ${CMAKE_SOURCE_DIR}/build/icicle/lib/libingo_field_bn254.a)
|
||||
find_library(NVML_LIBRARY nvidia-ml PATHS /usr/local/cuda/targets/x86_64-linux/lib/stubs/ )
|
||||
target_link_libraries(example ${NVML_LIBRARY})
|
||||
set_target_properties(example PROPERTIES CUDA_SEPARABLE_COMPILATION ON)
|
||||
|
||||
set_target_properties(example PROPERTIES CUDA_SEPARABLE_COMPILATION ON)
|
||||
@@ -3,9 +3,13 @@
|
||||
# Exit immediately on error
|
||||
set -e
|
||||
|
||||
rm -rf build
|
||||
mkdir -p build
|
||||
cmake -S . -B build
|
||||
cmake --build build
|
||||
mkdir -p build/example
|
||||
mkdir -p build/icicle
|
||||
|
||||
# Configure and build Icicle
|
||||
cmake -S ../../../icicle/ -B build/icicle -DMSM=OFF -DCMAKE_BUILD_TYPE=Release -DCURVE=bn254
|
||||
cmake --build build/icicle
|
||||
|
||||
# Configure and build the example application
|
||||
cmake -S . -B build/example
|
||||
cmake --build build/example
|
||||
@@ -1,18 +1,14 @@
|
||||
#define CURVE_ID BLS12_381
|
||||
|
||||
#include <chrono>
|
||||
#include <iostream>
|
||||
#include <vector>
|
||||
|
||||
#include "curves/curve_config.cuh"
|
||||
#include "appUtils/ntt/ntt.cu"
|
||||
#include "appUtils/ntt/kernel_ntt.cu"
|
||||
#include "utils/vec_ops.cu"
|
||||
#include "utils/error_handler.cuh"
|
||||
#include <memory>
|
||||
|
||||
typedef curve_config::scalar_t test_scalar;
|
||||
typedef curve_config::scalar_t test_data;
|
||||
#include "api/bn254.h"
|
||||
#include "gpu-utils/error_handler.cuh"
|
||||
|
||||
using namespace bn254;
|
||||
typedef scalar_t test_scalar;
|
||||
typedef scalar_t test_data;
|
||||
|
||||
void random_samples(test_data* res, uint32_t count)
|
||||
{
|
||||
@@ -45,7 +41,7 @@ int main(int argc, char** argv)
|
||||
CHK_IF_RETURN(cudaFree(nullptr)); // init GPU context
|
||||
|
||||
// init domain
|
||||
auto ntt_config = ntt::DefaultNTTConfig<test_scalar>();
|
||||
auto ntt_config = ntt::default_ntt_config<test_scalar>();
|
||||
const bool is_radix2_alg = (argc > 1) ? atoi(argv[1]) : false;
|
||||
ntt_config.ntt_algorithm = is_radix2_alg ? ntt::NttAlgorithm::Radix2 : ntt::NttAlgorithm::MixedRadix;
|
||||
|
||||
@@ -55,8 +51,8 @@ int main(int argc, char** argv)
|
||||
CHK_IF_RETURN(cudaEventCreate(&start));
|
||||
CHK_IF_RETURN(cudaEventCreate(&stop));
|
||||
|
||||
const test_scalar basic_root = test_scalar::omega(NTT_LOG_SIZE);
|
||||
ntt::InitDomain(basic_root, ntt_config.ctx, true /*=fast_twidddles_mode*/);
|
||||
test_scalar basic_root = test_scalar::omega(NTT_LOG_SIZE);
|
||||
bn254_initialize_domain(&basic_root, ntt_config.ctx, true /*=fast_twidddles_mode*/);
|
||||
|
||||
// (1) cpu allocation
|
||||
auto CpuA = std::make_unique<test_data[]>(NTT_SIZE);
|
||||
@@ -79,26 +75,25 @@ int main(int argc, char** argv)
|
||||
ntt_config.are_inputs_on_device = false;
|
||||
ntt_config.are_outputs_on_device = true;
|
||||
ntt_config.ordering = ntt::Ordering::kNM;
|
||||
CHK_IF_RETURN(ntt::NTT(CpuA.get(), NTT_SIZE, ntt::NTTDir::kForward, ntt_config, GpuA));
|
||||
CHK_IF_RETURN(ntt::NTT(CpuB.get(), NTT_SIZE, ntt::NTTDir::kForward, ntt_config, GpuB));
|
||||
CHK_IF_RETURN(bn254_ntt_cuda(CpuA.get(), NTT_SIZE, ntt::NTTDir::kForward, ntt_config, GpuA));
|
||||
CHK_IF_RETURN(bn254_ntt_cuda(CpuB.get(), NTT_SIZE, ntt::NTTDir::kForward, ntt_config, GpuB));
|
||||
|
||||
// (4) multiply A,B
|
||||
CHK_IF_RETURN(cudaMallocAsync(&MulGpu, sizeof(test_data) * NTT_SIZE, ntt_config.ctx.stream));
|
||||
vec_ops::VecOpsConfig<test_data> config{
|
||||
vec_ops::VecOpsConfig config{
|
||||
ntt_config.ctx,
|
||||
true, // is_a_on_device
|
||||
true, // is_b_on_device
|
||||
true, // is_result_on_device
|
||||
false, // is_montgomery
|
||||
false // is_async
|
||||
};
|
||||
CHK_IF_RETURN(vec_ops::Mul(GpuA, GpuB, NTT_SIZE, config, MulGpu));
|
||||
CHK_IF_RETURN(bn254_mul_cuda(GpuA, GpuB, NTT_SIZE, config, MulGpu));
|
||||
|
||||
// (5) INTT (in place)
|
||||
ntt_config.are_inputs_on_device = true;
|
||||
ntt_config.are_outputs_on_device = true;
|
||||
ntt_config.ordering = ntt::Ordering::kMN;
|
||||
CHK_IF_RETURN(ntt::NTT(MulGpu, NTT_SIZE, ntt::NTTDir::kInverse, ntt_config, MulGpu));
|
||||
CHK_IF_RETURN(bn254_ntt_cuda(MulGpu, NTT_SIZE, ntt::NTTDir::kInverse, ntt_config, MulGpu));
|
||||
|
||||
CHK_IF_RETURN(cudaFreeAsync(GpuA, ntt_config.ctx.stream));
|
||||
CHK_IF_RETURN(cudaFreeAsync(GpuB, ntt_config.ctx.stream));
|
||||
@@ -117,7 +112,7 @@ int main(int argc, char** argv)
|
||||
benchmark(false); // warmup
|
||||
benchmark(true, 20);
|
||||
|
||||
ntt::ReleaseDomain<test_scalar>(ntt_config.ctx);
|
||||
bn254_release_domain(ntt_config.ctx);
|
||||
CHK_IF_RETURN(cudaStreamSynchronize(ntt_config.ctx.stream));
|
||||
|
||||
return 0;
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
#!/bin/bash
|
||||
./build/example 1 # radix2
|
||||
./build/example 0 # mixed-radix
|
||||
./build/example/example 1 # radix2
|
||||
./build/example/example 0 # mixed-radix
|
||||
|
||||
@@ -13,13 +13,11 @@ project(icicle LANGUAGES CUDA CXX)
|
||||
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr")
|
||||
set(CMAKE_CUDA_FLAGS_RELEASE "")
|
||||
set(CMAKE_CUDA_FLAGS_DEBUG "${CMAKE_CUDA_FLAGS_DEBUG} -g -G -O0")
|
||||
# change the path to your Icicle location
|
||||
include_directories("../../../icicle")
|
||||
|
||||
add_executable(
|
||||
example
|
||||
example.cu
|
||||
)
|
||||
|
||||
find_library(NVML_LIBRARY nvidia-ml PATHS /usr/local/cuda-12.0/targets/x86_64-linux/lib/stubs/ )
|
||||
target_link_libraries(example ${NVML_LIBRARY})
|
||||
target_include_directories(example PRIVATE "../../../icicle/include")
|
||||
target_link_libraries(example ${CMAKE_SOURCE_DIR}/build/icicle/lib/libingo_field_bn254.a)
|
||||
set_target_properties(example PROPERTIES CUDA_SEPARABLE_COMPILATION ON)
|
||||
@@ -3,7 +3,13 @@
|
||||
# Exit immediately on error
|
||||
set -e
|
||||
|
||||
rm -rf build
|
||||
mkdir -p build
|
||||
cmake -S . -B build
|
||||
cmake --build build
|
||||
mkdir -p build/example
|
||||
mkdir -p build/icicle
|
||||
|
||||
# Configure and build Icicle
|
||||
cmake -S ../../../icicle/ -B build/icicle -DMSM=OFF -DCMAKE_BUILD_TYPE=Release -DCURVE=bn254
|
||||
cmake --build build/icicle
|
||||
|
||||
# Configure and build the example application
|
||||
cmake -S . -B build/example
|
||||
cmake --build build/example
|
||||
@@ -2,14 +2,12 @@
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
|
||||
// select the curve
|
||||
#define CURVE_ID 2
|
||||
// include Poseidon template
|
||||
#include "appUtils/poseidon/poseidon.cu"
|
||||
#include "api/bn254.h"
|
||||
#include "curves/params/bn254.cuh"
|
||||
using namespace poseidon;
|
||||
using namespace curve_config;
|
||||
using namespace bn254;
|
||||
|
||||
device_context::DeviceContext ctx= device_context::get_default_device_context();
|
||||
device_context::DeviceContext ctx = device_context::get_default_device_context();
|
||||
|
||||
// location of a tree node in the array for a given level and offset
|
||||
inline uint32_t tree_index(uint32_t level, uint32_t offset) { return (1 << level) - 1 + offset; }
|
||||
@@ -21,8 +19,7 @@ void build_tree(
|
||||
for (uint32_t level = tree_height - 1; level > 0; level--) {
|
||||
const uint32_t next_level = level - 1;
|
||||
const uint32_t next_level_width = 1 << next_level;
|
||||
poseidon_hash<scalar_t, 2+1>(
|
||||
&tree[tree_index(level, 0)], &tree[tree_index(next_level, 0)], next_level_width, *constants, config);
|
||||
bn254_poseidon_hash_cuda(&tree[tree_index(level, 0)], &tree[tree_index(next_level, 0)], next_level_width, 2, *constants, config);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -85,7 +82,7 @@ uint32_t validate_proof(
|
||||
hashes_in[1] = level_hash;
|
||||
}
|
||||
// next level hash
|
||||
poseidon_hash<scalar_t, 2+1>(hashes_in, hash_out, 1, *constants, config);
|
||||
bn254_poseidon_hash_cuda(hashes_in, hash_out, 1, 2, *constants, config);
|
||||
level_hash = hash_out[0];
|
||||
}
|
||||
return proof_hash[0] == level_hash;
|
||||
@@ -116,14 +113,14 @@ int main(int argc, char* argv[])
|
||||
}
|
||||
std::cout << "Hashing blocks into tree leaves..." << std::endl;
|
||||
PoseidonConstants<scalar_t> constants;
|
||||
init_optimized_poseidon_constants<scalar_t>(data_arity, ctx, &constants);
|
||||
PoseidonConfig config = default_poseidon_config<scalar_t>(data_arity+1);
|
||||
poseidon_hash<curve_config::scalar_t, data_arity+1>(data, &tree[tree_index(leaf_level, 0)], tree_width, constants, config);
|
||||
bn254_init_optimized_poseidon_constants_cuda(data_arity, ctx, &constants);
|
||||
PoseidonConfig config = default_poseidon_config(data_arity+1);
|
||||
bn254_poseidon_hash_cuda(data, &tree[tree_index(leaf_level, 0)], tree_width, 4, constants, config);
|
||||
|
||||
std::cout << "3. Building Merkle tree" << std::endl;
|
||||
PoseidonConstants<scalar_t> tree_constants;
|
||||
init_optimized_poseidon_constants<scalar_t>(tree_arity, ctx, &tree_constants);
|
||||
PoseidonConfig tree_config = default_poseidon_config<scalar_t>(tree_arity+1);
|
||||
bn254_init_optimized_poseidon_constants_cuda(tree_arity, ctx, &tree_constants);
|
||||
PoseidonConfig tree_config = default_poseidon_config(tree_arity+1);
|
||||
build_tree(tree_height, tree, &tree_constants, tree_config);
|
||||
|
||||
std::cout << "4. Generate membership proof" << std::endl;
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
#!/bin/bash
|
||||
./build/example
|
||||
./build/example/example
|
||||
@@ -8,12 +8,11 @@ icicle-cuda-runtime = { path = "../../../wrappers/rust/icicle-cuda-runtime" }
|
||||
icicle-core = { path = "../../../wrappers/rust/icicle-core" }
|
||||
icicle-bn254 = { path = "../../../wrappers/rust/icicle-curves/icicle-bn254", features = ["g2"] }
|
||||
icicle-bls12-377 = { path = "../../../wrappers/rust/icicle-curves/icicle-bls12-377" }
|
||||
ark-bn254 = { version = "0.4.0", optional = true}
|
||||
ark-bls12-377 = { version = "0.4.0", optional = true}
|
||||
ark-ec = { version = "0.4.0", optional = true}
|
||||
ark-bn254 = { version = "0.4.0", optional = true }
|
||||
ark-bls12-377 = { version = "0.4.0", optional = true }
|
||||
ark-ec = { version = "0.4.0", optional = true }
|
||||
clap = { version = "<=4.4.12", features = ["derive"] }
|
||||
|
||||
[features]
|
||||
arkworks = ["ark-bn254", "ark-bls12-377", "ark-ec", "icicle-core/arkworks", "icicle-bn254/arkworks", "icicle-bls12-377/arkworks"]
|
||||
profile = []
|
||||
g2 = []
|
||||
|
||||
@@ -4,7 +4,10 @@ use icicle_bls12_377::curve::{
|
||||
CurveCfg as BLS12377CurveCfg, G1Projective as BLS12377G1Projective, ScalarCfg as BLS12377ScalarCfg,
|
||||
};
|
||||
|
||||
use icicle_cuda_runtime::{memory::HostOrDeviceSlice, stream::CudaStream};
|
||||
use icicle_cuda_runtime::{
|
||||
memory::{DeviceVec, HostSlice},
|
||||
stream::CudaStream,
|
||||
};
|
||||
|
||||
use icicle_core::{curve::Curve, msm, traits::GenerateRandom};
|
||||
|
||||
@@ -57,18 +60,18 @@ fn main() {
|
||||
log_size, size
|
||||
);
|
||||
// Setting Bn254 points and scalars
|
||||
let points = HostOrDeviceSlice::Host(upper_points[..size].to_vec());
|
||||
let g2_points = HostOrDeviceSlice::Host(g2_upper_points[..size].to_vec());
|
||||
let scalars = HostOrDeviceSlice::Host(upper_scalars[..size].to_vec());
|
||||
let points = HostSlice::from_slice(&upper_points[..size]);
|
||||
let g2_points = HostSlice::from_slice(&g2_upper_points[..size]);
|
||||
let scalars = HostSlice::from_slice(&upper_scalars[..size]);
|
||||
|
||||
// Setting bls12377 points and scalars
|
||||
// let points_bls12377 = &upper_points_bls12377[..size];
|
||||
let points_bls12377 = HostOrDeviceSlice::Host(upper_points_bls12377[..size].to_vec()); // &upper_points_bls12377[..size];
|
||||
let scalars_bls12377 = HostOrDeviceSlice::Host(upper_scalars_bls12377[..size].to_vec());
|
||||
let points_bls12377 = HostSlice::from_slice(&upper_points_bls12377[..size]); // &upper_points_bls12377[..size];
|
||||
let scalars_bls12377 = HostSlice::from_slice(&upper_scalars_bls12377[..size]);
|
||||
|
||||
println!("Configuring bn254 MSM...");
|
||||
let mut msm_results: HostOrDeviceSlice<'_, G1Projective> = HostOrDeviceSlice::cuda_malloc(1).unwrap();
|
||||
let mut g2_msm_results: HostOrDeviceSlice<'_, G2Projective> = HostOrDeviceSlice::cuda_malloc(1).unwrap();
|
||||
let mut msm_results = DeviceVec::<G1Projective>::cuda_malloc(1).unwrap();
|
||||
let mut g2_msm_results = DeviceVec::<G2Projective>::cuda_malloc(1).unwrap();
|
||||
let stream = CudaStream::create().unwrap();
|
||||
let g2_stream = CudaStream::create().unwrap();
|
||||
let mut cfg = msm::MSMConfig::default();
|
||||
@@ -82,8 +85,7 @@ fn main() {
|
||||
g2_cfg.is_async = true;
|
||||
|
||||
println!("Configuring bls12377 MSM...");
|
||||
let mut msm_results_bls12377: HostOrDeviceSlice<'_, BLS12377G1Projective> =
|
||||
HostOrDeviceSlice::cuda_malloc(1).unwrap();
|
||||
let mut msm_results_bls12377 = DeviceVec::<BLS12377G1Projective>::cuda_malloc(1).unwrap();
|
||||
let stream_bls12377 = CudaStream::create().unwrap();
|
||||
let mut cfg_bls12377 = msm::MSMConfig::default();
|
||||
cfg_bls12377
|
||||
@@ -94,7 +96,7 @@ fn main() {
|
||||
println!("Executing bn254 MSM on device...");
|
||||
#[cfg(feature = "profile")]
|
||||
let start = Instant::now();
|
||||
msm::msm(&scalars, &points, &cfg, &mut msm_results).unwrap();
|
||||
msm::msm(scalars, points, &cfg, &mut msm_results[..]).unwrap();
|
||||
#[cfg(feature = "profile")]
|
||||
println!(
|
||||
"ICICLE BN254 MSM on size 2^{log_size} took: {} ms",
|
||||
@@ -102,16 +104,16 @@ fn main() {
|
||||
.elapsed()
|
||||
.as_millis()
|
||||
);
|
||||
msm::msm(&scalars, &g2_points, &g2_cfg, &mut g2_msm_results).unwrap();
|
||||
msm::msm(scalars, g2_points, &g2_cfg, &mut g2_msm_results[..]).unwrap();
|
||||
|
||||
println!("Executing bls12377 MSM on device...");
|
||||
#[cfg(feature = "profile")]
|
||||
let start = Instant::now();
|
||||
msm::msm(
|
||||
&scalars_bls12377,
|
||||
&points_bls12377,
|
||||
scalars_bls12377,
|
||||
points_bls12377,
|
||||
&cfg_bls12377,
|
||||
&mut msm_results_bls12377,
|
||||
&mut msm_results_bls12377[..],
|
||||
)
|
||||
.unwrap();
|
||||
#[cfg(feature = "profile")]
|
||||
@@ -134,10 +136,10 @@ fn main() {
|
||||
.synchronize()
|
||||
.unwrap();
|
||||
msm_results
|
||||
.copy_to_host(&mut msm_host_result[..])
|
||||
.copy_to_host(HostSlice::from_mut_slice(&mut msm_host_result[..]))
|
||||
.unwrap();
|
||||
g2_msm_results
|
||||
.copy_to_host(&mut g2_msm_host_result[..])
|
||||
.copy_to_host(HostSlice::from_mut_slice(&mut g2_msm_host_result[..]))
|
||||
.unwrap();
|
||||
println!("bn254 result: {:#?}", msm_host_result);
|
||||
println!("G2 bn254 result: {:#?}", g2_msm_host_result);
|
||||
@@ -146,7 +148,7 @@ fn main() {
|
||||
.synchronize()
|
||||
.unwrap();
|
||||
msm_results_bls12377
|
||||
.copy_to_host(&mut msm_host_result_bls12377[..])
|
||||
.copy_to_host(HostSlice::from_mut_slice(&mut msm_host_result_bls12377[..]))
|
||||
.unwrap();
|
||||
println!("bls12377 result: {:#?}", msm_host_result_bls12377);
|
||||
|
||||
@@ -154,23 +156,19 @@ fn main() {
|
||||
{
|
||||
println!("Checking against arkworks...");
|
||||
let ark_points: Vec<Bn254G1Affine> = points
|
||||
.as_slice()
|
||||
.iter()
|
||||
.map(|&point| point.to_ark())
|
||||
.collect();
|
||||
let ark_scalars: Vec<Bn254Fr> = scalars
|
||||
.as_slice()
|
||||
.iter()
|
||||
.map(|scalar| scalar.to_ark())
|
||||
.collect();
|
||||
|
||||
let ark_points_bls12377: Vec<Bls12377G1Affine> = points_bls12377
|
||||
.as_slice()
|
||||
.iter()
|
||||
.map(|point| point.to_ark())
|
||||
.collect();
|
||||
let ark_scalars_bls12377: Vec<Bls12377Fr> = scalars_bls12377
|
||||
.as_slice()
|
||||
.iter()
|
||||
.map(|scalar| scalar.to_ark())
|
||||
.collect();
|
||||
|
||||
@@ -2,10 +2,14 @@ use icicle_bn254::curve::{ScalarCfg, ScalarField};
|
||||
|
||||
use icicle_bls12_377::curve::{ScalarCfg as BLS12377ScalarCfg, ScalarField as BLS12377ScalarField};
|
||||
|
||||
use icicle_cuda_runtime::{device_context::DeviceContext, memory::HostOrDeviceSlice, stream::CudaStream};
|
||||
use icicle_cuda_runtime::{
|
||||
device_context::DeviceContext,
|
||||
memory::{DeviceVec, HostSlice},
|
||||
stream::CudaStream,
|
||||
};
|
||||
|
||||
use icicle_core::{
|
||||
ntt::{self, NTT},
|
||||
ntt::{self, initialize_domain},
|
||||
traits::{FieldImpl, GenerateRandom},
|
||||
};
|
||||
|
||||
@@ -41,14 +45,13 @@ fn main() {
|
||||
);
|
||||
// Setting Bn254 points and scalars
|
||||
println!("Generating random inputs on host for bn254...");
|
||||
let scalars = HostOrDeviceSlice::Host(ScalarCfg::generate_random(size));
|
||||
let mut ntt_results: HostOrDeviceSlice<'_, ScalarField> = HostOrDeviceSlice::cuda_malloc(size).unwrap();
|
||||
let scalars = ScalarCfg::generate_random(size);
|
||||
let mut ntt_results = DeviceVec::<ScalarField>::cuda_malloc(size).unwrap();
|
||||
|
||||
// Setting bls12377 points and scalars
|
||||
println!("Generating random inputs on host for bls12377...");
|
||||
let scalars_bls12377 = HostOrDeviceSlice::Host(BLS12377ScalarCfg::generate_random(size));
|
||||
let mut ntt_results_bls12377: HostOrDeviceSlice<'_, BLS12377ScalarField> =
|
||||
HostOrDeviceSlice::cuda_malloc(size).unwrap();
|
||||
let scalars_bls12377 = BLS12377ScalarCfg::generate_random(size);
|
||||
let mut ntt_results_bls12377 = DeviceVec::<BLS12377ScalarField>::cuda_malloc(size).unwrap();
|
||||
|
||||
println!("Setting up bn254 Domain...");
|
||||
let icicle_omega = <Bn254Fr as FftField>::get_root_of_unity(
|
||||
@@ -57,11 +60,11 @@ fn main() {
|
||||
)
|
||||
.unwrap();
|
||||
let ctx = DeviceContext::default();
|
||||
ScalarCfg::initialize_domain(ScalarField::from_ark(icicle_omega), &ctx).unwrap();
|
||||
initialize_domain(ScalarField::from_ark(icicle_omega), &ctx, true).unwrap();
|
||||
|
||||
println!("Configuring bn254 NTT...");
|
||||
let stream = CudaStream::create().unwrap();
|
||||
let mut cfg = ntt::NTTConfig::default();
|
||||
let mut cfg = ntt::NTTConfig::<'_, ScalarField>::default();
|
||||
cfg.ctx
|
||||
.stream = &stream;
|
||||
cfg.is_async = true;
|
||||
@@ -73,11 +76,11 @@ fn main() {
|
||||
)
|
||||
.unwrap();
|
||||
// reusing ctx from above
|
||||
BLS12377ScalarCfg::initialize_domain(BLS12377ScalarField::from_ark(icicle_omega), &ctx).unwrap();
|
||||
initialize_domain(BLS12377ScalarField::from_ark(icicle_omega), &ctx, true).unwrap();
|
||||
|
||||
println!("Configuring bls12377 NTT...");
|
||||
let stream_bls12377 = CudaStream::create().unwrap();
|
||||
let mut cfg_bls12377 = ntt::NTTConfig::default();
|
||||
let mut cfg_bls12377 = ntt::NTTConfig::<'_, BLS12377ScalarField>::default();
|
||||
cfg_bls12377
|
||||
.ctx
|
||||
.stream = &stream_bls12377;
|
||||
@@ -86,7 +89,13 @@ fn main() {
|
||||
println!("Executing bn254 NTT on device...");
|
||||
#[cfg(feature = "profile")]
|
||||
let start = Instant::now();
|
||||
ntt::ntt(&scalars, ntt::NTTDir::kForward, &cfg, &mut ntt_results).unwrap();
|
||||
ntt::ntt(
|
||||
HostSlice::from_slice(&scalars),
|
||||
ntt::NTTDir::kForward,
|
||||
&cfg,
|
||||
&mut ntt_results[..],
|
||||
)
|
||||
.unwrap();
|
||||
#[cfg(feature = "profile")]
|
||||
println!(
|
||||
"ICICLE BN254 NTT on size 2^{log_size} took: {} μs",
|
||||
@@ -99,10 +108,10 @@ fn main() {
|
||||
#[cfg(feature = "profile")]
|
||||
let start = Instant::now();
|
||||
ntt::ntt(
|
||||
&scalars_bls12377,
|
||||
HostSlice::from_slice(&scalars_bls12377),
|
||||
ntt::NTTDir::kForward,
|
||||
&cfg_bls12377,
|
||||
&mut ntt_results_bls12377,
|
||||
&mut ntt_results_bls12377[..],
|
||||
)
|
||||
.unwrap();
|
||||
#[cfg(feature = "profile")]
|
||||
@@ -119,7 +128,7 @@ fn main() {
|
||||
.unwrap();
|
||||
let mut host_bn254_results = vec![ScalarField::zero(); size];
|
||||
ntt_results
|
||||
.copy_to_host(&mut host_bn254_results[..])
|
||||
.copy_to_host(HostSlice::from_mut_slice(&mut host_bn254_results[..]))
|
||||
.unwrap();
|
||||
|
||||
stream_bls12377
|
||||
@@ -127,19 +136,17 @@ fn main() {
|
||||
.unwrap();
|
||||
let mut host_bls12377_results = vec![BLS12377ScalarField::zero(); size];
|
||||
ntt_results_bls12377
|
||||
.copy_to_host(&mut host_bls12377_results[..])
|
||||
.copy_to_host(HostSlice::from_mut_slice(&mut host_bls12377_results[..]))
|
||||
.unwrap();
|
||||
|
||||
println!("Checking against arkworks...");
|
||||
let mut ark_scalars: Vec<Bn254Fr> = scalars
|
||||
.as_slice()
|
||||
.iter()
|
||||
.map(|scalar| scalar.to_ark())
|
||||
.collect();
|
||||
let bn254_domain = <Radix2EvaluationDomain<Bn254Fr> as EvaluationDomain<Bn254Fr>>::new(size).unwrap();
|
||||
|
||||
let mut ark_scalars_bls12377: Vec<Bls12377Fr> = scalars_bls12377
|
||||
.as_slice()
|
||||
.iter()
|
||||
.map(|scalar| scalar.to_ark())
|
||||
.collect();
|
||||
|
||||
14
examples/rust/polynomials/Cargo.toml
Normal file
14
examples/rust/polynomials/Cargo.toml
Normal file
@@ -0,0 +1,14 @@
|
||||
[package]
|
||||
name = "polynomials"
|
||||
version = "1.2.0"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
icicle-cuda-runtime = { path = "../../../wrappers/rust/icicle-cuda-runtime" }
|
||||
icicle-core = { path = "../../../wrappers/rust/icicle-core" }
|
||||
icicle-bn254 = { path = "../../../wrappers/rust/icicle-curves/icicle-bn254" }
|
||||
icicle-babybear = { path = "../../../wrappers/rust/icicle-fields/icicle-babybear" }
|
||||
clap = { version = "<=4.4.12", features = ["derive"] }
|
||||
|
||||
[features]
|
||||
profile = []
|
||||
101
examples/rust/polynomials/src/main.rs
Normal file
101
examples/rust/polynomials/src/main.rs
Normal file
@@ -0,0 +1,101 @@
|
||||
use icicle_babybear::field::ScalarField as babybearScalar;
|
||||
use icicle_babybear::polynomials::DensePolynomial as PolynomialBabyBear;
|
||||
use icicle_bn254::curve::ScalarField as bn254Scalar;
|
||||
use icicle_bn254::polynomials::DensePolynomial as PolynomialBn254;
|
||||
|
||||
use icicle_cuda_runtime::{
|
||||
device_context::DeviceContext,
|
||||
memory::{DeviceVec, HostSlice},
|
||||
};
|
||||
|
||||
use icicle_core::{
|
||||
ntt::{get_root_of_unity, initialize_domain},
|
||||
polynomials::UnivariatePolynomial,
|
||||
traits::{FieldImpl, GenerateRandom},
|
||||
};
|
||||
|
||||
#[cfg(feature = "profile")]
|
||||
use std::time::Instant;
|
||||
|
||||
use clap::Parser;
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
struct Args {
|
||||
/// Size of NTT to run (20 for 2^20)
|
||||
#[arg(short, long, default_value_t = 20)]
|
||||
max_ntt_log_size: u8,
|
||||
#[arg(short, long, default_value_t = 15)]
|
||||
poly_log_size: u8,
|
||||
}
|
||||
|
||||
fn init(max_ntt_size: u64) {
|
||||
// initialize NTT domain for all fields!. Polynomials ops relies on NTT.
|
||||
let rou_bn254: bn254Scalar = get_root_of_unity(max_ntt_size);
|
||||
let ctx = DeviceContext::default();
|
||||
initialize_domain(rou_bn254, &ctx, false /*=fast twiddles mode*/).unwrap();
|
||||
|
||||
let rou_babybear: babybearScalar = get_root_of_unity(max_ntt_size);
|
||||
initialize_domain(rou_babybear, &ctx, false /*=fast twiddles mode*/).unwrap();
|
||||
|
||||
// initialize the cuda backend for polynomials
|
||||
// make sure to initialize it per field
|
||||
PolynomialBn254::init_cuda_backend();
|
||||
PolynomialBabyBear::init_cuda_backend();
|
||||
}
|
||||
|
||||
fn randomize_poly<P>(size: usize, from_coeffs: bool) -> P
|
||||
where
|
||||
P: UnivariatePolynomial,
|
||||
P::Field: FieldImpl,
|
||||
P::FieldConfig: GenerateRandom<P::Field>,
|
||||
{
|
||||
let coeffs_or_evals = P::FieldConfig::generate_random(size);
|
||||
let p = if from_coeffs {
|
||||
P::from_coeffs(HostSlice::from_slice(&coeffs_or_evals), size)
|
||||
} else {
|
||||
P::from_rou_evals(HostSlice::from_slice(&coeffs_or_evals), size)
|
||||
};
|
||||
p
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let args = Args::parse();
|
||||
init(1 << args.max_ntt_log_size);
|
||||
|
||||
// randomize three polynomials f,g,h over bn254 scalar field
|
||||
let poly_size = 1 << args.poly_log_size;
|
||||
let f = randomize_poly::<PolynomialBn254>(poly_size, true /*from random coeffs*/);
|
||||
let g = randomize_poly::<PolynomialBn254>(poly_size / 2, true /*from random coeffs*/);
|
||||
let h = randomize_poly::<PolynomialBn254>(poly_size / 4, false /*from random evaluations on rou*/);
|
||||
|
||||
// randomize two polynomials over babybear field
|
||||
let f_babybear = randomize_poly::<PolynomialBabyBear>(poly_size, true /*from random coeffs*/);
|
||||
let g_babybear = randomize_poly::<PolynomialBabyBear>(poly_size / 2, true /*from random coeffs*/);
|
||||
|
||||
// Arithmetic
|
||||
let t0 = &f + &g;
|
||||
let t1 = &f * &h;
|
||||
let (q, r) = t1.divide(&t0); // computes q,r for t1(x)=q(x)*t0(x)+r(x)
|
||||
|
||||
let _r_babybear = &f_babybear * &g_babybear;
|
||||
|
||||
// check degree
|
||||
let _r_degree = r.degree();
|
||||
|
||||
// evaluate in single domain point
|
||||
let five = bn254Scalar::from_u32(5);
|
||||
let q_at_five = q.eval(&five);
|
||||
|
||||
// evaluate on domain. Note: domain and image can be either Host or Device slice.
|
||||
// in this example domain in on host and evals on device.
|
||||
let host_domain = [five, bn254Scalar::from_u32(30)];
|
||||
let mut device_image = DeviceVec::<bn254Scalar>::cuda_malloc(host_domain.len()).unwrap();
|
||||
t1.eval_on_domain(HostSlice::from_slice(&host_domain), &mut device_image[..]);
|
||||
|
||||
// slicing
|
||||
let o = h.odd();
|
||||
let e = h.even();
|
||||
let fold = &e + &(&o * &q_at_five); // e(x) + o(x)*scalar
|
||||
|
||||
let _coeff = fold.get_coeff(2); // coeff of x^2
|
||||
}
|
||||
@@ -4,7 +4,7 @@ use icicle_cuda_runtime::device_context::DeviceContext;
|
||||
|
||||
use icicle_core::poseidon::{load_optimized_poseidon_constants, poseidon_hash_many, PoseidonConfig};
|
||||
use icicle_core::traits::FieldImpl;
|
||||
use icicle_cuda_runtime::memory::HostOrDeviceSlice;
|
||||
use icicle_cuda_runtime::memory::HostSlice;
|
||||
|
||||
#[cfg(feature = "profile")]
|
||||
use std::time::Instant;
|
||||
@@ -25,23 +25,29 @@ fn main() {
|
||||
|
||||
println!("Running Icicle Examples: Rust Poseidon Hash");
|
||||
let arity = 2u32;
|
||||
println!("---------------------- Loading optimized Poseidon constants for arity={} ------------------------", arity);
|
||||
println!(
|
||||
"---------------------- Loading optimized Poseidon constants for arity={} ------------------------",
|
||||
arity
|
||||
);
|
||||
let ctx = DeviceContext::default();
|
||||
let constants = load_optimized_poseidon_constants::<F>(arity, &ctx).unwrap();
|
||||
let config = PoseidonConfig::default();
|
||||
|
||||
println!("---------------------- Input size 2^{}={} ------------------------", size, test_size);
|
||||
let inputs = vec![F::one(); test_size * arity as usize];
|
||||
let outputs = vec![F::zero(); test_size];
|
||||
let mut input_slice = HostOrDeviceSlice::on_host(inputs);
|
||||
let mut output_slice = HostOrDeviceSlice::on_host(outputs);
|
||||
println!(
|
||||
"---------------------- Input size 2^{}={} ------------------------",
|
||||
size, test_size
|
||||
);
|
||||
let mut inputs = vec![F::one(); test_size * arity as usize];
|
||||
let mut outputs = vec![F::zero(); test_size];
|
||||
let input_slice = HostSlice::from_mut_slice(&mut inputs);
|
||||
let output_slice = HostSlice::from_mut_slice(&mut outputs);
|
||||
|
||||
println!("Executing BLS12-381 Poseidon Hash on device...");
|
||||
#[cfg(feature = "profile")]
|
||||
let start = Instant::now();
|
||||
poseidon_hash_many::<F>(
|
||||
&mut input_slice,
|
||||
&mut output_slice,
|
||||
input_slice,
|
||||
output_slice,
|
||||
test_size as u32,
|
||||
arity as u32,
|
||||
&constants,
|
||||
@@ -49,5 +55,10 @@ fn main() {
|
||||
)
|
||||
.unwrap();
|
||||
#[cfg(feature = "profile")]
|
||||
println!("ICICLE BLS12-381 Poseidon Hash on size 2^{size} took: {} μs", start.elapsed().as_micros());
|
||||
}
|
||||
println!(
|
||||
"ICICLE BLS12-381 Poseidon Hash on size 2^{size} took: {} μs",
|
||||
start
|
||||
.elapsed()
|
||||
.as_micros()
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1,169 +1,62 @@
|
||||
cmake_minimum_required(VERSION 3.18)
|
||||
|
||||
# GoogleTest requires at least C++14
|
||||
set(CMAKE_CXX_STANDARD 17)
|
||||
set(CMAKE_CUDA_STANDARD 17)
|
||||
set(CMAKE_CUDA_STANDARD_REQUIRED TRUE)
|
||||
set(CMAKE_CXX_STANDARD_REQUIRED TRUE)
|
||||
|
||||
if("$ENV{ICICLE_PIC}" STREQUAL "OFF" OR ICICLE_PIC STREQUAL "OFF")
|
||||
message(WARNING "Note that PIC (position-independent code) is disabled.")
|
||||
else()
|
||||
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
|
||||
endif()
|
||||
|
||||
# add the target cuda architectures
|
||||
# each additional architecture increases the compilation time and output file size
|
||||
if(${CMAKE_VERSION} VERSION_LESS "3.24.0")
|
||||
set(CMAKE_CUDA_ARCHITECTURES ${CUDA_ARCH})
|
||||
else()
|
||||
find_program(_nvidia_smi "nvidia-smi")
|
||||
|
||||
if(_nvidia_smi)
|
||||
set(DETECT_GPU_COUNT_NVIDIA_SMI 0)
|
||||
|
||||
# execute nvidia-smi -L to get a short list of GPUs available
|
||||
exec_program(${_nvidia_smi_path} ARGS -L
|
||||
OUTPUT_VARIABLE _nvidia_smi_out
|
||||
RETURN_VALUE _nvidia_smi_ret)
|
||||
|
||||
# process the stdout of nvidia-smi
|
||||
if(_nvidia_smi_ret EQUAL 0)
|
||||
# convert string with newlines to list of strings
|
||||
string(REGEX REPLACE "\n" ";" _nvidia_smi_out "${_nvidia_smi_out}")
|
||||
|
||||
foreach(_line ${_nvidia_smi_out})
|
||||
if(_line MATCHES "^GPU [0-9]+:")
|
||||
math(EXPR DETECT_GPU_COUNT_NVIDIA_SMI "${DETECT_GPU_COUNT_NVIDIA_SMI}+1")
|
||||
|
||||
# the UUID is not very useful for the user, remove it
|
||||
string(REGEX REPLACE " \\(UUID:.*\\)" "" _gpu_info "${_line}")
|
||||
|
||||
if(NOT _gpu_info STREQUAL "")
|
||||
list(APPEND DETECT_GPU_INFO "${_gpu_info}")
|
||||
endif()
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
check_num_gpu_info(${DETECT_GPU_COUNT_NVIDIA_SMI} DETECT_GPU_INFO)
|
||||
set(DETECT_GPU_COUNT ${DETECT_GPU_COUNT_NVIDIA_SMI})
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# ##
|
||||
if(DETECT_GPU_COUNT GREATER 0)
|
||||
set(CMAKE_CUDA_ARCHITECTURES native) # do native
|
||||
else()
|
||||
# no GPUs found, like on Github CI runners
|
||||
set(CMAKE_CUDA_ARCHITECTURES 50) # some safe value
|
||||
endif()
|
||||
endif()
|
||||
|
||||
project(icicle LANGUAGES CUDA CXX)
|
||||
# Check CUDA version and, if possible, enable multi-threaded compilation
|
||||
if(CMAKE_CUDA_COMPILER_VERSION VERSION_GREATER_EQUAL "12.2")
|
||||
message(STATUS "Using multi-threaded CUDA compilation.")
|
||||
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --split-compile 0")
|
||||
else()
|
||||
message(STATUS "Can't use multi-threaded CUDA compilation.")
|
||||
endif()
|
||||
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr")
|
||||
set(CMAKE_CUDA_FLAGS_RELEASE "")
|
||||
set(CMAKE_CUDA_FLAGS_DEBUG "${CMAKE_CUDA_FLAGS_DEBUG} -g -lineinfo")
|
||||
include_directories("${CMAKE_SOURCE_DIR}")
|
||||
|
||||
include(cmake/Common.cmake)
|
||||
include(cmake/FieldsCommon.cmake)
|
||||
include(cmake/CurvesCommon.cmake)
|
||||
|
||||
# when adding a new curve/field, append its name to the end of this list
|
||||
set(SUPPORTED_CURVES bn254;bls12_381;bls12_377;bw6_761;grumpkin)
|
||||
set(SUPPORTED_CURVES_WITH_POSEIDON bn254;bls12_381;bls12_377;bw6_761;grumpkin)
|
||||
SET(SUPPORTED_CURVES_WITHOUT_NTT grumpkin)
|
||||
set_env()
|
||||
set_gpu_env()
|
||||
|
||||
set(IS_CURVE_SUPPORTED FALSE)
|
||||
set(I 0)
|
||||
foreach (SUPPORTED_CURVE ${SUPPORTED_CURVES})
|
||||
math(EXPR I "${I} + 1")
|
||||
if (CURVE STREQUAL SUPPORTED_CURVE)
|
||||
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -DCURVE_ID=${I}")
|
||||
set(IS_CURVE_SUPPORTED TRUE)
|
||||
endif ()
|
||||
endforeach()
|
||||
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
|
||||
|
||||
if (NOT IS_CURVE_SUPPORTED)
|
||||
message( FATAL_ERROR "The value of CURVE variable: ${CURVE} is not one of the supported curves: ${SUPPORTED_CURVES}" )
|
||||
option(DEVMODE "Enable development mode" OFF)
|
||||
option(EXT_FIELD "Build extension field" OFF)
|
||||
option(G2 "Build G2" OFF)
|
||||
option(MSM "Build MSM" ON)
|
||||
option(ECNTT "Build ECNTT" OFF)
|
||||
option(BUILD_HASH "Build hash functions" OFF)
|
||||
option(BUILD_TESTS "Build unit tests" OFF)
|
||||
option(BUILD_BENCHMARKS "Build benchmarks" OFF)
|
||||
# add options here
|
||||
|
||||
if((DEFINED CURVE) AND (DEFINED FIELD))
|
||||
message( FATAL_ERROR "CURVE and FIELD cannot be defined at the same time" )
|
||||
endif ()
|
||||
|
||||
if (DEVMODE STREQUAL "ON")
|
||||
if (DEVMODE)
|
||||
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -O0 --ptxas-options=-O0 --ptxas-options=-allow-expensive-optimizations=false -DDEVMODE=ON")
|
||||
endif ()
|
||||
|
||||
if (G2_DEFINED STREQUAL "ON")
|
||||
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -DG2_DEFINED=ON")
|
||||
if(DEFINED FIELD)
|
||||
check_field()
|
||||
add_subdirectory(src/fields)
|
||||
endif ()
|
||||
|
||||
if (ECNTT_DEFINED STREQUAL "ON")
|
||||
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -DECNTT_DEFINED=ON")
|
||||
if(DEFINED CURVE)
|
||||
check_curve()
|
||||
set(FIELD ${CURVE})
|
||||
add_subdirectory(src/fields)
|
||||
add_subdirectory(src/curves)
|
||||
endif ()
|
||||
|
||||
option(BUILD_TESTS "Build tests" OFF)
|
||||
|
||||
if (NOT BUILD_TESTS)
|
||||
|
||||
message(STATUS "Building without tests.")
|
||||
|
||||
if (CURVE IN_LIST SUPPORTED_CURVES_WITH_POSEIDON)
|
||||
list(APPEND ICICLE_SOURCES appUtils/poseidon/poseidon.cu)
|
||||
list(APPEND ICICLE_SOURCES appUtils/tree/merkle.cu)
|
||||
endif()
|
||||
|
||||
if (NOT CURVE IN_LIST SUPPORTED_CURVES_WITHOUT_NTT)
|
||||
list(APPEND ICICLE_SOURCES appUtils/ntt/ntt.cu)
|
||||
list(APPEND ICICLE_SOURCES appUtils/ntt/kernel_ntt.cu)
|
||||
if(ECNTT_DEFINED STREQUAL "ON")
|
||||
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -DECNTT_DEFINED=ON")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
add_library(
|
||||
icicle
|
||||
utils/vec_ops.cu
|
||||
utils/mont.cu
|
||||
primitives/field.cu
|
||||
primitives/projective.cu
|
||||
appUtils/msm/msm.cu
|
||||
${ICICLE_SOURCES}
|
||||
)
|
||||
set_target_properties(icicle PROPERTIES OUTPUT_NAME "ingo_${CURVE}")
|
||||
target_compile_definitions(icicle PRIVATE CURVE=${CURVE})
|
||||
|
||||
else()
|
||||
|
||||
message(STATUS "Building tests.")
|
||||
|
||||
include(FetchContent)
|
||||
FetchContent_Declare(
|
||||
googletest
|
||||
URL https://github.com/google/googletest/archive/refs/tags/v1.13.0.zip
|
||||
)
|
||||
# For Windows: Prevent overriding the parent project's compiler/linker settings
|
||||
|
||||
set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
|
||||
FetchContent_MakeAvailable(googletest)
|
||||
|
||||
enable_testing()
|
||||
|
||||
add_executable(
|
||||
runner
|
||||
tests/runner.cu
|
||||
)
|
||||
|
||||
target_link_libraries(
|
||||
runner
|
||||
GTest::gtest_main
|
||||
)
|
||||
|
||||
include(GoogleTest)
|
||||
set_target_properties(runner PROPERTIES CUDA_SEPARABLE_COMPILATION ON)
|
||||
|
||||
gtest_discover_tests(runner)
|
||||
|
||||
if (G2)
|
||||
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -DG2")
|
||||
endif ()
|
||||
|
||||
if (EXT_FIELD)
|
||||
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -DEXT_FIELD")
|
||||
endif ()
|
||||
|
||||
if(BUILD_HASH)
|
||||
add_subdirectory(src/hash)
|
||||
endif ()
|
||||
|
||||
if (BUILD_TESTS)
|
||||
add_subdirectory(tests)
|
||||
endif()
|
||||
|
||||
if (BUILD_BENCHMARKS)
|
||||
add_subdirectory(benchmarks)
|
||||
endif()
|
||||
|
||||
@@ -1,4 +0,0 @@
|
||||
test_msm:
|
||||
mkdir -p work
|
||||
nvcc -o work/test_msm -std=c++17 -I. -I../.. tests/msm_test.cu
|
||||
work/test_msm
|
||||
@@ -1,3 +0,0 @@
|
||||
test_merkle:
|
||||
nvcc -o test_merkle -I. -I../.. test.cu
|
||||
./test_merkle
|
||||
5
icicle/benchmarks/CMakeLists.txt
Normal file
5
icicle/benchmarks/CMakeLists.txt
Normal file
@@ -0,0 +1,5 @@
|
||||
|
||||
add_executable(benches benches.cu)
|
||||
target_link_libraries(benches benchmark::benchmark)
|
||||
target_include_directories(benches PUBLIC ${CMAKE_SOURCE_DIR}/include/)
|
||||
find_package(benchmark REQUIRED)
|
||||
25
icicle/benchmarks/README.md
Normal file
25
icicle/benchmarks/README.md
Normal file
@@ -0,0 +1,25 @@
|
||||
# How to use benchmarks
|
||||
|
||||
ICICLE uses [google benchmarks](https://github.com/google/benchmark) to measure the performance of primitives.
|
||||
|
||||
To run benchmarks, make sure you have everything installed to run ICICLE (see top-level README for that). Next, you need to install google benchmarks library as described in their [documentation](https://github.com/google/benchmark?tab=readme-ov-file#installation). When running benchmarks, export the path to this installation:
|
||||
|
||||
```
|
||||
export CMAKE_PREFIX_PATH=$CMAKE_PREFIX_PATH:<path-to-google-benchmarks-build-folder>
|
||||
```
|
||||
|
||||
Then to benchmark field arithmetic, say, on `babybear` field, run:
|
||||
|
||||
```
|
||||
cmake -UCURVE -UFIELD -UG2 -UEXT_FIELD -DFIELD=babybear -DEXT_FIELD=ON -S . -B build;
|
||||
cmake --build build;
|
||||
build/benches --benchmark_counters_tabular=true
|
||||
```
|
||||
|
||||
`-U` parameters are needed to clear variables from previous runs and `EXT_FIELD` can be disabled if benhcmarking the extension field is not needed. To benchmark a curve, say, `bn254`, change the first `cmake` call to:
|
||||
|
||||
```
|
||||
cmake -UCURVE -UFIELD -UG2 -UEXT_FIELD -DCURVE=bn254 -S . -B build;
|
||||
```
|
||||
|
||||
Benchmarks measure throughput of very cheap operations like field multiplication or EC addition by repeating them very many times in parallel, so throughput is the main metric to look at.
|
||||
6
icicle/benchmarks/benches.cu
Normal file
6
icicle/benchmarks/benches.cu
Normal file
@@ -0,0 +1,6 @@
|
||||
#include "field_benchmarks.cu"
|
||||
#ifdef CURVE_ID
|
||||
#include "curve_benchmarks.cu"
|
||||
#endif
|
||||
|
||||
BENCHMARK_MAIN();
|
||||
79
icicle/benchmarks/curve_benchmarks.cu
Normal file
79
icicle/benchmarks/curve_benchmarks.cu
Normal file
@@ -0,0 +1,79 @@
|
||||
#include <benchmark/benchmark.h>
|
||||
#include "utils/test_functions.cuh"
|
||||
#include "curves/curve_config.cuh"
|
||||
|
||||
using namespace curve_config;
|
||||
using namespace benchmark;
|
||||
|
||||
static void BM_MixedECAdd(State& state)
|
||||
{
|
||||
constexpr int N = 128;
|
||||
int n = state.range(0) / N;
|
||||
projective_t* points1;
|
||||
affine_t* points2;
|
||||
assert(!cudaMalloc(&points1, n * sizeof(projective_t)));
|
||||
assert(!cudaMalloc(&points2, n * sizeof(affine_t)));
|
||||
|
||||
projective_t* h_points1 = (projective_t*)malloc(n * sizeof(projective_t));
|
||||
affine_t* h_points2 = (affine_t*)malloc(n * sizeof(affine_t));
|
||||
projective_t::rand_host_many(h_points1, n);
|
||||
projective_t::rand_host_many_affine(h_points2, n);
|
||||
cudaMemcpy(points1, h_points1, sizeof(projective_t) * n, cudaMemcpyHostToDevice);
|
||||
cudaMemcpy(points2, h_points2, sizeof(affine_t) * n, cudaMemcpyHostToDevice);
|
||||
|
||||
for (auto _ : state) {
|
||||
cudaEvent_t start, stop;
|
||||
cudaEventCreate(&start);
|
||||
cudaEventCreate(&stop);
|
||||
cudaEventRecord(start);
|
||||
assert((vec_add<projective_t, affine_t, N>(points1, points2, points1, n)) == cudaSuccess);
|
||||
assert(cudaStreamSynchronize(0) == cudaSuccess);
|
||||
cudaEventRecord(stop);
|
||||
|
||||
float milliseconds = 0;
|
||||
cudaEventElapsedTime(&milliseconds, start, stop);
|
||||
|
||||
state.SetIterationTime((double)(milliseconds / 1000));
|
||||
}
|
||||
state.counters["Throughput"] = Counter(state.range(0), Counter::kIsRate | Counter::kIsIterationInvariant);
|
||||
cudaFree(points1);
|
||||
cudaFree(points2);
|
||||
}
|
||||
|
||||
// Benchmarks full EC addition (projective += projective).
// Each kernel invocation performs N additions per point pair, so the number of
// device point pairs is state.range(0) / N while the reported throughput is
// still expressed in total additions per second.
static void BM_FullECAdd(benchmark::State& state)
{
  constexpr int N = 128;
  int n = state.range(0) / N;
  projective_t* points1;
  projective_t* points2;
  assert(!cudaMalloc(&points1, n * sizeof(projective_t)));
  assert(!cudaMalloc(&points2, n * sizeof(projective_t)));

  projective_t* h_points1 = (projective_t*)malloc(n * sizeof(projective_t));
  projective_t* h_points2 = (projective_t*)malloc(n * sizeof(projective_t));
  projective_t::rand_host_many(h_points1, n);
  projective_t::rand_host_many(h_points2, n);
  cudaMemcpy(points1, h_points1, sizeof(projective_t) * n, cudaMemcpyHostToDevice);
  cudaMemcpy(points2, h_points2, sizeof(projective_t) * n, cudaMemcpyHostToDevice);
  // The host staging buffers are no longer needed once the data is on the
  // device; previously they leaked on every benchmark invocation.
  free(h_points1);
  free(h_points2);

  for (auto _ : state) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    assert((vec_add<projective_t, projective_t, N>(points1, points2, points1, n)) == cudaSuccess);
    assert(cudaStreamSynchronize(0) == cudaSuccess);
    cudaEventRecord(stop);
    // cudaEventElapsedTime requires the stop event to have completed;
    // without this synchronize it can fail with cudaErrorNotReady and
    // leave `milliseconds` unset.
    cudaEventSynchronize(stop);

    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);

    state.SetIterationTime((double)(milliseconds / 1000));
    // Events are created per iteration — destroy them so repeated iterations
    // do not leak CUDA resources.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
  }
  state.counters["Throughput"] = Counter(state.range(0), Counter::kIsRate | Counter::kIsIterationInvariant);
  cudaFree(points1);
  cudaFree(points2);
}
|
||||
|
||||
// The benchmark bodies report GPU time via state.SetIterationTime(); Google
// Benchmark only honors those values when the benchmark is registered with
// UseManualTime() — otherwise wall-clock time (including setup) is reported.
BENCHMARK(BM_FullECAdd)->Range(1 << 27, 1 << 27)->UseManualTime()->Unit(benchmark::kMillisecond);
BENCHMARK(BM_MixedECAdd)->Range(1 << 27, 1 << 27)->UseManualTime()->Unit(benchmark::kMillisecond);
|
||||
108
icicle/benchmarks/field_benchmarks.cu
Normal file
108
icicle/benchmarks/field_benchmarks.cu
Normal file
@@ -0,0 +1,108 @@
|
||||
#include <benchmark/benchmark.h>
|
||||
#include "utils/test_functions.cuh"
|
||||
#include "fields/field_config.cuh"
|
||||
|
||||
using namespace field_config;
|
||||
using namespace benchmark;
|
||||
|
||||
template <class T>
|
||||
static void BM_FieldAdd(State& state)
|
||||
{
|
||||
constexpr int N = 256;
|
||||
int n = state.range(0) / N;
|
||||
T* scalars1;
|
||||
T* scalars2;
|
||||
assert(!cudaMalloc(&scalars1, n * sizeof(T)));
|
||||
assert(!cudaMalloc(&scalars2, n * sizeof(T)));
|
||||
|
||||
assert(device_populate_random<T>(scalars1, n) == cudaSuccess);
|
||||
assert(device_populate_random<T>(scalars2, n) == cudaSuccess);
|
||||
|
||||
for (auto _ : state) {
|
||||
cudaEvent_t start, stop;
|
||||
cudaEventCreate(&start);
|
||||
cudaEventCreate(&stop);
|
||||
cudaEventRecord(start);
|
||||
assert((vec_add<T, T, N>(scalars1, scalars2, scalars1, n)) == cudaSuccess);
|
||||
assert(cudaStreamSynchronize(0) == cudaSuccess);
|
||||
cudaEventRecord(stop);
|
||||
|
||||
float milliseconds = 0;
|
||||
cudaEventElapsedTime(&milliseconds, start, stop);
|
||||
|
||||
state.SetIterationTime((double)(milliseconds / 1000));
|
||||
}
|
||||
state.counters["Throughput"] = Counter(state.range(0), Counter::kIsRate | Counter::kIsIterationInvariant);
|
||||
cudaFree(scalars1);
|
||||
cudaFree(scalars2);
|
||||
}
|
||||
|
||||
template <class T>
|
||||
static void BM_FieldMul(State& state)
|
||||
{
|
||||
constexpr int N = 128;
|
||||
int n = state.range(0) / N;
|
||||
T* scalars1;
|
||||
T* scalars2;
|
||||
assert(!cudaMalloc(&scalars1, n * sizeof(T)));
|
||||
assert(!cudaMalloc(&scalars2, n * sizeof(T)));
|
||||
|
||||
assert(device_populate_random<T>(scalars1, n) == cudaSuccess);
|
||||
assert(device_populate_random<T>(scalars2, n) == cudaSuccess);
|
||||
|
||||
for (auto _ : state) {
|
||||
cudaEvent_t start, stop;
|
||||
cudaEventCreate(&start);
|
||||
cudaEventCreate(&stop);
|
||||
cudaEventRecord(start);
|
||||
assert((vec_mul<T, T, N>(scalars1, scalars2, scalars1, n)) == cudaSuccess);
|
||||
assert(cudaStreamSynchronize(0) == cudaSuccess);
|
||||
cudaEventRecord(stop);
|
||||
|
||||
float milliseconds = 0;
|
||||
cudaEventElapsedTime(&milliseconds, start, stop);
|
||||
|
||||
state.SetIterationTime((double)(milliseconds / 1000));
|
||||
}
|
||||
state.counters["Throughput"] = Counter(state.range(0), Counter::kIsRate | Counter::kIsIterationInvariant);
|
||||
cudaFree(scalars1);
|
||||
cudaFree(scalars2);
|
||||
}
|
||||
|
||||
template <class T>
|
||||
static void BM_FieldSqr(State& state)
|
||||
{
|
||||
constexpr int N = 128;
|
||||
int n = state.range(0) / N;
|
||||
T* scalars;
|
||||
assert(!cudaMalloc(&scalars, n * sizeof(T)));
|
||||
|
||||
assert(device_populate_random<T>(scalars, n) == cudaSuccess);
|
||||
|
||||
for (auto _ : state) {
|
||||
cudaEvent_t start, stop;
|
||||
cudaEventCreate(&start);
|
||||
cudaEventCreate(&stop);
|
||||
cudaEventRecord(start);
|
||||
assert((field_vec_sqr<T, N>(scalars, scalars, n)) == cudaSuccess);
|
||||
assert(cudaStreamSynchronize(0) == cudaSuccess);
|
||||
cudaEventRecord(stop);
|
||||
|
||||
float milliseconds = 0;
|
||||
cudaEventElapsedTime(&milliseconds, start, stop);
|
||||
|
||||
state.SetIterationTime((double)(milliseconds / 1000));
|
||||
}
|
||||
state.counters["Throughput"] = Counter(state.range(0), Counter::kIsRate | Counter::kIsIterationInvariant);
|
||||
cudaFree(scalars);
|
||||
}
|
||||
|
||||
// The benchmark bodies report GPU time via state.SetIterationTime(); Google
// Benchmark only honors those values when the benchmark is registered with
// UseManualTime() — otherwise wall-clock time (including setup) is reported.
BENCHMARK(BM_FieldAdd<scalar_t>)->Range(1 << 28, 1 << 28)->UseManualTime()->Unit(kMicrosecond);
BENCHMARK(BM_FieldMul<scalar_t>)->Range(1 << 27, 1 << 27)->UseManualTime()->Unit(kMicrosecond);
BENCHMARK(BM_FieldSqr<scalar_t>)->Range(1 << 27, 1 << 27)->UseManualTime()->Unit(kMicrosecond);

// Extension-field variants are only compiled when the build defines EXT_FIELD.
#ifdef EXT_FIELD
BENCHMARK(BM_FieldAdd<extension_t>)->Range(1 << 28, 1 << 28)->UseManualTime()->Unit(kMicrosecond);
BENCHMARK(BM_FieldMul<extension_t>)->Range(1 << 27, 1 << 27)->UseManualTime()->Unit(kMicrosecond);
BENCHMARK(BM_FieldSqr<extension_t>)->Range(1 << 27, 1 << 27)->UseManualTime()->Unit(kMicrosecond);
#endif
|
||||
72
icicle/cmake/Common.cmake
Normal file
72
icicle/cmake/Common.cmake
Normal file
@@ -0,0 +1,72 @@
|
||||
# Configures common host-side build settings (C++/CUDA language standards and
# position-independent code). All results are propagated to the caller via
# PARENT_SCOPE, since set() inside a function() is otherwise function-local.
function(set_env)
  set(CMAKE_CXX_STANDARD 17 PARENT_SCOPE)
  set(CMAKE_CUDA_STANDARD 17 PARENT_SCOPE)
  set(CMAKE_CUDA_STANDARD_REQUIRED TRUE PARENT_SCOPE)
  set(CMAKE_CXX_STANDARD_REQUIRED TRUE PARENT_SCOPE)

  # PIC is on by default; it can be disabled via the ICICLE_PIC environment
  # variable or CMake variable.
  if("$ENV{ICICLE_PIC}" STREQUAL "OFF" OR ICICLE_PIC STREQUAL "OFF")
    message(WARNING "Note that PIC (position-independent code) is disabled.")
  else()
    # PARENT_SCOPE is required here: without it this set() only affected the
    # function's own scope and PIC was never actually enabled for the build.
    set(CMAKE_POSITION_INDEPENDENT_CODE ON PARENT_SCOPE)
  endif()
endfunction()
|
||||
|
||||
# Detects available NVIDIA GPUs (via nvidia-smi) and configures the CUDA
# architectures and compile flags accordingly. All results the caller needs
# are propagated via PARENT_SCOPE.
function(set_gpu_env)
  # add the target cuda architectures
  # each additional architecture increases the compilation time and output file size
  if(${CMAKE_VERSION} VERSION_LESS "3.24.0")
    set(CMAKE_CUDA_ARCHITECTURES ${CUDA_ARCH} PARENT_SCOPE)
  else()
    find_program(_nvidia_smi "nvidia-smi")

    if(_nvidia_smi)
      set(DETECT_GPU_COUNT_NVIDIA_SMI 0)

      # execute nvidia-smi -L to get a short list of GPUs available
      # NOTE: previously this invoked the undefined variable _nvidia_smi_path
      # (find_program() stored its result in _nvidia_smi), so detection could
      # never run; exec_program is also deprecated in favor of execute_process.
      execute_process(
        COMMAND ${_nvidia_smi} -L
        OUTPUT_VARIABLE _nvidia_smi_out
        RESULT_VARIABLE _nvidia_smi_ret)

      # process the stdout of nvidia-smi
      if(_nvidia_smi_ret EQUAL 0)
        # convert string with newlines to list of strings
        string(REGEX REPLACE "\n" ";" _nvidia_smi_out "${_nvidia_smi_out}")

        foreach(_line ${_nvidia_smi_out})
          if(_line MATCHES "^GPU [0-9]+:")
            math(EXPR DETECT_GPU_COUNT_NVIDIA_SMI "${DETECT_GPU_COUNT_NVIDIA_SMI}+1")

            # the UUID is not very useful for the user, remove it
            string(REGEX REPLACE " \\(UUID:.*\\)" "" _gpu_info "${_line}")

            if(NOT _gpu_info STREQUAL "")
              list(APPEND DETECT_GPU_INFO "${_gpu_info}")
            endif()
          endif()
        endforeach()

        check_num_gpu_info(${DETECT_GPU_COUNT_NVIDIA_SMI} DETECT_GPU_INFO)
        set(DETECT_GPU_COUNT ${DETECT_GPU_COUNT_NVIDIA_SMI})
      endif()
    endif()

    # ##
    if(DETECT_GPU_COUNT GREATER 0)
      set(CMAKE_CUDA_ARCHITECTURES native PARENT_SCOPE) # do native
    else()
      # no GPUs found, like on Github CI runners
      set(CMAKE_CUDA_ARCHITECTURES 50 PARENT_SCOPE) # some safe value
    endif()
  endif()

  # Check CUDA version and, if possible, enable multi-threaded compilation
  if(CMAKE_CUDA_COMPILER_VERSION VERSION_GREATER_EQUAL "12.2")
    message(STATUS "Using multi-threaded CUDA compilation.")
    set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --split-compile 0" PARENT_SCOPE)
  else()
    message(STATUS "Can't use multi-threaded CUDA compilation.")
  endif()
  set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr" PARENT_SCOPE)
  set(CMAKE_CUDA_FLAGS_RELEASE "" PARENT_SCOPE)
  set(CMAKE_CUDA_FLAGS_DEBUG "${CMAKE_CUDA_FLAGS_DEBUG} -g -lineinfo" PARENT_SCOPE)
endfunction()
|
||||
17
icicle/cmake/CurvesCommon.cmake
Normal file
17
icicle/cmake/CurvesCommon.cmake
Normal file
@@ -0,0 +1,17 @@
|
||||
# Validates the CURVE cache variable against the list of supported curves.
# On success, appends the matching curve/field identifiers to the CUDA flags
# (propagated via PARENT_SCOPE); on failure, aborts the configure step.
function(check_curve)
  set(SUPPORTED_CURVES bn254;bls12_381;bls12_377;bw6_761;grumpkin)

  list(FIND SUPPORTED_CURVES "${CURVE}" _curve_index)
  if(_curve_index EQUAL -1)
    message( FATAL_ERROR "The value of CURVE variable: ${CURVE} is not one of the supported curves: ${SUPPORTED_CURVES}" )
  endif()

  # Curve IDs are the 1-based position of the curve in SUPPORTED_CURVES.
  math(EXPR _curve_id "${_curve_index} + 1")
  set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -DCURVE_ID=${_curve_id} -DFIELD_ID=${_curve_id}" PARENT_SCOPE)
endfunction()
|
||||
17
icicle/cmake/FieldsCommon.cmake
Normal file
17
icicle/cmake/FieldsCommon.cmake
Normal file
@@ -0,0 +1,17 @@
|
||||
# Validates the FIELD cache variable against the list of supported fields.
# On success, appends the matching field identifier to the CUDA flags
# (propagated via PARENT_SCOPE); on failure, aborts the configure step.
function(check_field)
  set(SUPPORTED_FIELDS babybear)

  list(FIND SUPPORTED_FIELDS "${FIELD}" _field_index)
  if(_field_index EQUAL -1)
    message( FATAL_ERROR "The value of FIELD variable: ${FIELD} is not one of the supported fields: ${SUPPORTED_FIELDS}" )
  endif()

  # Field IDs start at 1001 (curve IDs occupy the low range), so the ID is the
  # 1-based position in SUPPORTED_FIELDS offset by 1000.
  math(EXPR _field_id "${_field_index} + 1001")
  set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -DFIELD_ID=${_field_id}" PARENT_SCOPE)
endfunction()
|
||||
@@ -1,89 +0,0 @@
|
||||
#pragma once
#ifndef INDEX_H
#define INDEX_H

// Numeric identifiers for the supported curves. The build system passes
// -DCURVE_ID=<one of these> to select which parameter header and namespace
// are pulled in below.
#define BN254 1
#define BLS12_381 2
#define BLS12_377 3
#define BW6_761 4
#define GRUMPKIN 5

#include "primitives/field.cuh"
#include "primitives/projective.cuh"
// Extension-field primitives are only required when G2 support is compiled in.
#if defined(G2_DEFINED)
#include "primitives/extension_field.cuh"
#endif

// Pull in the parameter set (fp_config, fq_config, generator coordinates,
// Weierstrass coefficients, ...) of the curve selected at build time.
#if CURVE_ID == BN254
#include "bn254_params.cuh"
using namespace bn254;
#elif CURVE_ID == BLS12_381
#include "bls12_381_params.cuh"
using namespace bls12_381;
#elif CURVE_ID == BLS12_377
#include "bls12_377_params.cuh"
using namespace bls12_377;
#elif CURVE_ID == BW6_761
#include "bw6_761_params.cuh"
using namespace bw6_761;
#elif CURVE_ID == GRUMPKIN
#include "grumpkin_params.cuh"
using namespace grumpkin;
#endif

/**
 * @namespace curve_config
 * Namespace with type definitions for short Weierstrass pairing-friendly [elliptic
 * curves](https://hyperelliptic.org/EFD/g1p/auto-shortw.html). Here, concrete types are created in accordance
 * with the `-DCURVE` env variable passed during build.
 */
namespace curve_config {
  /**
   * Scalar field of the curve. Is always a prime field.
   */
  typedef Field<fp_config> scalar_t;
  /**
   * Base field of G1 curve. Is always a prime field.
   */
  typedef Field<fq_config> point_field_t;
  static constexpr point_field_t generator_x = point_field_t{g1_gen_x};
  static constexpr point_field_t generator_y = point_field_t{g1_gen_y};
  static constexpr point_field_t b = point_field_t{weierstrass_b};
  /**
   * [Projective representation](https://hyperelliptic.org/EFD/g1p/auto-shortw-projective.html)
   * of G1 curve consisting of three coordinates of type [point_field_t](point_field_t).
   */
  typedef Projective<point_field_t, scalar_t, b, generator_x, generator_y> projective_t;
  /**
   * Affine representation of G1 curve consisting of two coordinates of type [point_field_t](point_field_t).
   */
  typedef Affine<point_field_t> affine_t;

#if defined(G2_DEFINED)
  // BW6-761's G2 lives over the same prime field as G1; every other supported
  // curve uses a degree-2 extension field for its G2 coordinates.
#if CURVE_ID == BW6_761
  typedef point_field_t g2_point_field_t;
  static constexpr g2_point_field_t g2_generator_x = g2_point_field_t{g2_gen_x};
  static constexpr g2_point_field_t g2_generator_y = g2_point_field_t{g2_gen_y};
  static constexpr g2_point_field_t g2_b = g2_point_field_t{g2_weierstrass_b};
#else
  typedef ExtensionField<fq_config> g2_point_field_t;
  static constexpr g2_point_field_t g2_generator_x =
    g2_point_field_t{point_field_t{g2_gen_x_re}, point_field_t{g2_gen_x_im}};
  static constexpr g2_point_field_t g2_generator_y =
    g2_point_field_t{point_field_t{g2_gen_y_re}, point_field_t{g2_gen_y_im}};
  static constexpr g2_point_field_t g2_b =
    g2_point_field_t{point_field_t{weierstrass_b_g2_re}, point_field_t{weierstrass_b_g2_im}};
#endif
  /**
   * [Projective representation](https://hyperelliptic.org/EFD/g1p/auto-shortw-projective.html) of G2 curve.
   */
  typedef Projective<g2_point_field_t, scalar_t, g2_b, g2_generator_x, g2_generator_y> g2_projective_t;
  /**
   * Affine representation of G2 curve.
   */
  typedef Affine<g2_point_field_t> g2_affine_t;
#endif

} // namespace curve_config

#endif
||||
73
icicle/include/api/babybear.h
Normal file
73
icicle/include/api/babybear.h
Normal file
@@ -0,0 +1,73 @@
|
||||
// WARNING: This file is auto-generated by a script.
// Any changes made to this file may be overwritten.
// Please modify the code generation script instead.
// Path to the code generation script: scripts/gen_c_api.py

// C API surface for the babybear STARK field: NTT, element-wise vector ops,
// matrix transpose, scalar generation and Montgomery conversion, for both the
// base field (babybear::scalar_t) and its extension (babybear::extension_t).

#pragma once
#ifndef BABYBEAR_API_H
#define BABYBEAR_API_H

#include <cuda_runtime.h>
#include "gpu-utils/device_context.cuh"
#include "fields/stark_fields/babybear.cuh"
#include "ntt/ntt.cuh"
#include "vec_ops/vec_ops.cuh"

// NTT over the extension field (configured via ntt::NTTConfig).
extern "C" cudaError_t babybear_extension_ntt_cuda(
  const babybear::extension_t* input, int size, ntt::NTTDir dir, ntt::NTTConfig<babybear::scalar_t>& config, babybear::extension_t* output);

// Element-wise vector arithmetic over the base field (see vec_ops/vec_ops.cuh
// for VecOpsConfig semantics).
extern "C" cudaError_t babybear_mul_cuda(
  babybear::scalar_t* vec_a, babybear::scalar_t* vec_b, int n, vec_ops::VecOpsConfig& config, babybear::scalar_t* result);

extern "C" cudaError_t babybear_add_cuda(
  babybear::scalar_t* vec_a, babybear::scalar_t* vec_b, int n, vec_ops::VecOpsConfig& config, babybear::scalar_t* result);

extern "C" cudaError_t babybear_sub_cuda(
  babybear::scalar_t* vec_a, babybear::scalar_t* vec_b, int n, vec_ops::VecOpsConfig& config, babybear::scalar_t* result);

// Transpose of a row_size x column_size matrix of base-field elements.
extern "C" cudaError_t babybear_transpose_matrix_cuda(
  const babybear::scalar_t* input,
  uint32_t row_size,
  uint32_t column_size,
  babybear::scalar_t* output,
  device_context::DeviceContext& ctx,
  bool on_device,
  bool is_async);

// Fills `scalars` with `size` generated base-field elements.
extern "C" void babybear_generate_scalars(babybear::scalar_t* scalars, int size);

// In-place Montgomery-form conversion; `is_into` selects direction.
extern "C" cudaError_t babybear_scalar_convert_montgomery(
  babybear::scalar_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);

// NTT twiddle-domain lifecycle and forward/inverse NTT over the base field.
extern "C" cudaError_t babybear_initialize_domain(
  babybear::scalar_t* primitive_root, device_context::DeviceContext& ctx, bool fast_twiddles_mode);

extern "C" cudaError_t babybear_ntt_cuda(
  const babybear::scalar_t* input, int size, ntt::NTTDir dir, ntt::NTTConfig<babybear::scalar_t>& config, babybear::scalar_t* output);

extern "C" cudaError_t babybear_release_domain(device_context::DeviceContext& ctx);

// Extension-field counterparts of the operations above.
extern "C" void babybear_extension_generate_scalars(babybear::extension_t* scalars, int size);

extern "C" cudaError_t babybear_extension_scalar_convert_montgomery(
  babybear::extension_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);

extern "C" cudaError_t babybear_extension_mul_cuda(
  babybear::extension_t* vec_a, babybear::extension_t* vec_b, int n, vec_ops::VecOpsConfig& config, babybear::extension_t* result);

extern "C" cudaError_t babybear_extension_add_cuda(
  babybear::extension_t* vec_a, babybear::extension_t* vec_b, int n, vec_ops::VecOpsConfig& config, babybear::extension_t* result);

extern "C" cudaError_t babybear_extension_sub_cuda(
  babybear::extension_t* vec_a, babybear::extension_t* vec_b, int n, vec_ops::VecOpsConfig& config, babybear::extension_t* result);

extern "C" cudaError_t babybear_extension_transpose_matrix_cuda(
  const babybear::extension_t* input,
  uint32_t row_size,
  uint32_t column_size,
  babybear::extension_t* output,
  device_context::DeviceContext& ctx,
  bool on_device,
  bool is_async);

#endif
||||
132
icicle/include/api/bls12_377.h
Normal file
132
icicle/include/api/bls12_377.h
Normal file
@@ -0,0 +1,132 @@
|
||||
// WARNING: This file is auto-generated by a script.
// Any changes made to this file may be overwritten.
// Please modify the code generation script instead.
// Path to the code generation script: scripts/gen_c_api.py

// C API surface for the BLS12-377 curve: MSM (G1/G2), point utilities,
// Montgomery conversion, ECNTT, Poseidon hashing & Merkle tree building,
// element-wise vector ops, and NTT domain management.

#pragma once
#ifndef BLS12_377_API_H
#define BLS12_377_API_H

#include <cuda_runtime.h>
#include "gpu-utils/device_context.cuh"
#include "curves/params/bls12_377.cuh"
#include "ntt/ntt.cuh"
#include "msm/msm.cuh"
#include "vec_ops/vec_ops.cuh"
#include "poseidon/poseidon.cuh"
#include "poseidon/tree/merkle.cuh"

// --- MSM (multi-scalar multiplication) over G2 and G1 ---

extern "C" cudaError_t bls12_377_g2_precompute_msm_bases_cuda(
  bls12_377::g2_affine_t* bases,
  int bases_size,
  int precompute_factor,
  int _c,
  bool are_bases_on_device,
  device_context::DeviceContext& ctx,
  bls12_377::g2_affine_t* output_bases);

extern "C" cudaError_t bls12_377_g2_msm_cuda(
  const bls12_377::scalar_t* scalars, const bls12_377::g2_affine_t* points, int msm_size, msm::MSMConfig& config, bls12_377::g2_projective_t* out);

extern "C" cudaError_t bls12_377_precompute_msm_bases_cuda(
  bls12_377::affine_t* bases,
  int bases_size,
  int precompute_factor,
  int _c,
  bool are_bases_on_device,
  device_context::DeviceContext& ctx,
  bls12_377::affine_t* output_bases);

extern "C" cudaError_t bls12_377_msm_cuda(
  const bls12_377::scalar_t* scalars, const bls12_377::affine_t* points, int msm_size, msm::MSMConfig& config, bls12_377::projective_t* out);

// --- G2 point utilities and Montgomery conversion ---

extern "C" bool bls12_377_g2_eq(bls12_377::g2_projective_t* point1, bls12_377::g2_projective_t* point2);

extern "C" void bls12_377_g2_to_affine(bls12_377::g2_projective_t* point, bls12_377::g2_affine_t* point_out);

extern "C" void bls12_377_g2_generate_projective_points(bls12_377::g2_projective_t* points, int size);

extern "C" void bls12_377_g2_generate_affine_points(bls12_377::g2_affine_t* points, int size);

extern "C" cudaError_t bls12_377_g2_affine_convert_montgomery(
  bls12_377::g2_affine_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);

extern "C" cudaError_t bls12_377_g2_projective_convert_montgomery(
  bls12_377::g2_projective_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);

// --- ECNTT and G1 point utilities ---

extern "C" cudaError_t bls12_377_ecntt_cuda(
  const bls12_377::projective_t* input, int size, ntt::NTTDir dir, ntt::NTTConfig<bls12_377::scalar_t>& config, bls12_377::projective_t* output);

extern "C" bool bls12_377_eq(bls12_377::projective_t* point1, bls12_377::projective_t* point2);

extern "C" void bls12_377_to_affine(bls12_377::projective_t* point, bls12_377::affine_t* point_out);

extern "C" void bls12_377_generate_projective_points(bls12_377::projective_t* points, int size);

extern "C" void bls12_377_generate_affine_points(bls12_377::affine_t* points, int size);

extern "C" cudaError_t bls12_377_affine_convert_montgomery(
  bls12_377::affine_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);

extern "C" cudaError_t bls12_377_projective_convert_montgomery(
  bls12_377::projective_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);

// --- Poseidon hash and Merkle tree ---

extern "C" cudaError_t bls12_377_create_optimized_poseidon_constants_cuda(
  int arity,
  int full_rounds_half,
  int partial_rounds,
  const bls12_377::scalar_t* constants,
  device_context::DeviceContext& ctx,
  poseidon::PoseidonConstants<bls12_377::scalar_t>* poseidon_constants);

extern "C" cudaError_t bls12_377_init_optimized_poseidon_constants_cuda(
  int arity, device_context::DeviceContext& ctx, poseidon::PoseidonConstants<bls12_377::scalar_t>* constants);

extern "C" cudaError_t bls12_377_poseidon_hash_cuda(
  bls12_377::scalar_t* input,
  bls12_377::scalar_t* output,
  int number_of_states,
  int arity,
  const poseidon::PoseidonConstants<bls12_377::scalar_t>& constants,
  poseidon::PoseidonConfig& config);

extern "C" cudaError_t bls12_377_build_poseidon_merkle_tree(
  const bls12_377::scalar_t* leaves,
  bls12_377::scalar_t* digests,
  uint32_t height,
  int arity,
  poseidon::PoseidonConstants<bls12_377::scalar_t>& constants,
  merkle::TreeBuilderConfig& config);

// --- Element-wise scalar vector ops and matrix transpose ---

extern "C" cudaError_t bls12_377_mul_cuda(
  bls12_377::scalar_t* vec_a, bls12_377::scalar_t* vec_b, int n, vec_ops::VecOpsConfig& config, bls12_377::scalar_t* result);

extern "C" cudaError_t bls12_377_add_cuda(
  bls12_377::scalar_t* vec_a, bls12_377::scalar_t* vec_b, int n, vec_ops::VecOpsConfig& config, bls12_377::scalar_t* result);

extern "C" cudaError_t bls12_377_sub_cuda(
  bls12_377::scalar_t* vec_a, bls12_377::scalar_t* vec_b, int n, vec_ops::VecOpsConfig& config, bls12_377::scalar_t* result);

extern "C" cudaError_t bls12_377_transpose_matrix_cuda(
  const bls12_377::scalar_t* input,
  uint32_t row_size,
  uint32_t column_size,
  bls12_377::scalar_t* output,
  device_context::DeviceContext& ctx,
  bool on_device,
  bool is_async);

// --- Scalar generation, Montgomery conversion, and NTT domain lifecycle ---

extern "C" void bls12_377_generate_scalars(bls12_377::scalar_t* scalars, int size);

extern "C" cudaError_t bls12_377_scalar_convert_montgomery(
  bls12_377::scalar_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);

extern "C" cudaError_t bls12_377_initialize_domain(
  bls12_377::scalar_t* primitive_root, device_context::DeviceContext& ctx, bool fast_twiddles_mode);

extern "C" cudaError_t bls12_377_ntt_cuda(
  const bls12_377::scalar_t* input, int size, ntt::NTTDir dir, ntt::NTTConfig<bls12_377::scalar_t>& config, bls12_377::scalar_t* output);

extern "C" cudaError_t bls12_377_release_domain(device_context::DeviceContext& ctx);

#endif
|
||||
132
icicle/include/api/bls12_381.h
Normal file
132
icicle/include/api/bls12_381.h
Normal file
@@ -0,0 +1,132 @@
|
||||
// WARNING: This file is auto-generated by a script.
// Any changes made to this file may be overwritten.
// Please modify the code generation script instead.
// Path to the code generation script: scripts/gen_c_api.py

// C API surface for the BLS12-381 curve: MSM (G1/G2), point utilities,
// Montgomery conversion, ECNTT, Poseidon hashing & Merkle tree building,
// element-wise vector ops, and NTT domain management.

#pragma once
#ifndef BLS12_381_API_H
#define BLS12_381_API_H

#include <cuda_runtime.h>
#include "gpu-utils/device_context.cuh"
#include "curves/params/bls12_381.cuh"
#include "ntt/ntt.cuh"
#include "msm/msm.cuh"
#include "vec_ops/vec_ops.cuh"
#include "poseidon/poseidon.cuh"
#include "poseidon/tree/merkle.cuh"

// --- MSM (multi-scalar multiplication) over G2 and G1 ---

extern "C" cudaError_t bls12_381_g2_precompute_msm_bases_cuda(
  bls12_381::g2_affine_t* bases,
  int bases_size,
  int precompute_factor,
  int _c,
  bool are_bases_on_device,
  device_context::DeviceContext& ctx,
  bls12_381::g2_affine_t* output_bases);

extern "C" cudaError_t bls12_381_g2_msm_cuda(
  const bls12_381::scalar_t* scalars, const bls12_381::g2_affine_t* points, int msm_size, msm::MSMConfig& config, bls12_381::g2_projective_t* out);

extern "C" cudaError_t bls12_381_precompute_msm_bases_cuda(
  bls12_381::affine_t* bases,
  int bases_size,
  int precompute_factor,
  int _c,
  bool are_bases_on_device,
  device_context::DeviceContext& ctx,
  bls12_381::affine_t* output_bases);

extern "C" cudaError_t bls12_381_msm_cuda(
  const bls12_381::scalar_t* scalars, const bls12_381::affine_t* points, int msm_size, msm::MSMConfig& config, bls12_381::projective_t* out);

// --- G2 point utilities and Montgomery conversion ---

extern "C" bool bls12_381_g2_eq(bls12_381::g2_projective_t* point1, bls12_381::g2_projective_t* point2);

extern "C" void bls12_381_g2_to_affine(bls12_381::g2_projective_t* point, bls12_381::g2_affine_t* point_out);

extern "C" void bls12_381_g2_generate_projective_points(bls12_381::g2_projective_t* points, int size);

extern "C" void bls12_381_g2_generate_affine_points(bls12_381::g2_affine_t* points, int size);

extern "C" cudaError_t bls12_381_g2_affine_convert_montgomery(
  bls12_381::g2_affine_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);

extern "C" cudaError_t bls12_381_g2_projective_convert_montgomery(
  bls12_381::g2_projective_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);

// --- ECNTT and G1 point utilities ---

extern "C" cudaError_t bls12_381_ecntt_cuda(
  const bls12_381::projective_t* input, int size, ntt::NTTDir dir, ntt::NTTConfig<bls12_381::scalar_t>& config, bls12_381::projective_t* output);

extern "C" bool bls12_381_eq(bls12_381::projective_t* point1, bls12_381::projective_t* point2);

extern "C" void bls12_381_to_affine(bls12_381::projective_t* point, bls12_381::affine_t* point_out);

extern "C" void bls12_381_generate_projective_points(bls12_381::projective_t* points, int size);

extern "C" void bls12_381_generate_affine_points(bls12_381::affine_t* points, int size);

extern "C" cudaError_t bls12_381_affine_convert_montgomery(
  bls12_381::affine_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);

extern "C" cudaError_t bls12_381_projective_convert_montgomery(
  bls12_381::projective_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);

// --- Poseidon hash and Merkle tree ---

extern "C" cudaError_t bls12_381_create_optimized_poseidon_constants_cuda(
  int arity,
  int full_rounds_half,
  int partial_rounds,
  const bls12_381::scalar_t* constants,
  device_context::DeviceContext& ctx,
  poseidon::PoseidonConstants<bls12_381::scalar_t>* poseidon_constants);

extern "C" cudaError_t bls12_381_init_optimized_poseidon_constants_cuda(
  int arity, device_context::DeviceContext& ctx, poseidon::PoseidonConstants<bls12_381::scalar_t>* constants);

extern "C" cudaError_t bls12_381_poseidon_hash_cuda(
  bls12_381::scalar_t* input,
  bls12_381::scalar_t* output,
  int number_of_states,
  int arity,
  const poseidon::PoseidonConstants<bls12_381::scalar_t>& constants,
  poseidon::PoseidonConfig& config);

extern "C" cudaError_t bls12_381_build_poseidon_merkle_tree(
  const bls12_381::scalar_t* leaves,
  bls12_381::scalar_t* digests,
  uint32_t height,
  int arity,
  poseidon::PoseidonConstants<bls12_381::scalar_t>& constants,
  merkle::TreeBuilderConfig& config);

// --- Element-wise scalar vector ops and matrix transpose ---

extern "C" cudaError_t bls12_381_mul_cuda(
  bls12_381::scalar_t* vec_a, bls12_381::scalar_t* vec_b, int n, vec_ops::VecOpsConfig& config, bls12_381::scalar_t* result);

extern "C" cudaError_t bls12_381_add_cuda(
  bls12_381::scalar_t* vec_a, bls12_381::scalar_t* vec_b, int n, vec_ops::VecOpsConfig& config, bls12_381::scalar_t* result);

extern "C" cudaError_t bls12_381_sub_cuda(
  bls12_381::scalar_t* vec_a, bls12_381::scalar_t* vec_b, int n, vec_ops::VecOpsConfig& config, bls12_381::scalar_t* result);

extern "C" cudaError_t bls12_381_transpose_matrix_cuda(
  const bls12_381::scalar_t* input,
  uint32_t row_size,
  uint32_t column_size,
  bls12_381::scalar_t* output,
  device_context::DeviceContext& ctx,
  bool on_device,
  bool is_async);

// --- Scalar generation, Montgomery conversion, and NTT domain lifecycle ---

extern "C" void bls12_381_generate_scalars(bls12_381::scalar_t* scalars, int size);

extern "C" cudaError_t bls12_381_scalar_convert_montgomery(
  bls12_381::scalar_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);

extern "C" cudaError_t bls12_381_initialize_domain(
  bls12_381::scalar_t* primitive_root, device_context::DeviceContext& ctx, bool fast_twiddles_mode);

extern "C" cudaError_t bls12_381_ntt_cuda(
  const bls12_381::scalar_t* input, int size, ntt::NTTDir dir, ntt::NTTConfig<bls12_381::scalar_t>& config, bls12_381::scalar_t* output);

extern "C" cudaError_t bls12_381_release_domain(device_context::DeviceContext& ctx);

#endif
|
||||
132
icicle/include/api/bn254.h
Normal file
132
icicle/include/api/bn254.h
Normal file
@@ -0,0 +1,132 @@
|
||||
// WARNING: This file is auto-generated by a script.
|
||||
// Any changes made to this file may be overwritten.
|
||||
// Please modify the code generation script instead.
|
||||
// Path to the code generation script: scripts/gen_c_api.py
|
||||
|
||||
#pragma once
|
||||
#ifndef BN254_API_H
|
||||
#define BN254_API_H
|
||||
|
||||
#include <cuda_runtime.h>
|
||||
#include "gpu-utils/device_context.cuh"
|
||||
#include "curves/params/bn254.cuh"
|
||||
#include "ntt/ntt.cuh"
|
||||
#include "msm/msm.cuh"
|
||||
#include "vec_ops/vec_ops.cuh"
|
||||
#include "poseidon/poseidon.cuh"
|
||||
#include "poseidon/tree/merkle.cuh"
|
||||
|
||||
extern "C" cudaError_t bn254_g2_precompute_msm_bases_cuda(
|
||||
bn254::g2_affine_t* bases,
|
||||
int bases_size,
|
||||
int precompute_factor,
|
||||
int _c,
|
||||
bool are_bases_on_device,
|
||||
device_context::DeviceContext& ctx,
|
||||
bn254::g2_affine_t* output_bases);
|
||||
|
||||
extern "C" cudaError_t bn254_g2_msm_cuda(
|
||||
const bn254::scalar_t* scalars, const bn254::g2_affine_t* points, int msm_size, msm::MSMConfig& config, bn254::g2_projective_t* out);
|
||||
|
||||
extern "C" cudaError_t bn254_precompute_msm_bases_cuda(
|
||||
bn254::affine_t* bases,
|
||||
int bases_size,
|
||||
int precompute_factor,
|
||||
int _c,
|
||||
bool are_bases_on_device,
|
||||
device_context::DeviceContext& ctx,
|
||||
bn254::affine_t* output_bases);
|
||||
|
||||
extern "C" cudaError_t bn254_msm_cuda(
|
||||
const bn254::scalar_t* scalars, const bn254::affine_t* points, int msm_size, msm::MSMConfig& config, bn254::projective_t* out);
|
||||
|
||||
extern "C" bool bn254_g2_eq(bn254::g2_projective_t* point1, bn254::g2_projective_t* point2);
|
||||
|
||||
extern "C" void bn254_g2_to_affine(bn254::g2_projective_t* point, bn254::g2_affine_t* point_out);
|
||||
|
||||
extern "C" void bn254_g2_generate_projective_points(bn254::g2_projective_t* points, int size);
|
||||
|
||||
extern "C" void bn254_g2_generate_affine_points(bn254::g2_affine_t* points, int size);
|
||||
|
||||
extern "C" cudaError_t bn254_g2_affine_convert_montgomery(
|
||||
bn254::g2_affine_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);
|
||||
|
||||
extern "C" cudaError_t bn254_g2_projective_convert_montgomery(
|
||||
bn254::g2_projective_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);
|
||||
|
||||
extern "C" cudaError_t bn254_ecntt_cuda(
|
||||
const bn254::projective_t* input, int size, ntt::NTTDir dir, ntt::NTTConfig<bn254::scalar_t>& config, bn254::projective_t* output);
|
||||
|
||||
extern "C" bool bn254_eq(bn254::projective_t* point1, bn254::projective_t* point2);
|
||||
|
||||
extern "C" void bn254_to_affine(bn254::projective_t* point, bn254::affine_t* point_out);
|
||||
|
||||
extern "C" void bn254_generate_projective_points(bn254::projective_t* points, int size);
|
||||
|
||||
extern "C" void bn254_generate_affine_points(bn254::affine_t* points, int size);
|
||||
|
||||
extern "C" cudaError_t bn254_affine_convert_montgomery(
|
||||
bn254::affine_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);
|
||||
|
||||
extern "C" cudaError_t bn254_projective_convert_montgomery(
|
||||
bn254::projective_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);
|
||||
|
||||
extern "C" cudaError_t bn254_create_optimized_poseidon_constants_cuda(
|
||||
int arity,
|
||||
int full_rounds_half,
|
||||
int partial_rounds,
|
||||
const bn254::scalar_t* constants,
|
||||
device_context::DeviceContext& ctx,
|
||||
poseidon::PoseidonConstants<bn254::scalar_t>* poseidon_constants);
|
||||
|
||||
extern "C" cudaError_t bn254_init_optimized_poseidon_constants_cuda(
|
||||
int arity, device_context::DeviceContext& ctx, poseidon::PoseidonConstants<bn254::scalar_t>* constants);
|
||||
|
||||
extern "C" cudaError_t bn254_poseidon_hash_cuda(
|
||||
bn254::scalar_t* input,
|
||||
bn254::scalar_t* output,
|
||||
int number_of_states,
|
||||
int arity,
|
||||
const poseidon::PoseidonConstants<bn254::scalar_t>& constants,
|
||||
poseidon::PoseidonConfig& config);
|
||||
|
||||
extern "C" cudaError_t bn254_build_poseidon_merkle_tree(
|
||||
const bn254::scalar_t* leaves,
|
||||
bn254::scalar_t* digests,
|
||||
uint32_t height,
|
||||
int arity,
|
||||
poseidon::PoseidonConstants<bn254::scalar_t>& constants,
|
||||
merkle::TreeBuilderConfig& config);
|
||||
|
||||
extern "C" cudaError_t bn254_mul_cuda(
|
||||
bn254::scalar_t* vec_a, bn254::scalar_t* vec_b, int n, vec_ops::VecOpsConfig& config, bn254::scalar_t* result);
|
||||
|
||||
extern "C" cudaError_t bn254_add_cuda(
|
||||
bn254::scalar_t* vec_a, bn254::scalar_t* vec_b, int n, vec_ops::VecOpsConfig& config, bn254::scalar_t* result);
|
||||
|
||||
extern "C" cudaError_t bn254_sub_cuda(
|
||||
bn254::scalar_t* vec_a, bn254::scalar_t* vec_b, int n, vec_ops::VecOpsConfig& config, bn254::scalar_t* result);
|
||||
|
||||
extern "C" cudaError_t bn254_transpose_matrix_cuda(
|
||||
const bn254::scalar_t* input,
|
||||
uint32_t row_size,
|
||||
uint32_t column_size,
|
||||
bn254::scalar_t* output,
|
||||
device_context::DeviceContext& ctx,
|
||||
bool on_device,
|
||||
bool is_async);
|
||||
|
||||
extern "C" void bn254_generate_scalars(bn254::scalar_t* scalars, int size);
|
||||
|
||||
extern "C" cudaError_t bn254_scalar_convert_montgomery(
|
||||
bn254::scalar_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);
|
||||
|
||||
extern "C" cudaError_t bn254_initialize_domain(
|
||||
bn254::scalar_t* primitive_root, device_context::DeviceContext& ctx, bool fast_twiddles_mode);
|
||||
|
||||
extern "C" cudaError_t bn254_ntt_cuda(
|
||||
const bn254::scalar_t* input, int size, ntt::NTTDir dir, ntt::NTTConfig<bn254::scalar_t>& config, bn254::scalar_t* output);
|
||||
|
||||
extern "C" cudaError_t bn254_release_domain(device_context::DeviceContext& ctx);
|
||||
|
||||
#endif
|
||||
132
icicle/include/api/bw6_761.h
Normal file
132
icicle/include/api/bw6_761.h
Normal file
@@ -0,0 +1,132 @@
|
||||
// WARNING: This file is auto-generated by a script.
|
||||
// Any changes made to this file may be overwritten.
|
||||
// Please modify the code generation script instead.
|
||||
// Path to the code generation script: scripts/gen_c_api.py
|
||||
|
||||
#pragma once
|
||||
#ifndef BW6_761_API_H
|
||||
#define BW6_761_API_H
|
||||
|
||||
#include <cuda_runtime.h>
|
||||
#include "gpu-utils/device_context.cuh"
|
||||
#include "curves/params/bw6_761.cuh"
|
||||
#include "ntt/ntt.cuh"
|
||||
#include "msm/msm.cuh"
|
||||
#include "vec_ops/vec_ops.cuh"
|
||||
#include "poseidon/poseidon.cuh"
|
||||
#include "poseidon/tree/merkle.cuh"
|
||||
|
||||
extern "C" cudaError_t bw6_761_g2_precompute_msm_bases_cuda(
|
||||
bw6_761::g2_affine_t* bases,
|
||||
int bases_size,
|
||||
int precompute_factor,
|
||||
int _c,
|
||||
bool are_bases_on_device,
|
||||
device_context::DeviceContext& ctx,
|
||||
bw6_761::g2_affine_t* output_bases);
|
||||
|
||||
extern "C" cudaError_t bw6_761_g2_msm_cuda(
|
||||
const bw6_761::scalar_t* scalars, const bw6_761::g2_affine_t* points, int msm_size, msm::MSMConfig& config, bw6_761::g2_projective_t* out);
|
||||
|
||||
extern "C" cudaError_t bw6_761_precompute_msm_bases_cuda(
|
||||
bw6_761::affine_t* bases,
|
||||
int bases_size,
|
||||
int precompute_factor,
|
||||
int _c,
|
||||
bool are_bases_on_device,
|
||||
device_context::DeviceContext& ctx,
|
||||
bw6_761::affine_t* output_bases);
|
||||
|
||||
extern "C" cudaError_t bw6_761_msm_cuda(
|
||||
const bw6_761::scalar_t* scalars, const bw6_761::affine_t* points, int msm_size, msm::MSMConfig& config, bw6_761::projective_t* out);
|
||||
|
||||
extern "C" bool bw6_761_g2_eq(bw6_761::g2_projective_t* point1, bw6_761::g2_projective_t* point2);
|
||||
|
||||
extern "C" void bw6_761_g2_to_affine(bw6_761::g2_projective_t* point, bw6_761::g2_affine_t* point_out);
|
||||
|
||||
extern "C" void bw6_761_g2_generate_projective_points(bw6_761::g2_projective_t* points, int size);
|
||||
|
||||
extern "C" void bw6_761_g2_generate_affine_points(bw6_761::g2_affine_t* points, int size);
|
||||
|
||||
extern "C" cudaError_t bw6_761_g2_affine_convert_montgomery(
|
||||
bw6_761::g2_affine_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);
|
||||
|
||||
extern "C" cudaError_t bw6_761_g2_projective_convert_montgomery(
|
||||
bw6_761::g2_projective_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);
|
||||
|
||||
extern "C" cudaError_t bw6_761_ecntt_cuda(
|
||||
const bw6_761::projective_t* input, int size, ntt::NTTDir dir, ntt::NTTConfig<bw6_761::scalar_t>& config, bw6_761::projective_t* output);
|
||||
|
||||
extern "C" bool bw6_761_eq(bw6_761::projective_t* point1, bw6_761::projective_t* point2);
|
||||
|
||||
extern "C" void bw6_761_to_affine(bw6_761::projective_t* point, bw6_761::affine_t* point_out);
|
||||
|
||||
extern "C" void bw6_761_generate_projective_points(bw6_761::projective_t* points, int size);
|
||||
|
||||
extern "C" void bw6_761_generate_affine_points(bw6_761::affine_t* points, int size);
|
||||
|
||||
extern "C" cudaError_t bw6_761_affine_convert_montgomery(
|
||||
bw6_761::affine_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);
|
||||
|
||||
extern "C" cudaError_t bw6_761_projective_convert_montgomery(
|
||||
bw6_761::projective_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);
|
||||
|
||||
extern "C" cudaError_t bw6_761_create_optimized_poseidon_constants_cuda(
|
||||
int arity,
|
||||
int full_rounds_half,
|
||||
int partial_rounds,
|
||||
const bw6_761::scalar_t* constants,
|
||||
device_context::DeviceContext& ctx,
|
||||
poseidon::PoseidonConstants<bw6_761::scalar_t>* poseidon_constants);
|
||||
|
||||
extern "C" cudaError_t bw6_761_init_optimized_poseidon_constants_cuda(
|
||||
int arity, device_context::DeviceContext& ctx, poseidon::PoseidonConstants<bw6_761::scalar_t>* constants);
|
||||
|
||||
extern "C" cudaError_t bw6_761_poseidon_hash_cuda(
|
||||
bw6_761::scalar_t* input,
|
||||
bw6_761::scalar_t* output,
|
||||
int number_of_states,
|
||||
int arity,
|
||||
const poseidon::PoseidonConstants<bw6_761::scalar_t>& constants,
|
||||
poseidon::PoseidonConfig& config);
|
||||
|
||||
extern "C" cudaError_t bw6_761_build_poseidon_merkle_tree(
|
||||
const bw6_761::scalar_t* leaves,
|
||||
bw6_761::scalar_t* digests,
|
||||
uint32_t height,
|
||||
int arity,
|
||||
poseidon::PoseidonConstants<bw6_761::scalar_t>& constants,
|
||||
merkle::TreeBuilderConfig& config);
|
||||
|
||||
extern "C" cudaError_t bw6_761_mul_cuda(
|
||||
bw6_761::scalar_t* vec_a, bw6_761::scalar_t* vec_b, int n, vec_ops::VecOpsConfig& config, bw6_761::scalar_t* result);
|
||||
|
||||
extern "C" cudaError_t bw6_761_add_cuda(
|
||||
bw6_761::scalar_t* vec_a, bw6_761::scalar_t* vec_b, int n, vec_ops::VecOpsConfig& config, bw6_761::scalar_t* result);
|
||||
|
||||
extern "C" cudaError_t bw6_761_sub_cuda(
|
||||
bw6_761::scalar_t* vec_a, bw6_761::scalar_t* vec_b, int n, vec_ops::VecOpsConfig& config, bw6_761::scalar_t* result);
|
||||
|
||||
extern "C" cudaError_t bw6_761_transpose_matrix_cuda(
|
||||
const bw6_761::scalar_t* input,
|
||||
uint32_t row_size,
|
||||
uint32_t column_size,
|
||||
bw6_761::scalar_t* output,
|
||||
device_context::DeviceContext& ctx,
|
||||
bool on_device,
|
||||
bool is_async);
|
||||
|
||||
extern "C" void bw6_761_generate_scalars(bw6_761::scalar_t* scalars, int size);
|
||||
|
||||
extern "C" cudaError_t bw6_761_scalar_convert_montgomery(
|
||||
bw6_761::scalar_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);
|
||||
|
||||
extern "C" cudaError_t bw6_761_initialize_domain(
|
||||
bw6_761::scalar_t* primitive_root, device_context::DeviceContext& ctx, bool fast_twiddles_mode);
|
||||
|
||||
extern "C" cudaError_t bw6_761_ntt_cuda(
|
||||
const bw6_761::scalar_t* input, int size, ntt::NTTDir dir, ntt::NTTConfig<bw6_761::scalar_t>& config, bw6_761::scalar_t* output);
|
||||
|
||||
extern "C" cudaError_t bw6_761_release_domain(device_context::DeviceContext& ctx);
|
||||
|
||||
#endif
|
||||
94
icicle/include/api/grumpkin.h
Normal file
94
icicle/include/api/grumpkin.h
Normal file
@@ -0,0 +1,94 @@
|
||||
// WARNING: This file is auto-generated by a script.
|
||||
// Any changes made to this file may be overwritten.
|
||||
// Please modify the code generation script instead.
|
||||
// Path to the code generation script: scripts/gen_c_api.py
|
||||
|
||||
#pragma once
|
||||
#ifndef GRUMPKIN_API_H
|
||||
#define GRUMPKIN_API_H
|
||||
|
||||
#include <cuda_runtime.h>
|
||||
#include "gpu-utils/device_context.cuh"
|
||||
#include "curves/params/grumpkin.cuh"
|
||||
#include "msm/msm.cuh"
|
||||
#include "vec_ops/vec_ops.cuh"
|
||||
#include "poseidon/poseidon.cuh"
|
||||
#include "poseidon/tree/merkle.cuh"
|
||||
|
||||
extern "C" cudaError_t grumpkin_precompute_msm_bases_cuda(
|
||||
grumpkin::affine_t* bases,
|
||||
int bases_size,
|
||||
int precompute_factor,
|
||||
int _c,
|
||||
bool are_bases_on_device,
|
||||
device_context::DeviceContext& ctx,
|
||||
grumpkin::affine_t* output_bases);
|
||||
|
||||
extern "C" cudaError_t grumpkin_msm_cuda(
|
||||
const grumpkin::scalar_t* scalars, const grumpkin::affine_t* points, int msm_size, msm::MSMConfig& config, grumpkin::projective_t* out);
|
||||
|
||||
extern "C" bool grumpkin_eq(grumpkin::projective_t* point1, grumpkin::projective_t* point2);
|
||||
|
||||
extern "C" void grumpkin_to_affine(grumpkin::projective_t* point, grumpkin::affine_t* point_out);
|
||||
|
||||
extern "C" void grumpkin_generate_projective_points(grumpkin::projective_t* points, int size);
|
||||
|
||||
extern "C" void grumpkin_generate_affine_points(grumpkin::affine_t* points, int size);
|
||||
|
||||
extern "C" cudaError_t grumpkin_affine_convert_montgomery(
|
||||
grumpkin::affine_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);
|
||||
|
||||
extern "C" cudaError_t grumpkin_projective_convert_montgomery(
|
||||
grumpkin::projective_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);
|
||||
|
||||
extern "C" cudaError_t grumpkin_create_optimized_poseidon_constants_cuda(
|
||||
int arity,
|
||||
int full_rounds_half,
|
||||
int partial_rounds,
|
||||
const grumpkin::scalar_t* constants,
|
||||
device_context::DeviceContext& ctx,
|
||||
poseidon::PoseidonConstants<grumpkin::scalar_t>* poseidon_constants);
|
||||
|
||||
extern "C" cudaError_t grumpkin_init_optimized_poseidon_constants_cuda(
|
||||
int arity, device_context::DeviceContext& ctx, poseidon::PoseidonConstants<grumpkin::scalar_t>* constants);
|
||||
|
||||
extern "C" cudaError_t grumpkin_poseidon_hash_cuda(
|
||||
grumpkin::scalar_t* input,
|
||||
grumpkin::scalar_t* output,
|
||||
int number_of_states,
|
||||
int arity,
|
||||
const poseidon::PoseidonConstants<grumpkin::scalar_t>& constants,
|
||||
poseidon::PoseidonConfig& config);
|
||||
|
||||
extern "C" cudaError_t grumpkin_build_poseidon_merkle_tree(
|
||||
const grumpkin::scalar_t* leaves,
|
||||
grumpkin::scalar_t* digests,
|
||||
uint32_t height,
|
||||
int arity,
|
||||
poseidon::PoseidonConstants<grumpkin::scalar_t>& constants,
|
||||
merkle::TreeBuilderConfig& config);
|
||||
|
||||
extern "C" cudaError_t grumpkin_mul_cuda(
|
||||
grumpkin::scalar_t* vec_a, grumpkin::scalar_t* vec_b, int n, vec_ops::VecOpsConfig& config, grumpkin::scalar_t* result);
|
||||
|
||||
extern "C" cudaError_t grumpkin_add_cuda(
|
||||
grumpkin::scalar_t* vec_a, grumpkin::scalar_t* vec_b, int n, vec_ops::VecOpsConfig& config, grumpkin::scalar_t* result);
|
||||
|
||||
extern "C" cudaError_t grumpkin_sub_cuda(
|
||||
grumpkin::scalar_t* vec_a, grumpkin::scalar_t* vec_b, int n, vec_ops::VecOpsConfig& config, grumpkin::scalar_t* result);
|
||||
|
||||
extern "C" cudaError_t grumpkin_transpose_matrix_cuda(
|
||||
const grumpkin::scalar_t* input,
|
||||
uint32_t row_size,
|
||||
uint32_t column_size,
|
||||
grumpkin::scalar_t* output,
|
||||
device_context::DeviceContext& ctx,
|
||||
bool on_device,
|
||||
bool is_async);
|
||||
|
||||
extern "C" void grumpkin_generate_scalars(grumpkin::scalar_t* scalars, int size);
|
||||
|
||||
extern "C" cudaError_t grumpkin_scalar_convert_montgomery(
|
||||
grumpkin::scalar_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);
|
||||
|
||||
#endif
|
||||
16
icicle/include/api/hash.h
Normal file
16
icicle/include/api/hash.h
Normal file
@@ -0,0 +1,16 @@
|
||||
#pragma once
|
||||
|
||||
#ifndef HASH_API_H
|
||||
#define HASH_API_H
|
||||
|
||||
#include <cuda_runtime.h>
|
||||
#include "gpu-utils/device_context.cuh"
|
||||
#include "hash/keccak/keccak.cuh"
|
||||
|
||||
extern "C" cudaError_t
|
||||
keccak256_cuda(uint8_t* input, int input_block_size, int number_of_blocks, uint8_t* output, KeccakConfig config);
|
||||
|
||||
extern "C" cudaError_t
|
||||
keccak512_cuda(uint8_t* input, int input_block_size, int number_of_blocks, uint8_t* output, KeccakConfig config);
|
||||
|
||||
#endif
|
||||
13
icicle/include/api/templates/curves/curve.h
Normal file
13
icicle/include/api/templates/curves/curve.h
Normal file
@@ -0,0 +1,13 @@
|
||||
extern "C" bool ${CURVE}_eq(${CURVE}::projective_t* point1, ${CURVE}::projective_t* point2);
|
||||
|
||||
extern "C" void ${CURVE}_to_affine(${CURVE}::projective_t* point, ${CURVE}::affine_t* point_out);
|
||||
|
||||
extern "C" void ${CURVE}_generate_projective_points(${CURVE}::projective_t* points, int size);
|
||||
|
||||
extern "C" void ${CURVE}_generate_affine_points(${CURVE}::affine_t* points, int size);
|
||||
|
||||
extern "C" cudaError_t ${CURVE}_affine_convert_montgomery(
|
||||
${CURVE}::affine_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);
|
||||
|
||||
extern "C" cudaError_t ${CURVE}_projective_convert_montgomery(
|
||||
${CURVE}::projective_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);
|
||||
13
icicle/include/api/templates/curves/curve_g2.h
Normal file
13
icicle/include/api/templates/curves/curve_g2.h
Normal file
@@ -0,0 +1,13 @@
|
||||
extern "C" bool ${CURVE}_g2_eq(${CURVE}::g2_projective_t* point1, ${CURVE}::g2_projective_t* point2);
|
||||
|
||||
extern "C" void ${CURVE}_g2_to_affine(${CURVE}::g2_projective_t* point, ${CURVE}::g2_affine_t* point_out);
|
||||
|
||||
extern "C" void ${CURVE}_g2_generate_projective_points(${CURVE}::g2_projective_t* points, int size);
|
||||
|
||||
extern "C" void ${CURVE}_g2_generate_affine_points(${CURVE}::g2_affine_t* points, int size);
|
||||
|
||||
extern "C" cudaError_t ${CURVE}_g2_affine_convert_montgomery(
|
||||
${CURVE}::g2_affine_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);
|
||||
|
||||
extern "C" cudaError_t ${CURVE}_g2_projective_convert_montgomery(
|
||||
${CURVE}::g2_projective_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);
|
||||
2
icicle/include/api/templates/curves/ecntt.h
Normal file
2
icicle/include/api/templates/curves/ecntt.h
Normal file
@@ -0,0 +1,2 @@
|
||||
extern "C" cudaError_t ${CURVE}_ecntt_cuda(
|
||||
const ${CURVE}::projective_t* input, int size, ntt::NTTDir dir, ntt::NTTConfig<${CURVE}::scalar_t>& config, ${CURVE}::projective_t* output);
|
||||
11
icicle/include/api/templates/curves/msm.h
Normal file
11
icicle/include/api/templates/curves/msm.h
Normal file
@@ -0,0 +1,11 @@
|
||||
extern "C" cudaError_t ${CURVE}_precompute_msm_bases_cuda(
|
||||
${CURVE}::affine_t* bases,
|
||||
int bases_size,
|
||||
int precompute_factor,
|
||||
int _c,
|
||||
bool are_bases_on_device,
|
||||
device_context::DeviceContext& ctx,
|
||||
${CURVE}::affine_t* output_bases);
|
||||
|
||||
extern "C" cudaError_t ${CURVE}_msm_cuda(
|
||||
const ${CURVE}::scalar_t* scalars, const ${CURVE}::affine_t* points, int msm_size, msm::MSMConfig& config, ${CURVE}::projective_t* out);
|
||||
11
icicle/include/api/templates/curves/msm_g2.h
Normal file
11
icicle/include/api/templates/curves/msm_g2.h
Normal file
@@ -0,0 +1,11 @@
|
||||
extern "C" cudaError_t ${CURVE}_g2_precompute_msm_bases_cuda(
|
||||
${CURVE}::g2_affine_t* bases,
|
||||
int bases_size,
|
||||
int precompute_factor,
|
||||
int _c,
|
||||
bool are_bases_on_device,
|
||||
device_context::DeviceContext& ctx,
|
||||
${CURVE}::g2_affine_t* output_bases);
|
||||
|
||||
extern "C" cudaError_t ${CURVE}_g2_msm_cuda(
|
||||
const ${CURVE}::scalar_t* scalars, const ${CURVE}::g2_affine_t* points, int msm_size, msm::MSMConfig& config, ${CURVE}::g2_projective_t* out);
|
||||
4
icicle/include/api/templates/fields/field.h
Normal file
4
icicle/include/api/templates/fields/field.h
Normal file
@@ -0,0 +1,4 @@
|
||||
extern "C" void ${FIELD}_generate_scalars(${FIELD}::scalar_t* scalars, int size);
|
||||
|
||||
extern "C" cudaError_t ${FIELD}_scalar_convert_montgomery(
|
||||
${FIELD}::scalar_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);
|
||||
4
icicle/include/api/templates/fields/field_ext.h
Normal file
4
icicle/include/api/templates/fields/field_ext.h
Normal file
@@ -0,0 +1,4 @@
|
||||
extern "C" void ${FIELD}_extension_generate_scalars(${FIELD}::extension_t* scalars, int size);
|
||||
|
||||
extern "C" cudaError_t ${FIELD}_extension_scalar_convert_montgomery(
|
||||
${FIELD}::extension_t* d_inout, size_t n, bool is_into, device_context::DeviceContext& ctx);
|
||||
7
icicle/include/api/templates/fields/ntt.h
Normal file
7
icicle/include/api/templates/fields/ntt.h
Normal file
@@ -0,0 +1,7 @@
|
||||
extern "C" cudaError_t ${FIELD}_initialize_domain(
|
||||
${FIELD}::scalar_t* primitive_root, device_context::DeviceContext& ctx, bool fast_twiddles_mode);
|
||||
|
||||
extern "C" cudaError_t ${FIELD}_ntt_cuda(
|
||||
const ${FIELD}::scalar_t* input, int size, ntt::NTTDir dir, ntt::NTTConfig<${FIELD}::scalar_t>& config, ${FIELD}::scalar_t* output);
|
||||
|
||||
extern "C" cudaError_t ${FIELD}_release_domain(device_context::DeviceContext& ctx);
|
||||
2
icicle/include/api/templates/fields/ntt_ext.h
Normal file
2
icicle/include/api/templates/fields/ntt_ext.h
Normal file
@@ -0,0 +1,2 @@
|
||||
extern "C" cudaError_t ${FIELD}_extension_ntt_cuda(
|
||||
const ${FIELD}::extension_t* input, int size, ntt::NTTDir dir, ntt::NTTConfig<${FIELD}::scalar_t>& config, ${FIELD}::extension_t* output);
|
||||
26
icicle/include/api/templates/fields/poseidon.h
Normal file
26
icicle/include/api/templates/fields/poseidon.h
Normal file
@@ -0,0 +1,26 @@
|
||||
extern "C" cudaError_t ${FIELD}_create_optimized_poseidon_constants_cuda(
|
||||
int arity,
|
||||
int full_rounds_half,
|
||||
int partial_rounds,
|
||||
const ${FIELD}::scalar_t* constants,
|
||||
device_context::DeviceContext& ctx,
|
||||
poseidon::PoseidonConstants<${FIELD}::scalar_t>* poseidon_constants);
|
||||
|
||||
extern "C" cudaError_t ${FIELD}_init_optimized_poseidon_constants_cuda(
|
||||
int arity, device_context::DeviceContext& ctx, poseidon::PoseidonConstants<${FIELD}::scalar_t>* constants);
|
||||
|
||||
extern "C" cudaError_t ${FIELD}_poseidon_hash_cuda(
|
||||
${FIELD}::scalar_t* input,
|
||||
${FIELD}::scalar_t* output,
|
||||
int number_of_states,
|
||||
int arity,
|
||||
const poseidon::PoseidonConstants<${FIELD}::scalar_t>& constants,
|
||||
poseidon::PoseidonConfig& config);
|
||||
|
||||
extern "C" cudaError_t ${FIELD}_build_poseidon_merkle_tree(
|
||||
const ${FIELD}::scalar_t* leaves,
|
||||
${FIELD}::scalar_t* digests,
|
||||
uint32_t height,
|
||||
int arity,
|
||||
poseidon::PoseidonConstants<${FIELD}::scalar_t>& constants,
|
||||
merkle::TreeBuilderConfig& config);
|
||||
17
icicle/include/api/templates/fields/vec_ops.h
Normal file
17
icicle/include/api/templates/fields/vec_ops.h
Normal file
@@ -0,0 +1,17 @@
|
||||
extern "C" cudaError_t ${FIELD}_mul_cuda(
|
||||
${FIELD}::scalar_t* vec_a, ${FIELD}::scalar_t* vec_b, int n, vec_ops::VecOpsConfig& config, ${FIELD}::scalar_t* result);
|
||||
|
||||
extern "C" cudaError_t ${FIELD}_add_cuda(
|
||||
${FIELD}::scalar_t* vec_a, ${FIELD}::scalar_t* vec_b, int n, vec_ops::VecOpsConfig& config, ${FIELD}::scalar_t* result);
|
||||
|
||||
extern "C" cudaError_t ${FIELD}_sub_cuda(
|
||||
${FIELD}::scalar_t* vec_a, ${FIELD}::scalar_t* vec_b, int n, vec_ops::VecOpsConfig& config, ${FIELD}::scalar_t* result);
|
||||
|
||||
extern "C" cudaError_t ${FIELD}_transpose_matrix_cuda(
|
||||
const ${FIELD}::scalar_t* input,
|
||||
uint32_t row_size,
|
||||
uint32_t column_size,
|
||||
${FIELD}::scalar_t* output,
|
||||
device_context::DeviceContext& ctx,
|
||||
bool on_device,
|
||||
bool is_async);
|
||||
17
icicle/include/api/templates/fields/vec_ops_ext.h
Normal file
17
icicle/include/api/templates/fields/vec_ops_ext.h
Normal file
@@ -0,0 +1,17 @@
|
||||
extern "C" cudaError_t ${FIELD}_extension_mul_cuda(
|
||||
${FIELD}::extension_t* vec_a, ${FIELD}::extension_t* vec_b, int n, vec_ops::VecOpsConfig& config, ${FIELD}::extension_t* result);
|
||||
|
||||
extern "C" cudaError_t ${FIELD}_extension_add_cuda(
|
||||
${FIELD}::extension_t* vec_a, ${FIELD}::extension_t* vec_b, int n, vec_ops::VecOpsConfig& config, ${FIELD}::extension_t* result);
|
||||
|
||||
extern "C" cudaError_t ${FIELD}_extension_sub_cuda(
|
||||
${FIELD}::extension_t* vec_a, ${FIELD}::extension_t* vec_b, int n, vec_ops::VecOpsConfig& config, ${FIELD}::extension_t* result);
|
||||
|
||||
extern "C" cudaError_t ${FIELD}_extension_transpose_matrix_cuda(
|
||||
const ${FIELD}::extension_t* input,
|
||||
uint32_t row_size,
|
||||
uint32_t column_size,
|
||||
${FIELD}::extension_t* output,
|
||||
device_context::DeviceContext& ctx,
|
||||
bool on_device,
|
||||
bool is_async);
|
||||
@@ -1,6 +1,8 @@
|
||||
#pragma once
|
||||
|
||||
#include "field.cuh"
|
||||
#include "gpu-utils/sharedmem.cuh"
|
||||
#include "gpu-utils/modifiers.cuh"
|
||||
#include <iostream>
|
||||
|
||||
template <class FF>
|
||||
class Affine
|
||||
@@ -13,14 +15,14 @@ public:
|
||||
|
||||
static HOST_DEVICE_INLINE Affine zero() { return {FF::zero(), FF::zero()}; }
|
||||
|
||||
static HOST_DEVICE_INLINE Affine ToMontgomery(const Affine& point)
|
||||
static HOST_DEVICE_INLINE Affine to_montgomery(const Affine& point)
|
||||
{
|
||||
return {FF::ToMontgomery(point.x), FF::ToMontgomery(point.y)};
|
||||
return {FF::to_montgomery(point.x), FF::to_montgomery(point.y)};
|
||||
}
|
||||
|
||||
static HOST_DEVICE_INLINE Affine FromMontgomery(const Affine& point)
|
||||
static HOST_DEVICE_INLINE Affine from_montgomery(const Affine& point)
|
||||
{
|
||||
return {FF::FromMontgomery(point.x), FF::FromMontgomery(point.y)};
|
||||
return {FF::from_montgomery(point.x), FF::from_montgomery(point.y)};
|
||||
}
|
||||
|
||||
friend HOST_DEVICE_INLINE bool operator==(const Affine& xs, const Affine& ys)
|
||||
@@ -33,4 +35,13 @@ public:
|
||||
os << "x: " << point.x << "; y: " << point.y;
|
||||
return os;
|
||||
}
|
||||
};
|
||||
};
|
||||
|
||||
template <class FF>
|
||||
struct SharedMemory<Affine<FF>> {
|
||||
__device__ Affine<FF>* getPointer()
|
||||
{
|
||||
extern __shared__ Affine<FF> s_affine_[];
|
||||
return s_affine_;
|
||||
}
|
||||
};
|
||||
34
icicle/include/curves/curve_config.cuh
Normal file
34
icicle/include/curves/curve_config.cuh
Normal file
@@ -0,0 +1,34 @@
|
||||
#pragma once
|
||||
#ifndef CURVE_CONFIG_H
|
||||
#define CURVE_CONFIG_H
|
||||
|
||||
#include "fields/id.h"
|
||||
#include "curves/projective.cuh"
|
||||
|
||||
/**
|
||||
* @namespace curve_config
|
||||
* Namespace with type definitions for short Weierstrass pairing-friendly [elliptic
|
||||
* curves](https://hyperelliptic.org/EFD/g1p/auto-shortw.html). Here, concrete types are created in accordance
|
||||
* with the `-DCURVE` env variable passed during build.
|
||||
*/
|
||||
#if CURVE_ID == BN254
|
||||
#include "curves/params/bn254.cuh"
|
||||
namespace curve_config = bn254;
|
||||
|
||||
#elif CURVE_ID == BLS12_381
|
||||
#include "curves/params/bls12_381.cuh"
|
||||
namespace curve_config = bls12_381;
|
||||
|
||||
#elif CURVE_ID == BLS12_377
|
||||
#include "curves/params/bls12_377.cuh"
|
||||
namespace curve_config = bls12_377;
|
||||
|
||||
#elif CURVE_ID == BW6_761
|
||||
#include "curves/params/bw6_761.cuh"
|
||||
namespace curve_config = bw6_761;
|
||||
|
||||
#elif CURVE_ID == GRUMPKIN
|
||||
#include "curves/params/grumpkin.cuh"
|
||||
namespace curve_config = grumpkin;
|
||||
#endif
|
||||
#endif
|
||||
42
icicle/include/curves/macro.h
Normal file
42
icicle/include/curves/macro.h
Normal file
@@ -0,0 +1,42 @@
|
||||
#pragma once
|
||||
#ifndef CURVE_MACRO_H
|
||||
#define CURVE_MACRO_H
|
||||
|
||||
#define CURVE_DEFINITIONS \
|
||||
/** \
|
||||
* Base field of G1 curve. Is always a prime field. \
|
||||
*/ \
|
||||
typedef Field<fq_config> point_field_t; \
|
||||
\
|
||||
static constexpr point_field_t generator_x = point_field_t{g1_gen_x}; \
|
||||
static constexpr point_field_t generator_y = point_field_t{g1_gen_y}; \
|
||||
static constexpr point_field_t b = point_field_t{weierstrass_b}; \
|
||||
/** \
|
||||
* [Projective representation](https://hyperelliptic.org/EFD/g1p/auto-shortw-projective.html) \
|
||||
* of G1 curve consisting of three coordinates of type [point_field_t](point_field_t). \
|
||||
*/ \
|
||||
typedef Projective<point_field_t, scalar_t, b, generator_x, generator_y> projective_t; \
|
||||
/** \
|
||||
* Affine representation of G1 curve consisting of two coordinates of type [point_field_t](point_field_t). \
|
||||
*/ \
|
||||
typedef Affine<point_field_t> affine_t;
|
||||
|
||||
#define G2_CURVE_DEFINITIONS \
|
||||
typedef ExtensionField<fq_config> g2_point_field_t; \
|
||||
static constexpr g2_point_field_t g2_generator_x = \
|
||||
g2_point_field_t{point_field_t{g2_gen_x_re}, point_field_t{g2_gen_x_im}}; \
|
||||
static constexpr g2_point_field_t g2_generator_y = \
|
||||
g2_point_field_t{point_field_t{g2_gen_y_re}, point_field_t{g2_gen_y_im}}; \
|
||||
static constexpr g2_point_field_t g2_b = \
|
||||
g2_point_field_t{point_field_t{weierstrass_b_g2_re}, point_field_t{weierstrass_b_g2_im}}; \
|
||||
\
|
||||
/** \
|
||||
* [Projective representation](https://hyperelliptic.org/EFD/g1p/auto-shortw-projective.html) of G2 curve. \
|
||||
*/ \
|
||||
typedef Projective<g2_point_field_t, scalar_t, g2_b, g2_generator_x, g2_generator_y> g2_projective_t; \
|
||||
/** \
|
||||
* Affine representation of G1 curve. \
|
||||
*/ \
|
||||
typedef Affine<g2_point_field_t> g2_affine_t;
|
||||
|
||||
#endif
|
||||
48
icicle/include/curves/params/bls12_377.cuh
Normal file
48
icicle/include/curves/params/bls12_377.cuh
Normal file
@@ -0,0 +1,48 @@
|
||||
#pragma once
|
||||
#ifndef BLS12_377_PARAMS_H
|
||||
#define BLS12_377_PARAMS_H
|
||||
|
||||
#include "fields/storage.cuh"
|
||||
|
||||
#include "curves/macro.h"
|
||||
#include "curves/projective.cuh"
|
||||
#include "fields/snark_fields/bls12_377_base.cuh"
|
||||
#include "fields/snark_fields/bls12_377_scalar.cuh"
|
||||
#include "fields/quadratic_extension.cuh"
|
||||
|
||||
namespace bls12_377 {
|
||||
// G1 and G2 generators
|
||||
static constexpr storage<fq_config::limbs_count> g1_gen_x = {0xb21be9ef, 0xeab9b16e, 0xffcd394e, 0xd5481512,
|
||||
0xbd37cb5c, 0x188282c8, 0xaa9d41bb, 0x85951e2c,
|
||||
0xbf87ff54, 0xc8fc6225, 0xfe740a67, 0x008848de};
|
||||
static constexpr storage<fq_config::limbs_count> g1_gen_y = {0x559c8ea6, 0xfd82de55, 0x34a9591a, 0xc2fe3d36,
|
||||
0x4fb82305, 0x6d182ad4, 0xca3e52d9, 0xbd7fb348,
|
||||
0x30afeec4, 0x1f674f5d, 0xc5102eff, 0x01914a69};
|
||||
static constexpr storage<fq_config::limbs_count> g2_gen_x_re = {0x7c005196, 0x74e3e48f, 0xbb535402, 0x71889f52,
|
||||
0x57db6b9b, 0x7ea501f5, 0x203e5031, 0xc565f071,
|
||||
0xa3841d01, 0xc89630a2, 0x71c785fe, 0x018480be};
|
||||
static constexpr storage<fq_config::limbs_count> g2_gen_x_im = {0x6ea16afe, 0xb26bfefa, 0xbff76fe6, 0x5cf89984,
|
||||
0x0799c9de, 0xe7223ece, 0x6651cecb, 0x532777ee,
|
||||
0xb1b140d5, 0x70dc5a51, 0xe7004031, 0x00ea6040};
|
||||
static constexpr storage<fq_config::limbs_count> g2_gen_y_re = {0x09fd4ddf, 0xf0940944, 0x6d8c7c2e, 0xf2cf8888,
|
||||
0xf832d204, 0xe458c282, 0x74b49a58, 0xde03ed72,
|
||||
0xcbb2efb4, 0xd960736b, 0x5d446f7b, 0x00690d66};
|
||||
static constexpr storage<fq_config::limbs_count> g2_gen_y_im = {0x85eb8f93, 0xd9a1cdd1, 0x5e52270b, 0x4279b83f,
|
||||
0xcee304c2, 0x2463b01a, 0x3d591bf1, 0x61ef11ac,
|
||||
0x151a70aa, 0x9e549da3, 0xd2835518, 0x00f8169f};
|
||||
|
||||
static constexpr storage<fq_config::limbs_count> weierstrass_b = {0x00000001, 0x00000000, 0x00000000, 0x00000000,
|
||||
0x00000000, 0x00000000, 0x00000000, 0x00000000,
|
||||
0x00000000, 0x00000000, 0x00000000, 0x00000000};
|
||||
static constexpr storage<fq_config::limbs_count> weierstrass_b_g2_re = {
|
||||
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
|
||||
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000};
|
||||
static constexpr storage<fq_config::limbs_count> weierstrass_b_g2_im = {
|
||||
0x9999999a, 0x1c9ed999, 0x1ccccccd, 0x0dd39e5c, 0x3c6bf800, 0x129207b6,
|
||||
0xcd5fd889, 0xdc7b4f91, 0x7460c589, 0x43bd0373, 0xdb0fd6f3, 0x010222f6};
|
||||
|
||||
CURVE_DEFINITIONS
|
||||
G2_CURVE_DEFINITIONS
|
||||
} // namespace bls12_377
|
||||
|
||||
#endif
|
||||
48
icicle/include/curves/params/bls12_381.cuh
Normal file
48
icicle/include/curves/params/bls12_381.cuh
Normal file
@@ -0,0 +1,48 @@
|
||||
#pragma once
#ifndef BLS12_381_PARAMS_H
#define BLS12_381_PARAMS_H

#include "fields/storage.cuh"

#include "curves/macro.h"
#include "curves/projective.cuh"
#include "fields/snark_fields/bls12_381_base.cuh"
#include "fields/snark_fields/bls12_381_scalar.cuh"
#include "fields/quadratic_extension.cuh"

/// Compile-time parameters of the BLS12-381 pairing-friendly curve:
/// G1/G2 generator coordinates and the short-Weierstrass b coefficients.
/// All values are 32-bit limbs, least-significant limb first (see the
/// small-constant encodings of weierstrass_b below).
namespace bls12_381 {
  // G1 and G2 generators
  static constexpr storage<fq_config::limbs_count> g1_gen_x = {0xdb22c6bb, 0xfb3af00a, 0xf97a1aef, 0x6c55e83f,
                                                               0x171bac58, 0xa14e3a3f, 0x9774b905, 0xc3688c4f,
                                                               0x4fa9ac0f, 0x2695638c, 0x3197d794, 0x17f1d3a7};
  static constexpr storage<fq_config::limbs_count> g1_gen_y = {0x46c5e7e1, 0x0caa2329, 0xa2888ae4, 0xd03cc744,
                                                               0x2c04b3ed, 0x00db18cb, 0xd5d00af6, 0xfcf5e095,
                                                               0x741d8ae4, 0xa09e30ed, 0xe3aaa0f1, 0x08b3f481};
  // G2 coordinates are Fq2 elements: real (_re) and imaginary (_im) parts.
  static constexpr storage<fq_config::limbs_count> g2_gen_x_re = {0xc121bdb8, 0xd48056c8, 0xa805bbef, 0x0bac0326,
                                                                  0x7ae3d177, 0xb4510b64, 0xfa403b02, 0xc6e47ad4,
                                                                  0x2dc51051, 0x26080527, 0xf08f0a91, 0x024aa2b2};
  static constexpr storage<fq_config::limbs_count> g2_gen_x_im = {0x5d042b7e, 0xe5ac7d05, 0x13945d57, 0x334cf112,
                                                                  0xdc7f5049, 0xb5da61bb, 0x9920b61a, 0x596bd0d0,
                                                                  0x88274f65, 0x7dacd3a0, 0x52719f60, 0x13e02b60};
  static constexpr storage<fq_config::limbs_count> g2_gen_y_re = {0x08b82801, 0xe1935486, 0x3baca289, 0x923ac9cc,
                                                                  0x5160d12c, 0x6d429a69, 0x8cbdd3a7, 0xadfd9baa,
                                                                  0xda2e351a, 0x8cc9cdc6, 0x727d6e11, 0x0ce5d527};
  static constexpr storage<fq_config::limbs_count> g2_gen_y_im = {0xf05f79be, 0xaaa9075f, 0x5cec1da1, 0x3f370d27,
                                                                  0x572e99ab, 0x267492ab, 0x85a763af, 0xcb3e287e,
                                                                  0x2bc28b99, 0x32acd2b0, 0x2ea734cc, 0x0606c4a0};

  // Short-Weierstrass coefficient b (y^2 = x^3 + b); b = 4 for BLS12-381,
  // and the G2 twist coefficient is 4 + 4i (both Fq2 parts equal 4).
  static constexpr storage<fq_config::limbs_count> weierstrass_b = {0x00000004, 0x00000000, 0x00000000, 0x00000000,
                                                                    0x00000000, 0x00000000, 0x00000000, 0x00000000,
                                                                    0x00000000, 0x00000000, 0x00000000, 0x00000000};
  static constexpr storage<fq_config::limbs_count> weierstrass_b_g2_re = {
    0x00000004, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
    0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000};
  static constexpr storage<fq_config::limbs_count> weierstrass_b_g2_im = {
    0x00000004, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
    0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000};

  // Instantiate the standard curve typedefs from the constants above
  // (macros defined in curves/macro.h).
  CURVE_DEFINITIONS
  G2_CURVE_DEFINITIONS
} // namespace bls12_381

#endif
|
||||
39
icicle/include/curves/params/bn254.cuh
Normal file
39
icicle/include/curves/params/bn254.cuh
Normal file
@@ -0,0 +1,39 @@
|
||||
#pragma once
#ifndef BN254_PARAMS_H
#define BN254_PARAMS_H

#include "fields/storage.cuh"

#include "curves/macro.h"
#include "curves/projective.cuh"
#include "fields/snark_fields/bn254_base.cuh"
#include "fields/snark_fields/bn254_scalar.cuh"
#include "fields/quadratic_extension.cuh"

/// Compile-time parameters of the BN254 (alt_bn128) pairing-friendly curve:
/// G1/G2 generator coordinates and short-Weierstrass b coefficients.
/// Values are 32-bit limbs, least-significant limb first — the G1 generator
/// (1, 2) encodes as {0x00000001, 0, ...} / {0x00000002, 0, ...}.
namespace bn254 {
  // G1 and G2 generators
  static constexpr storage<fq_config::limbs_count> g1_gen_x = {0x00000001, 0x00000000, 0x00000000, 0x00000000,
                                                               0x00000000, 0x00000000, 0x00000000, 0x00000000};
  static constexpr storage<fq_config::limbs_count> g1_gen_y = {0x00000002, 0x00000000, 0x00000000, 0x00000000,
                                                               0x00000000, 0x00000000, 0x00000000, 0x00000000};
  // G2 coordinates are Fq2 elements: real (_re) and imaginary (_im) parts.
  static constexpr storage<fq_config::limbs_count> g2_gen_x_re = {0xd992f6ed, 0x46debd5c, 0xf75edadd, 0x674322d4,
                                                                  0x5e5c4479, 0x426a0066, 0x121f1e76, 0x1800deef};
  static constexpr storage<fq_config::limbs_count> g2_gen_x_im = {0xaef312c2, 0x97e485b7, 0x35a9e712, 0xf1aa4933,
                                                                  0x31fb5d25, 0x7260bfb7, 0x920d483a, 0x198e9393};
  static constexpr storage<fq_config::limbs_count> g2_gen_y_re = {0x66fa7daa, 0x4ce6cc01, 0x0c43d37b, 0xe3d1e769,
                                                                  0x8dcb408f, 0x4aab7180, 0xdb8c6deb, 0x12c85ea5};
  static constexpr storage<fq_config::limbs_count> g2_gen_y_im = {0xd122975b, 0x55acdadc, 0x70b38ef3, 0xbc4b3133,
                                                                  0x690c3395, 0xec9e99ad, 0x585ff075, 0x090689d0};

  // Short-Weierstrass coefficient b (y^2 = x^3 + b); b = 3 for BN254.
  static constexpr storage<fq_config::limbs_count> weierstrass_b = {0x00000003, 0x00000000, 0x00000000, 0x00000000,
                                                                    0x00000000, 0x00000000, 0x00000000, 0x00000000};
  // b coefficient of the G2 twist as an Fq2 element (3 / (9 + i) — verify
  // against the field code; transcribed verbatim from upstream parameters).
  static constexpr storage<fq_config::limbs_count> weierstrass_b_g2_re = {
    0x24a138e5, 0x3267e6dc, 0x59dbefa3, 0xb5b4c5e5, 0x1be06ac3, 0x81be1899, 0xceb8aaae, 0x2b149d40};
  static constexpr storage<fq_config::limbs_count> weierstrass_b_g2_im = {
    0x85c315d2, 0xe4a2bd06, 0xe52d1852, 0xa74fa084, 0xeed8fdf4, 0xcd2cafad, 0x3af0fed4, 0x009713b0};

  // Instantiate the standard curve typedefs from the constants above
  // (macros defined in curves/macro.h).
  CURVE_DEFINITIONS
  G2_CURVE_DEFINITIONS
} // namespace bn254

#endif
|
||||
58
icicle/include/curves/params/bw6_761.cuh
Normal file
58
icicle/include/curves/params/bw6_761.cuh
Normal file
@@ -0,0 +1,58 @@
|
||||
#pragma once
#ifndef BW6_761_PARAMS_H
#define BW6_761_PARAMS_H

#include "fields/storage.cuh"

#include "curves/macro.h"
#include "curves/projective.cuh"
#include "fields/snark_fields/bw6_761_base.cuh"
#include "fields/snark_fields/bw6_761_scalar.cuh"
#include "fields/quadratic_extension.cuh"

/// Compile-time parameters of the BW6-761 pairing-friendly curve.
/// Unlike the BLS/BN curves in this directory, BW6-761's G2 is defined over
/// the same base field as G1 (no quadratic extension), so its coordinates
/// are single field elements and the G2 types are spelled out manually
/// below instead of via G2_CURVE_DEFINITIONS.
/// Values are 32-bit limbs, least-significant limb first.
namespace bw6_761 {
  // G1 and G2 generators
  static constexpr storage<fq_config::limbs_count> g1_gen_x = {
    0x66e5b43d, 0x4088f3af, 0xa6af603f, 0x055928ac, 0x56133e82, 0x6750dd03, 0x280ca27f, 0x03758f9a,
    0xc9ea0971, 0x5bd71fa0, 0x47729b90, 0xa17a54ce, 0x94c2e746, 0x11dbfcd2, 0xc15520ac, 0x79017ffa,
    0x85f56fc7, 0xee05c54b, 0x551b27f0, 0xe6a0cfb7, 0xa477beae, 0xb277ce98, 0x0ea190c8, 0x01075b02};
  static constexpr storage<fq_config::limbs_count> g1_gen_y = {
    0xb4e95363, 0xbafc8f2d, 0x0b20d2a1, 0xad1cb2be, 0xcad0fb93, 0xb2b08119, 0xb3053253, 0x9f9df141,
    0x6fc2cdd4, 0xbe3fb90b, 0x717a4c55, 0xcc685d31, 0x71b5b806, 0xc5b8fa17, 0xaf7e0dba, 0x265909f1,
    0xa2e573a3, 0x1a7348d2, 0x884c9ec6, 0x0f952589, 0x45cc2a42, 0xe6fd637b, 0x0a6fc574, 0x0058b84e};
  static constexpr storage<fq_config::limbs_count> g2_gen_x = {
    0xcd025f1c, 0xa830c194, 0xe1bf995b, 0x6410cf4f, 0xc2ad54b0, 0x00e96efb, 0x3cd208d7, 0xce6948cb,
    0x00e1b6ba, 0x963317a3, 0xac70e7c7, 0xc5bbcae9, 0xf09feb58, 0x734ec3f1, 0xab3da268, 0x26b41c5d,
    0x13890f6d, 0x4c062010, 0xc5a7115f, 0xd61053aa, 0x69d660f9, 0xc852a82e, 0x41d9b816, 0x01101332};
  static constexpr storage<fq_config::limbs_count> g2_gen_y = {
    0x28c73b61, 0xeb70a167, 0xf9eac689, 0x91ec0594, 0x3c5a02a5, 0x58aa2d3a, 0x504affc7, 0x3ea96fcd,
    0xffa82300, 0x8906c170, 0xd2c712b8, 0x64f293db, 0x33293fef, 0x94c97eb7, 0x0b95a59c, 0x0a1d86c8,
    0x53ffe316, 0x81a78e27, 0xcec2181c, 0x26b7cf9a, 0xe4b6d2dc, 0x8179eb10, 0x7761369f, 0x0017c335};

  // Short-Weierstrass coefficient b for G1 (y^2 = x^3 + b).
  static constexpr storage<fq_config::limbs_count> weierstrass_b = {
    0x0000008a, 0xf49d0000, 0x70000082, 0xe6913e68, 0xeaf0a437, 0x160cf8ae, 0x5667a8f8, 0x98a116c2,
    0x73ebff2e, 0x71dcd3dc, 0x12f9fd90, 0x8689c8ed, 0x25b42304, 0x03cebaff, 0xe584e919, 0x707ba638,
    0x8087be41, 0x528275ef, 0x81d14688, 0xb926186a, 0x04faff3e, 0xd187c940, 0xfb83ce0a, 0x0122e824};
  // Coefficient b of the G2 curve; b = 4 (a plain base-field element here).
  static constexpr storage<fq_config::limbs_count> g2_weierstrass_b = {
    0x00000004, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
    0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
    0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000};

  // Instantiate the standard G1 curve typedefs (macros in curves/macro.h).
  CURVE_DEFINITIONS

  // G2 shares the base field with G1, so reuse point_field_t directly.
  typedef point_field_t g2_point_field_t;
  static constexpr g2_point_field_t g2_generator_x = g2_point_field_t{g2_gen_x};
  static constexpr g2_point_field_t g2_generator_y = g2_point_field_t{g2_gen_y};
  static constexpr g2_point_field_t g2_b = g2_point_field_t{g2_weierstrass_b};

  /**
   * [Projective representation](https://hyperelliptic.org/EFD/g1p/auto-shortw-projective.html) of G2 curve.
   */
  typedef Projective<g2_point_field_t, scalar_t, g2_b, g2_generator_x, g2_generator_y> g2_projective_t;
  /**
   * Affine representation of G2 curve.
   */
  typedef Affine<g2_point_field_t> g2_affine_t;
} // namespace bw6_761

#endif
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user