Compare commits

...

228 Commits

Author SHA1 Message Date
Ethan-000
621676bd41 feat: add num_bits() function (#570)
## Describe the changes

Adds a `num_bits()` function similar to
dcf73a5f96/ff/src/biginteger/mod.rs (L482)

This could be useful for small-field optimizations.

## Linked Issues

Resolves #
2024-08-07 09:37:16 +03:00
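For context, a minimal Go sketch of what such a `num_bits()` computes over little-endian 64-bit limbs (illustrative only, not the actual ICICLE implementation):

```go
package main

import (
	"fmt"
	"math/bits"
)

// numBits returns the bit length of a big integer stored as little-endian
// 64-bit limbs: the index of the highest set bit plus one. A small field
// element therefore reports a short length even in a wide representation.
func numBits(limbs []uint64) int {
	for i := len(limbs) - 1; i >= 0; i-- {
		if limbs[i] != 0 {
			return i*64 + bits.Len64(limbs[i])
		}
	}
	return 0
}

func main() {
	fmt.Println(numBits([]uint64{0, 5})) // 67: limb 1 contributes 64 + bits.Len64(5) = 3
}
```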
Otsar
badb8c5d68 Removed ZK containers from docs sidebar (#571)
## Describe the changes

This PR...

## Linked Issues

Resolves #
2024-08-04 18:38:37 +03:00
Otsar
1300434bbe Removed ZK containers from docs sidebar 2024-08-04 11:14:06 +03:00
yshekel
6a67893773 remove the recommendation to use zk-containers in examples (#569) 2024-08-01 14:58:02 +03:00
ChickenLover
0cb0b49be9 Add Sha3 (#560)
## Describe the changes

This PR...

## Linked Issues

Resolves #
2024-07-28 15:31:28 +07:00
Vlad
8411ed1451 Feat/vlad/refactor from affine (#554)
## Describe the changes

This PR refactors the various affine-to-projective conversion
functions to use the C function.

Also includes a small bug fix for the ProjectiveToAffine() function in Go.

## Linked Issues

Resolves #
2024-07-22 10:37:24 +02:00
omahs
53f34aade5 Fix typos (#558)
Fix typos
2024-07-18 11:58:04 +03:00
release-bot
aacec3f72f Bump rust crates' version
icicle-babybear@2.8.0
icicle-bls12-377@2.8.0
icicle-bls12-381@2.8.0
icicle-bn254@2.8.0
icicle-bw6-761@2.8.0
icicle-core@2.8.0
icicle-cuda-runtime@2.8.0
icicle-grumpkin@2.8.0
icicle-hash@2.8.0
icicle-m31@2.8.0
icicle-stark252@2.8.0

Generated by cargo-workspaces
2024-07-16 13:57:56 +00:00
ChickenLover
a8fa05d0e3 Feat/roman/hash docs (#556)
## Describe the changes

This PR...

## Linked Issues

Resolves #

---------

Co-authored-by: Jeremy Felder <jeremy.felder1@gmail.com>
2024-07-16 16:39:35 +03:00
Vlad
877018c84c more go fmt 2024-07-15 16:55:40 +02:00
Vlad
91ac666e06 Merge branch 'feat/vlad/refactor-from-affine' of github.com:ingonyama-zk/icicle into feat/vlad/refactor-from-affine 2024-07-15 16:48:25 +02:00
Vlad
46e6c20440 go fmt 2024-07-15 16:47:40 +02:00
ChickenLover
ea71faf1fa add keccak tree builder (#555) 2024-07-15 15:31:12 +07:00
ChickenLover
7fd9ed1b49 Feat/roman/tree builder (#525)
# Updates:

## Hashing

 - Added SpongeHasher class
 - Can be used to accept any hash function as an argument
 - Absorb and squeeze are now separated
- Memory management is now mostly done by SpongeHasher class, each hash
function only describes permutation kernels

## Tree builder

 - Tree builder is now hash-agnostic. 
 - Tree builder now supports 2D input (matrices)
- Tree builder can now use two different hash functions for layer 0 and
compression layers

## Poseidon1

 - Interface changed to classes
 - Now allows for any alpha
 - Now allows passing constants not in a single vector
 - Now allows for any domain tag
 - Constants are now released upon going out of scope
 - Rust wrappers changed to Poseidon struct
 
 ## Poseidon2
 
 - Interface changed to classes
 - Constants are now released upon going out of scope
 - Rust wrappers changed to Poseidon2 struct
 
## Keccak

 - Added Keccak class which inherits SpongeHasher
 - Now doesn't use gpu registers for storing states
 
 To do:
- [x] Update poseidon1 golang bindings
- [x] Update poseidon1 examples
- [x] Fix poseidon2 cuda test
- [x] Fix poseidon2 merkle tree builder test
- [x] Update keccak class with new design
- [x] Update keccak test
- [x] Check keccak correctness
- [x] Update tree builder rust wrappers
- [x] Leave doc comments

Future work:  
- [ ] Add keccak merkle tree builder externs
- [ ] Add keccak rust tree builder wrappers
- [ ] Write docs
- [ ] Add example
- [ ] Fix device output for tree builder

---------

Co-authored-by: Jeremy Felder <jeremy.felder1@gmail.com>
Co-authored-by: nonam3e <71525212+nonam3e@users.noreply.github.com>
2024-07-11 13:46:25 +07:00
DmytroTym
2d4059c61f Field creation automated through macros (#551)
Params files for fields now only require modulus specified by the user
(also twiddle generator and/or non-residue in case either or both are
needed). Everything else gets generated by a macro.
2024-07-08 10:39:50 +03:00
Vlad
e4eda8938d go fmt 2024-07-05 21:29:44 +02:00
Vlad
fb707d5350 Merge branch 'main' into feat/vlad/refactor-from-affine 2024-07-05 15:40:34 +02:00
release-bot
73cd4c0a99 Bump rust crates' version
icicle-babybear@2.7.1
icicle-bls12-377@2.7.1
icicle-bls12-381@2.7.1
icicle-bn254@2.7.1
icicle-bw6-761@2.7.1
icicle-core@2.7.1
icicle-cuda-runtime@2.7.1
icicle-grumpkin@2.7.1
icicle-hash@2.7.1
icicle-m31@2.7.1
icicle-stark252@2.7.1

Generated by cargo-workspaces
2024-07-04 12:34:26 +00:00
yshekel
5516320ad7 fix large (>512 elements) ecntt issue (#553)
This PR solves an issue for large ECNTTs where CUDA blocks are too large
and cannot be assigned to SMs. The fix is to reduce the thread count per
block and increase the block count in that case.
2024-07-04 15:33:49 +03:00
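A sketch of the launch-configuration logic the fix describes (names and values are assumptions; the real kernel launch code differs):

```go
package main

import "fmt"

// chooseLaunchConfig caps threads per block (so a block can always be
// scheduled on an SM) and grows the grid to cover all elements instead.
func chooseLaunchConfig(nElements, maxThreadsPerBlock int) (threads, blocks int) {
	threads = maxThreadsPerBlock
	if nElements < threads {
		threads = nElements
	}
	blocks = (nElements + threads - 1) / threads // ceil division
	return
}

func main() {
	t, b := chooseLaunchConfig(1<<20, 256)
	fmt.Println(t, b) // 256 4096
}
```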
Vlad
6336e74d5a refactor from_affine with C link 2024-07-04 11:03:58 +02:00
Vlad
a4b1eb3de9 Fix affine to projective zero point bug (#552)
## Describe the changes

This PR fixes the affine-to-projective functions in the bindings by adding a
condition: if the point in affine form is zero, return the projective zero point.

---------

Co-authored-by: Jeremy Felder <jeremy.felder1@gmail.com>
2024-07-04 09:31:59 +03:00
release-bot
31083463be Bump rust crates' version
icicle-babybear@2.7.0
icicle-bls12-377@2.7.0
icicle-bls12-381@2.7.0
icicle-bn254@2.7.0
icicle-bw6-761@2.7.0
icicle-core@2.7.0
icicle-cuda-runtime@2.7.0
icicle-grumpkin@2.7.0
icicle-hash@2.7.0
icicle-m31@2.7.0
icicle-stark252@2.7.0

Generated by cargo-workspaces
2024-07-03 19:06:35 +00:00
nonam3e
b908053c0c Feat/m31 (#547)
This PR adds support for the m31 field

---------

Co-authored-by: Jeremy Felder <jeremy.felder1@gmail.com>
2024-07-03 20:48:28 +07:00
Vlad
279cdc66e0 generated go files 2024-07-03 10:41:32 +02:00
Vlad
81644fc28c use zero method of projective in toProjective
Co-authored-by: Jeremy Felder <jeremy.felder1@gmail.com>
2024-07-03 10:37:02 +02:00
Vlad
17732ea013 use zero method of projective in fromAffine
Co-authored-by: Jeremy Felder <jeremy.felder1@gmail.com>
2024-07-03 10:36:14 +02:00
Vlad
9e057c835d fixed to_projective in rust 2024-07-03 09:18:41 +02:00
Vlad
f08b5bb49d fixed fromAffine and toProj in golang 2024-07-03 09:07:43 +02:00
Stas
29da36d7be RISC0 example using Polynomial API (#548)
## New Example

This new c++ example shows the basics of RISC0 protocol using our
Polynomial API
2024-07-02 08:00:03 -06:00
HadarIngonyama
4fef542346 MSM - fixed bug in reduction phase (#549)
This PR fixes a bug in the iterative reduction algorithm.
Unsynchronized threads were reading and writing the same
addresses, which caused MSM to fail a small percentage of the time - this is fixed now.
2024-06-30 12:05:55 +03:00
release-bot
f812f071fa Bump rust crates' version
icicle-babybear@2.6.0
icicle-bls12-377@2.6.0
icicle-bls12-381@2.6.0
icicle-bn254@2.6.0
icicle-bw6-761@2.6.0
icicle-core@2.6.0
icicle-cuda-runtime@2.6.0
icicle-grumpkin@2.6.0
icicle-hash@2.6.0
icicle-stark252@2.6.0

Generated by cargo-workspaces
2024-06-24 11:56:28 +00:00
Jeremy Felder
2b07513310 [FEAT]: Golang Bindings for pinned host memory (#519)
## Describe the changes

This PR adds the capability to pin host memory in the golang bindings,
allowing quicker data transfers. Memory can be pinned once for
multiple devices by passing the flag
`cuda_runtime.CudaHostRegisterPortable` or
`cuda_runtime.CudaHostAllocPortable`, depending on how the pinned memory
is obtained (registered from existing memory vs. freshly allocated).
2024-06-24 14:03:44 +03:00
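A runnable sketch of the pinning flow with a stubbed binding - only the flag names above come from this PR; `registerPinned` is a hypothetical stand-in for a cudaHostRegister-style wrapper:

```go
package main

import (
	"fmt"
	"unsafe"
)

// Illustrative stand-ins; the real binding crosses into the CUDA runtime.
type hostRegisterFlag uint32

const (
	cudaHostRegisterDefault  hostRegisterFlag = 0
	cudaHostRegisterPortable hostRegisterFlag = 1 // pinned for all devices at once
)

// registerPinned models registering already-allocated host memory as pinned.
func registerPinned(ptr unsafe.Pointer, bytes int, flags hostRegisterFlag) error {
	_, _, _ = ptr, bytes, flags
	return nil // stubbed: a real binding would call cudaHostRegister here
}

func main() {
	scalars := make([]uint64, 1<<20)
	// Pin once with the portable flag so transfers to any device are fast.
	if err := registerPinned(unsafe.Pointer(&scalars[0]),
		8*len(scalars), cudaHostRegisterPortable); err != nil {
		panic(err)
	}
	fmt.Println("host buffer pinned for all devices")
}
```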
HadarIngonyama
7831f7bd0f Msm/update docs (#545)
Updates MSM documentation

---------

Co-authored-by: Jeremy Felder <jeremy.felder1@gmail.com>
Co-authored-by: Leon Hibnik <107353745+LeonHibnik@users.noreply.github.com>
2024-06-19 11:38:24 +03:00
Otsar
de25b6e203 Added v2 paper (#544) 2024-06-18 15:19:49 +03:00
Otsar
69383e6c73 Update docusaurus.config.js
bold, added emoji
2024-06-18 15:04:26 +03:00
Otsar
c305aade5d Update overview.md 2024-06-18 15:00:24 +03:00
Otsar
87bdf04a19 Update docusaurus.config.js 2024-06-18 13:05:14 +03:00
Otsar
e152977843 Update overview.md
Added v2 paper
2024-06-18 12:23:03 +03:00
release-bot
3d01c09c82 Bump rust crates' version
icicle-babybear@2.5.0
icicle-bls12-377@2.5.0
icicle-bls12-381@2.5.0
icicle-bn254@2.5.0
icicle-bw6-761@2.5.0
icicle-core@2.5.0
icicle-cuda-runtime@2.5.0
icicle-grumpkin@2.5.0
icicle-hash@2.5.0
icicle-stark252@2.5.0

Generated by cargo-workspaces
2024-06-17 13:17:24 +00:00
HadarIngonyama
8936d9c800 MSM - supporting all window sizes (#534)
This PR enables using MSM with any value of c.

Note: the default c isn't necessarily optimal; the user is expected to
choose the c and the precomputation factor that give the best results for
the relevant case.

---------

Co-authored-by: Jeremy Felder <jeremy.felder1@gmail.com>
2024-06-17 15:57:24 +03:00
Jeremy Felder
af9ec76506 Fix link and correct path for running test deploy workflow (#542)
## Describe the changes

Fixes a link issue in docs preventing deployment
2024-06-17 15:44:15 +03:00
Otsar
cdd99d2a46 recreated images for poseidon.md (#541)
Fixed 3 images shown in low quality - I have recreated the 3 images -
please check that I have not made a mistake
2024-06-17 12:16:26 +03:00
Jeremy Felder
3e551762c0 Updated alt text for images and fixed broken link 2024-06-16 18:35:42 +03:00
Otsar
37c22e81e7 Update poseidon.md
fixed - added arrows
2024-06-16 15:01:12 +03:00
Otsar
69e73ffa3e Update poseidon.md
Fixed image quality
2024-06-16 11:42:46 +03:00
cangqiaoyuzhuo
512e1ca372 chore: remove repeated word (#540)
## Describe the changes

remove repeated word

## Linked Issues

Resolves #

Signed-off-by: cangqiaoyuzhuo <850072022@qq.com>
2024-06-13 11:53:22 +03:00
VitaliiH
e19a869691 accumulate stwo (#535)
adds in-place vector addition, exposed in the API as accumulate
2024-06-10 12:24:58 +02:00
yshekel
9c55d888ae workflow curve fix (#536) 2024-06-09 11:18:23 +03:00
release-bot
18f51de56c Bump rust crates' version
icicle-babybear@2.4.0
icicle-bls12-377@2.4.0
icicle-bls12-381@2.4.0
icicle-bn254@2.4.0
icicle-bw6-761@2.4.0
icicle-core@2.4.0
icicle-cuda-runtime@2.4.0
icicle-grumpkin@2.4.0
icicle-hash@2.4.0
icicle-stark252@2.4.0

Generated by cargo-workspaces
2024-06-06 14:42:36 +00:00
yshekel
33b1f3c794 perf: projective scalar multiplication use dbl() rather than + (#530) 2024-06-05 20:35:21 +03:00
Karthik Inbasekar
3a276ef23c added example cpp: example_commit_with_device_memory_view() (#532)
## Describe the changes

This PR...
Added an example of a simple commit that makes use of polynomial views.
Output attached:

```
Example: a) commit with Polynomial views [(f1+f2)^2 + (f1-f2)^2 ]_1 = [2 (f1^2+ f_2^2)]_1
Example: b) commit with Polynomial views [(f1+f2)^2 - (f1-f2)^2 ]_1 = [4 f1 *f_2]_1
Setup: Generating mock SRS
Setup: SRS of length 1025 generated and loaded to device. Took: 19557 milliseconds
Setup: Generating polys (on device) f1,f2 of log degree 10
Setup: Gen poly done. Took: 7 milliseconds
Computing constraints..start 
Computing constraints..done. Took: 0 milliseconds
Computing Commitments with poly view
Commitments done. Took: 29 milliseconds
commitment [(f1+f2)^2 + (f1-f2)^2]_1:
[x: 0x1e35d81da10e5026dacdd907d6ed0dde673de449ff8c0137ec6acbfd6b1dfe1b, y: 0x21fc051415af35a781f84ebcf999313d489ae38ebefa561c9de2fb0b11091502]
commitment [2 (f_1^2+f_2^2)]_1:
[x: 0x1e35d81da10e5026dacdd907d6ed0dde673de449ff8c0137ec6acbfd6b1dfe1b, y: 0x21fc051415af35a781f84ebcf999313d489ae38ebefa561c9de2fb0b11091502]
commitment [(f1+f2)^2 - (f1-f2)^2]_1:
[x: 0x21e9dc012aef8d95107fbfe63f455d4345b9b21e37bcb0a49043b1066e211ffa, y: 0x2d6a3b2f1be1042a17c58ff595134b9cceb71d1af4f1c67a5696859cd4bafae3]
commitment [4 f_1*f_2]_1:
[x: 0x21e9dc012aef8d95107fbfe63f455d4345b9b21e37bcb0a49043b1066e211ffa, y: 0x2d6a3b2f1be1042a17c58ff595134b9cceb71d1af4f1c67a5696859cd4bafae3]
```

## Linked Issues

Resolves #
2024-06-05 18:25:12 +03:00
nonam3e
8e62bde16d bit reverse (#528)
This PR adds bit reverse operation support to icicle
2024-06-02 16:37:58 +07:00
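For reference, a minimal host-side Go sketch of the bit-reverse permutation this operation performs (the PR's GPU kernel is the parallel analogue):

```go
package main

import (
	"fmt"
	"math/bits"
)

// bitReversePermute reorders a power-of-two-length slice so that element i
// lands at the index obtained by reversing the log2(n) bits of i.
func bitReversePermute[T any](a []T) {
	n := len(a)
	logN := bits.TrailingZeros(uint(n)) // n must be a power of two
	for i := 0; i < n; i++ {
		j := int(bits.Reverse(uint(i)) >> (bits.UintSize - logN))
		if i < j { // swap each pair exactly once
			a[i], a[j] = a[j], a[i]
		}
	}
}

func main() {
	a := []int{0, 1, 2, 3, 4, 5, 6, 7}
	bitReversePermute(a)
	fmt.Println(a) // [0 4 2 6 1 5 3 7]
}
```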
Jeremy Felder
417ca77f61 precompute bug fix (#529)
This PR fixes 2 things:

1. Removes the assertion regarding the precompute factor needing to be a
power of 2. There is no such requirement and it works just fine for
other values too.
2. Fixes the average bucket size for the large buckets threshold - it
depends on the precompute factor.
2024-05-29 13:59:48 +03:00
hadaringonyama
8911a32135 precompute bug fix 2024-05-28 12:48:48 +03:00
release-bot
c6f6e61d60 Bump rust crates' version
icicle-babybear@2.3.1
icicle-bls12-377@2.3.1
icicle-bls12-381@2.3.1
icicle-bn254@2.3.1
icicle-bw6-761@2.3.1
icicle-core@2.3.1
icicle-cuda-runtime@2.3.1
icicle-grumpkin@2.3.1
icicle-hash@2.3.1
icicle-stark252@2.3.1

Generated by cargo-workspaces
2024-05-20 13:43:32 +00:00
yshekel
4e3aa63d2f fix: ntt mixed-radix bug for large ntts (>4G elements) (#523)
In some cases, 32-bit values would wrap around and cause invalid accesses to
wrong elements and memory addresses.
2024-05-20 16:42:44 +03:00
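A minimal Go illustration of the wrap-around failure mode (standing in for the kernel's index arithmetic; the actual CUDA code differs):

```go
package main

import "fmt"

func main() {
	// With >4G elements, a 32-bit global index wraps around:
	var block, dim, thread int32 = 5_000_000, 1024, 0
	bad := block*dim + thread // wraps mod 2^32 to 825032704
	// Widening before the multiply keeps the full value:
	good := int64(block)*int64(dim) + int64(thread)
	fmt.Println(bad, good) // 825032704 5120000000
}
```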
Leon Hibnik
db298aefc1 [HOTFIX] rust msm benchmarks (#521)
## Describe the changes

removes unused host to device copy, adds minimum limit to run MSM
benchmarks
2024-05-20 13:51:53 +03:00
yshekel
19a9b76d64 fix: cmake set_gpu_env() and windows build (#520) 2024-05-20 13:05:45 +03:00
Jeremy Felder
1e343f17a3 Allow overriding compiler's chosen GPU arch via cmake (#518)
## Describe the changes

This PR modifies icicle/cmake/Common.cmake to set
CMAKE_CUDA_ARCHITECTURES to ${CUDA_ARCH} if the user defines the arch,
or to set CMAKE_CUDA_ARCHITECTURES to `native` if the CMake version is
greater than or equal to 3.24.0. This change has been successfully
tested with CMake 3.22.0 and 3.25.2.

## Linked Issues

Resolves #167.
2024-05-19 16:03:15 +03:00
liuhao230
cfea6ebb3b Merge branch 'ingonyama-zk:main' into main 2024-05-17 14:24:02 +08:00
release-bot
76a82bf88e Bump rust crates' version
icicle-babybear@2.3.0
icicle-bls12-377@2.3.0
icicle-bls12-381@2.3.0
icicle-bn254@2.3.0
icicle-bw6-761@2.3.0
icicle-core@2.3.0
icicle-cuda-runtime@2.3.0
icicle-grumpkin@2.3.0
icicle-hash@2.3.0
icicle-stark252@2.3.0

Generated by cargo-workspaces
2024-05-17 04:42:17 +00:00
Vlad
b8310d577e Feat/vlad/poseidon go binding (#513) 2024-05-17 07:20:15 +03:00
liu
49c7fa4b28 fix: add the PARENT_SCOPE
Signed-off-by: liu <liuhao2206@buaa.edu.cn>
2024-05-17 10:45:09 +08:00
Stas
02059fcfaa Stas/best-practice-ntt (#517)
## Describe the changes

Icicle examples:  Concurrent Data Transfer and NTT Computation

This PR introduces a Best Practice series of examples in C++.
Specifically, the example shows how to concurrently transfer data
to/from the device and execute NTT.

## Linked Issues

Resolves #
2024-05-16 23:51:49 +03:00
nonam3e
4496520a10 golang examples init (#516)
## Describe the changes

This PR adds golang examples

---------

Co-authored-by: Leon Hibnik <107353745+LeonHibnik@users.noreply.github.com>
Co-authored-by: Jeremy Felder <jeremy.felder1@gmail.com>
2024-05-16 19:40:13 +03:00
liu
88a6966a4b Allow overriding compiler's chosen GPU arch via cmake 2024-05-15 22:40:51 +08:00
yshekel
9c1afe8a44 Polynomial API views replaced by evaluation on rou domain (#514)
- removed the poly API for accessing a view of evaluations. This was a problematic API since it cannot handle small domains, and for large domains it requires the polynomial to use more memory than it needs to.
- added an evaluate_on_rou_domain() API instead that supports any (power-of-two) domain size.
- the new API can compute to HOST or DEVICE memory
- Rust wrapper for evaluate_on_rou_domain()
- updated documentation: overview and Rust wrappers
- faster division by a vanishing poly for the common case where the numerator is of degree 2N and the vanishing poly of degree N.
- allow division a/b where deg(a) < deg(b) instead of throwing an error.
2024-05-15 14:06:23 +03:00
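As a reference for what evaluate_on_rou_domain() produces, a naive host-side sketch over a toy prime field (assumes w is a primitive n-th root of unity; the real API runs GPU NTTs instead of this O(n·deg) loop):

```go
package main

import "fmt"

const p = 97 // toy prime; the real fields are much larger

// evalOnDomain returns evals[i] = poly(w^i) for i in [0, n) - the quantity
// evaluate_on_rou_domain() computes.
func evalOnDomain(coeffs []uint64, w uint64, n int) []uint64 {
	evals := make([]uint64, n)
	x := uint64(1) // w^0
	for i := 0; i < n; i++ {
		var acc uint64 // Horner evaluation of the polynomial at x
		for j := len(coeffs) - 1; j >= 0; j-- {
			acc = (acc*x + coeffs[j]) % p
		}
		evals[i] = acc
		x = x * w % p
	}
	return evals
}

func main() {
	// 64 is a primitive 8th root of unity mod 97 (64^4 = -1 mod 97).
	fmt.Println(evalOnDomain([]uint64{3, 0, 1}, 64, 8)) // evaluates 3 + x^2
}
```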
Jeremy Felder
972b924bc0 Update CI to run on some non-code changes (#515)
## Describe the changes

This PR:
- Updates the CI to run on CI workflow file changes
- Updates examples CI to run on examples file changes
2024-05-15 13:17:13 +03:00
sukrucildirr
230a1da512 Fix broken link (#512)
## Describe the changes

There was a broken link attached to the word ZKContainer.

## Linked Issues

Resolves #
2024-05-14 08:36:39 +07:00
release-bot
940b283c47 Bump rust crates' version
icicle-babybear@2.2.0
icicle-bls12-377@2.2.0
icicle-bls12-381@2.2.0
icicle-bn254@2.2.0
icicle-bw6-761@2.2.0
icicle-core@2.2.0
icicle-cuda-runtime@2.2.0
icicle-grumpkin@2.2.0
icicle-hash@2.2.0
icicle-stark252@2.2.0

Generated by cargo-workspaces
2024-05-09 12:27:17 +00:00
Leon Hibnik
e0412183fd syntax highlight (#511)
## Describe the changes
adds syntax highlighting to `rust` and `go`
2024-05-09 15:23:20 +03:00
ChickenLover
9da52bc09f Feat/roman/poseidon2 (#510)
# This PR

1. Adds C++ API
2. Renames a lot of API functions
3. Adds inplace poseidon2
4. Makes input const at all poseidon functions
5. Adds benchmark for poseidon2
2024-05-09 19:19:55 +07:00
VitaliiH
49079d0d2a rust ecntt hotfix (#509)
## Describe the changes

This PR fixes Rust ECNTT benches and tests


---------

Co-authored-by: VitaliiH <Vitaliy@ingo>
2024-05-09 11:21:21 +03:00
ChickenLover
094683d291 Feat/roman/poseidon2 (#507)
This PR adds support for poseidon2 permutation function as described in
https://eprint.iacr.org/2023/323.pdf

Reference implementations used (and compared against):
https://github.com/HorizenLabs/poseidon2/tree/main
https://github.com/Plonky3/Plonky3/tree/main

Tasks:

- [x] Remove commented code and prints
- [ ] Add doc-comments to functions and structs
- [x] Fix possible issue with Plonky3 imports
- [x] Update NTT/Plonky3 test
- [x] Add Plonky3-bn254 test (impossible)
2024-05-09 15:13:43 +07:00
nonam3e
c30e333819 keccak docs (#508)
This PR adds keccak docs

---------

Co-authored-by: Leon Hibnik <107353745+LeonHibnik@users.noreply.github.com>
2024-05-08 23:18:59 +03:00
yshekel
2905d2a469 fix: bug regarding polynomial evaluations view in CUDA backend (#506)
fixing:
(1) not building polynomials and tests for grumpkin curve (no NTT)
(2) polynomial API C++ example compilation and (when compilation is
fixed) memory corruption
(3) bug fix in poly CUDA backend regarding transformation to evaluations
in some cases
2024-05-08 21:02:18 +03:00
Jeremy Felder
732ee51552 [CI]: Update Cpp CI to include build args (#503)
## Describe the changes

This PR adds build args to the Cpp CI and adds grumpkin curve and
stark252 field
2024-05-08 14:35:02 +03:00
Jeremy Felder
14997566ff [FIX]: Fix releasing device set on host thread during multigpu call (#501)
## Describe the changes

This PR fixes an issue when `RunOnDevice` is called for multi-GPU while
other goroutines calling device operations are run outside of
`RunOnDevice`. The issue comes from setting a device other than the
default device (device 0) on a host thread within `RunOnDevice` and not
unsetting that host thread's device when `RunOnDevice` finishes.

When `RunOnDevice` locks a host thread to ensure that all other calls in
the goroutine are on the same device, it never unsets that thread's
device. Once the thread is unlocked, other goroutines can get scheduled
to it, but it still has the device set to whatever it was while the
thread was locked, so it's possible that the following sequence happens:

1. NTT domain is initialized on thread 2 via a goroutine on device 0
2. MSM multiGPU test runs and is locked on thread 3, setting its device
to 1
3. Other tests run concurrently on threads other than 3 (since it is
locked)
4. MSM multiGPU test finishes and releases thread 3 back to the pool, but
its device is still 1
5. NTT test runs and is assigned to thread 3 --> this will fail because
the thread's device wasn't released back

We really only want to set a thread's device while the thread is locked.
But once we unlock a thread, its device should return to whatever it
was set at originally. In theory, it should always be 0 if `SetDevice`
is never used outside of `RunOnDevice` - which it shouldn't be in most
situations
2024-05-08 14:07:29 +03:00
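A sketch of the corrected lock/set/reset pattern described above (illustrative names; `setDevice` stands in for the binding's SetDevice and device 0 is assumed to be the default):

```go
package main

import (
	"fmt"
	"runtime"
)

// setDevice stands in for cuda_runtime.SetDevice in this sketch.
func setDevice(id int) { fmt.Println("device set to", id) }

// runOnDevice pins the goroutine to one host thread, sets the requested
// device, and - the fix described above - resets the thread's device to
// the default before unlocking, so a recycled thread never leaks state.
func runOnDevice(deviceID int, fn func()) {
	done := make(chan struct{})
	go func() {
		defer close(done)
		runtime.LockOSThread()
		defer runtime.UnlockOSThread()
		setDevice(deviceID)
		defer setDevice(0) // runs before unlock: reset while still locked
		fn()
	}()
	<-done
}

func main() {
	runOnDevice(1, func() { fmt.Println("work on device 1") })
}
```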
Otsar
a56435d2e8 Updated hall of fame (#505)
## Describe the changes

Adds Patrick to Hall of fame
2024-05-07 14:41:38 +03:00
Stas
41294b12e0 Stas/example poly (#434)
## Describe the changes

Added examples for Poly API

---------

Co-authored-by: Yuval Shekel <yshekel@gmail.com>
2024-05-07 11:52:13 +03:00
Jeremy Felder
6134cfe177 [DOCS]: Tidy up docs (#502)
## Describe the changes

This PR tidies up docs and updates golang build instructions
2024-05-06 15:35:19 +03:00
VitaliiH
34f0212c0d rust classic benches with Criterion for ecntt/msm/ntt (#499)
Rust idiomatic benches for EC NTT, NTT, MSM
2024-05-05 10:28:41 +02:00
release-bot
f6758f3447 Bump rust crates' version
icicle-babybear@2.1.0
icicle-bls12-377@2.1.0
icicle-bls12-381@2.1.0
icicle-bn254@2.1.0
icicle-bw6-761@2.1.0
icicle-core@2.1.0
icicle-cuda-runtime@2.1.0
icicle-grumpkin@2.1.0
icicle-hash@2.1.0
icicle-stark252@2.1.0

Generated by cargo-workspaces
2024-05-01 20:11:42 +00:00
nonam3e
e2ad621f97 Nonam3e/golang/keccak (#496)
## Describe the changes

This PR adds keccak bindings + passes cfg as reference in keccak cuda functions
2024-05-01 14:08:33 +03:00
PatStiles
bdc3da98d6 FEAT(stark252 field): Adds Stark252 curve (#494)
## Describe the changes

Adds support for the stark252 base field.
2024-05-01 14:08:05 +03:00
yshekel
36e288c1fa fix: bug regarding MixedRadix coset (I)NTT for NM/MN ordering (#497)
The bug is in how the twiddles array is indexed when multiplied by a mixed-order
(M) vector to implement (I)NTT on cosets.
The fix is to use the DIF digit-reverse to compute the index of the element in the
natural-order (N) vector that moved to index 'i' in the M vector. This
emulates a DIT digit-reverse (which mixes like a DIF compute)
reorder of the twiddles array and element-wise multiplication, without
reordering the twiddles memory.
2024-04-25 18:09:27 +03:00
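A host-side sketch of the digit-reverse index map this fix relies on (illustrative; the kernel applies the same map per element when indexing the twiddles):

```go
package main

import "fmt"

// digitReverse writes i in mixed-radix digits d0, d1, ... (least significant
// first, with the given per-stage radices) and returns the index whose
// digit order is reversed - the index map a DIF/DIT reordering applies.
func digitReverse(i int, radices []int) int {
	out := 0
	for _, r := range radices {
		out = out*r + i%r // append the next digit at the top
		i /= r
	}
	return out
}

func main() {
	// With uniform radix 2 this degenerates to plain bit-reversal:
	for i := 0; i < 8; i++ {
		fmt.Print(digitReverse(i, []int{2, 2, 2}), " ") // 0 4 2 6 1 5 3 7
	}
}
```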
nonam3e
f8d15e2613 update imports in golang bindings (#498)
## Describe the changes

This PR updates imports in golang bindings to the v2 version
2024-04-25 03:46:14 +07:00
release-bot
14b39b57cc Bump rust crates' version
icicle-babybear@2.0.1
icicle-bls12-377@2.0.1
icicle-bls12-381@2.0.1
icicle-bn254@2.0.1
icicle-bw6-761@2.0.1
icicle-core@2.0.1
icicle-cuda-runtime@2.0.1
icicle-grumpkin@2.0.1
icicle-hash@2.0.1

Generated by cargo-workspaces
2024-04-24 07:13:05 +00:00
Jeremy Felder
999167afe1 [PATCH]: Update module with v2 versioning (#495)
## Describe the changes

This PR fixes the issue of v2 ICICLE not being discovered by Go's
packaging service by adding the required "v2" to the module path:
https://go.dev/doc/modules/release-workflow#breaking
2024-04-24 10:09:45 +03:00
release-bot
ff374fcac7 Bump rust crates' version
icicle-babybear@2.0.0
icicle-bls12-377@2.0.0
icicle-bls12-381@2.0.0
icicle-bn254@2.0.0
icicle-bw6-761@2.0.0
icicle-core@2.0.0
icicle-cuda-runtime@2.0.0
icicle-grumpkin@2.0.0
icicle-hash@2.0.0

Generated by cargo-workspaces
2024-04-23 02:30:18 +00:00
ChickenLover
7265d18d48 ICICLE V2 Release (#492)
This PR introduces major updates for ICICLE Core, Rust and Golang
bindings

---------

Co-authored-by: Yuval Shekel <yshekel@gmail.com>
Co-authored-by: DmytroTym <dmytrotym1@gmail.com>
Co-authored-by: Otsar <122266060+Otsar-Raikou@users.noreply.github.com>
Co-authored-by: VitaliiH <vhnatyk@gmail.com>
Co-authored-by: release-bot <release-bot@ingonyama.com>
Co-authored-by: Stas <spolonsky@icloud.com>
Co-authored-by: Jeremy Felder <jeremy.felder1@gmail.com>
Co-authored-by: ImmanuelSegol <3ditds@gmail.com>
Co-authored-by: JimmyHongjichuan <45908291+JimmyHongjichuan@users.noreply.github.com>
Co-authored-by: pierre <pierreuu@gmail.com>
Co-authored-by: Leon Hibnik <107353745+LeonHibnik@users.noreply.github.com>
Co-authored-by: nonam3e <timur@ingonyama.com>
Co-authored-by: Vlad <88586482+vladfdp@users.noreply.github.com>
Co-authored-by: LeonHibnik <leon@ingonyama.com>
Co-authored-by: nonam3e <71525212+nonam3e@users.noreply.github.com>
Co-authored-by: vladfdp <vlad.heintz@gmail.com>
2024-04-23 05:26:40 +03:00
release-bot
a1dc0539ce Bump rust crates' version
icicle-bls12-377@1.10.1
icicle-bls12-381@1.10.1
icicle-bn254@1.10.1
icicle-bw6-761@1.10.1
icicle-core@1.10.1
icicle-cuda-runtime@1.10.1
icicle-grumpkin@1.10.1

Generated by cargo-workspaces
2024-04-11 07:56:32 +00:00
Jeremy Felder
cda806ff0c [PATCH]: Fix underflow and div by 0 (#471)
## Describe the changes

This PR fixes an underflow in `get_optimal_c` when the bitsize is less than
16 and adds `max(1, NUM_THREADS)` to prevent a div by 0 when calculating
NUM_THREADS
2024-04-11 10:45:34 +03:00
release-bot
8498a962f9 Bump rust crates' version
icicle-bls12-377@1.10.0
icicle-bls12-381@1.10.0
icicle-bn254@1.10.0
icicle-bw6-761@1.10.0
icicle-core@1.10.0
icicle-cuda-runtime@1.10.0
icicle-grumpkin@1.10.0

Generated by cargo-workspaces
2024-04-09 10:02:34 +00:00
Leon Hibnik
a7b0dc40c1 [FEAT] ReleaseDomain API (#465)
## Describe the changes

This PR adds a NTT ReleaseDomain API in Golang and Rust

## Linked Issues

Resolves #

---------

Co-authored-by: Yuval Shekel <yshekel@gmail.com>
2024-04-09 12:58:19 +03:00
Vlad
4a35eece51 transpose kernel in vec_ops and rust binding (#462)
## Describe the changes

This PR adds an extern C link to the transpose kernel, now in
vec_ops.cu.
It also adds a Rust binding, and updates the test check_ntt_batch to use
the new transpose function.
The test passes.

## Linked Issues

Resolves #

---------

Co-authored-by: LeonHibnik <leon@ingonyama.com>
2024-04-09 08:47:33 +03:00
VitaliiH
4c9b3c00a5 Devmode to Reduce compilation time (including G2 and ECNTT) (#395)
devmode to reduce compilation time
2024-04-09 06:09:04 +02:00
Jeremy Felder
c6719167ce [FEAT]: golang device slice ranges (#463)
## Describe the changes

This PR adds the capability to slice a DeviceSlice, allowing portions of
data that are already on the device to be reused.

Additionally, this PR removes the need for a HostSlice underlying type
to implement a Size function and uses unsafe.Sizeof instead. This
together with #407 will allow direct usage of gnark-crypto types with
HostSlice without the need for converting to ICICLE types

---------

Co-authored-by: nonam3e <timur@ingonyama.com>
2024-04-08 19:42:03 +03:00
Leon Hibnik
cd3769d6b7 Fix Golang TestNttDeviceAsync (#461)
## Describe the changes

This PR fixes TestNttDeviceAsync by adding a missing call to initDomain

## Linked Issues

Resolves #
2024-04-08 17:47:10 +03:00
DmytroTym
b93b1d0aaf NTT inplace in Rust (#453)
## Describe the changes

Due to Rust's ownership rules, we can't run NTT in-place using the
[`ntt`](https://github.com/ingonyama-zk/icicle/blob/v1.9.1/wrappers/rust/icicle-core/src/ntt/mod.rs#L139)
function, which is why we saw a need to add a separate function a couple
of times.

Incidentally, an issue with radix-2 NTT was found when run in-place:
`__syncthreads()` was used in the reverse-order kernel as if it were a global
barrier for all blocks and not a block-local one. The resulting data race
is fixed by this PR.
2024-04-08 10:04:04 +03:00
Leon Hibnik
6a96eef8dc add golang multigpu to sidebar (#449)
This PR adds multi-GPU golang documentation to the dev sidebar
2024-04-08 09:20:29 +03:00
JimmyHongjichuan
95ab6de059 fix: use the log2 in lib std explicitly to prevent makefile from linking other log2 func (#459)

## Describe the changes

This PR adds "std" as prefix on log2 function of
icicle/appUtils/msm/msm.cu to explicitly use std::log2 for MSM
calculatation.

## Linked Issues

https://github.com/ingonyama-zk/icicle/issues/458

Resolves #

Co-authored-by: pierre <pierreuu@gmail.com>
2024-04-07 07:58:53 +03:00
Yuval Shekel
9c9311bee0 golang multi-device MSM test temporarily disabled due to issues related to golang tests env 2024-04-04 23:23:18 +03:00
Yuval Shekel
406020bda6 fix: NTT release domain linkage 2024-04-04 23:23:18 +03:00
release-bot
25ac705c3b Bump rust crates' version
icicle-bls12-377@1.9.1
icicle-bls12-381@1.9.1
icicle-bn254@1.9.1
icicle-bw6-761@1.9.1
icicle-core@1.9.1
icicle-cuda-runtime@1.9.1
icicle-grumpkin@1.9.1

Generated by cargo-workspaces
2024-03-27 19:00:07 +00:00
VitaliiH
ef757e8210 hotfix for large ecntt (#448)
hotfix for large ECNTTs
2024-03-27 18:31:50 +01:00
Otsar
2c1431d904 Update Hall of fame in 'README.md' (#445)
Added v1.8's contributors to hall of fame
2024-03-27 16:57:41 +02:00
ImmanuelSegol
77ebc4848e Docs 1.8 (#436) 2024-03-25 08:54:17 -04:00
Yuval Shekel
919ff42f49 fix: NTT input is const 2024-03-24 16:26:10 +02:00
release-bot
a1ff989740 Bump rust crates' version
icicle-bls12-377@1.9.0
icicle-bls12-381@1.9.0
icicle-bn254@1.9.0
icicle-bw6-761@1.9.0
icicle-core@1.9.0
icicle-cuda-runtime@1.9.0
icicle-grumpkin@1.9.0

Generated by cargo-workspaces
2024-03-21 07:11:47 +00:00
Otsar
1f2144a57c Removed "machines using ICICLE" static badge (#442) 2024-03-21 09:04:19 +02:00
Jeremy Felder
db4c07dcaf Golang bindings for ECNTT (#433) 2024-03-21 09:04:00 +02:00
ChickenLover
d4f39efea3 Add Keccak hash function (#435)
This PR adds support for Keccak-256 and Keccak-512. It only adds them in
C++. There is no way of adding Rust or golang wrappers right now, as it
requires having an `icicle-common` crate / mod
2024-03-20 22:30:19 +02:00
Yuval Shekel
7293058246 fix: (golang) MSM multi device test reset to original device after test is done 2024-03-20 16:27:11 +02:00
Yuval Shekel
03136f1074 fix: (golang) add missing NttAlgorithm field in NTTConfig 2024-03-20 16:27:11 +02:00
Yuval Shekel
3ef0d0c66e MSM scalars and points params are const
- This is required to be able to compute MSM on polynomial coefficients that are accessible by const only.
2024-03-20 16:27:11 +02:00
Stas
0dff1f9302 Use multi-threaded CUDA compilation to speed up compilation (#439)
## Describe the changes

Speed up CUDA C++ compile time using multi-threaded compilation
(the --split-compile flag).
Tests on an 8-core machine show ~2x acceleration.

## Linked Issues

Compiling C++ takes a long time
2024-03-18 16:40:30 -04:00
ChickenLover
0d806d96ca tidy (#437) 2024-03-19 00:59:10 +07:00
release-bot
b6b5011a47 Bump rust crates' version
icicle-bls12-377@1.8.0
icicle-bls12-381@1.8.0
icicle-bn254@1.8.0
icicle-bw6-761@1.8.0
icicle-core@1.8.0
icicle-cuda-runtime@1.8.0
icicle-grumpkin@1.8.0

Generated by cargo-workspaces
2024-03-13 21:38:17 +00:00
DmytroTym
7ac463c3d9 MSM pre-computation (#427)
## Brief description

This PR adds pre-computation to the MSM, for some theory see
[this](https://youtu.be/KAWlySN7Hm8?si=XeR-htjbnK_ySbUo&t=1734) timecode
of Niall Emmart's talk.
In terms of public APIs, one method is added. It does the
pre-computation on-device leaving resulting data on-device as well. No
extra structures are added, only `precompute_factor` from `MSMConfig` is
now activated.

## Performance

While performance gains are for now often limited by our inflexibility
in the choice of `c` (for example, very large MSMs get basically no speedup
from pre-compute because currently `c` cannot be larger than 16),
there's still a number of MSM sizes that get a noticeable improvement:
| Pre-computation factor | bn254 size `2^20` MSM, ms. | bn254 size `2^12` MSM, size `2^10` batch, ms. | bls12-381 size `2^20` MSM, ms. | bls12-381 size `2^12` MSM, size `2^10` batch, ms. |
| ------------- | ------------- | ------------- | ------------- | ------------- |
| 1 | 14.1 | 82.8 | 25.5 | 136.7 |
| 2 | 11.8 | 76.6 | 20.3 | 123.8 |
| 4 | 10.9 | 73.8 | 18.1 | 117.8 |
| 8 | 10.6 | 73.7 | 17.2 | 116.0 |

Here, for example, pre-computation factor = 4 means that alongside each
original base point, we pre-compute and pass into the MSM 3 of its
"shifted" versions. Pre-computation factor = 1 means no pre-computation.
The GPU used for benchmarks is a 3090 Ti.

## TODOs and open questions

- Golang APIs are missing;
- I mentioned that to utilise pre-compute to its full potential we need
an arbitrary choice of `c`. One issue with this is that the pre-compute will
become dependent on `c`. For now this is not the case, as `c` can only be
a power of 2 and powers of 2 can always share the same pre-computation.
So apparently we need to make `c` a parameter of the precompute function
to future-proof it against a breaking change. This is pretty unnatural and
counterintuitive, as `c` is typically chosen at runtime after the pre-compute
is done, but I don't really see another way; please let me know if you do.
UPD: `c` is added into the pre-compute function; for now it's unused, and
it's documented how it will change in the future.

Resolves https://github.com/ingonyama-zk/icicle/issues/147
Co-authored with @ChickenLover

---------

Co-authored-by: ChickenLover <romangg81@gmail.com>
Co-authored-by: nonam3e <timur@ingonyama.com>
Co-authored-by: nonam3e <71525212+nonam3e@users.noreply.github.com>
Co-authored-by: LeonHibnik <leon@ingonyama.com>
2024-03-13 23:25:16 +02:00
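A sketch of what the pre-computation produces under the description above (a toy Double stands in for on-device point doubling; all names are illustrative):

```go
package main

import "fmt"

// precomputeBases stores, alongside every base point P, its shifted
// versions 2^(j*c) * P for j = 1..factor-1, matching the description
// above (factor = 1 means no pre-computation).
func precomputeBases[P interface{ Double() P }](bases []P, factor, c int) []P {
	out := make([]P, 0, len(bases)*factor)
	for _, p := range bases {
		q := p
		out = append(out, q)
		for j := 1; j < factor; j++ {
			for k := 0; k < c; k++ {
				q = q.Double() // shift by another factor of 2^c
			}
			out = append(out, q)
		}
	}
	return out
}

// toyPoint stands in for a curve point; doubling an integer plays the
// role of doubling a point so the sketch is runnable without a curve.
type toyPoint int

func (t toyPoint) Double() toyPoint { return 2 * t }

func main() {
	// factor 4, c = 16: each base is joined by 2^16*P, 2^32*P, 2^48*P.
	fmt.Println(precomputeBases([]toyPoint{1}, 4, 16))
}
```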
HadarIngonyama
287f53ff16 NTT columns batch (#424)
This PR adds the columns batch feature - enabling batch NTT computation
to be performed directly on the columns of a matrix without having to
transpose it beforehand, as requested in issue #264.

Also, some small fixes to the reordering kernels were added, and some
unnecessary parameters were removed from function interfaces.

---------

Co-authored-by: DmytroTym <dmytrotym1@gmail.com>
2024-03-13 18:46:47 +02:00
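A minimal sketch of the strided access pattern that lets a columns-batch NTT read a column without transposing first (illustrative; the real kernels operate on device memory):

```go
package main

import "fmt"

// column gathers the j-th column of a row-major rows x cols matrix by
// strided indexing - the access pattern a columns-batch NTT can use
// directly instead of transposing the matrix first.
func column(data []uint64, rows, cols, j int) []uint64 {
	out := make([]uint64, rows)
	for i := 0; i < rows; i++ {
		out[i] = data[i*cols+j] // stride of `cols` between consecutive rows
	}
	return out
}

func main() {
	m := []uint64{
		1, 2,
		3, 4,
		5, 6,
	} // 3x2 row-major matrix
	fmt.Println(column(m, 3, 2, 1)) // [2 4 6]
}
```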
Jeremy Felder
89082fb561 FEAT: MultiGPU for golang bindings (#417)
## Describe the changes

This PR adds multi-GPU support in the golang bindings.

The main changes are to DeviceSlice, which now includes a `deviceId`
attribute specifying which device the underlying data resides on, and
checks for the correct deviceId and current device when using DeviceSlices
in any operation.

In Go, most concurrency can be done via goroutines (described as
lightweight threads - in reality, more of a threadpool manager);
however, there is no guarantee that a goroutine stays on a specific host
thread. Therefore, a function `RunOnDevice` was added to the
cuda_runtime package which locks a goroutine onto a specific host
thread, sets a current GPU device, runs a provided function, and unlocks
the goroutine from the host thread after the provided function finishes.
While the goroutine is locked to the host thread, the Go runtime will
not assign other goroutines to that host thread
2024-03-13 16:19:45 +02:00
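A sketch of the deviceId check described above (field and method names are assumptions, not the actual binding code):

```go
package main

import (
	"fmt"
	"unsafe"
)

// DeviceSlice models device-resident data tagged with the GPU it lives on.
type DeviceSlice struct {
	ptr      unsafe.Pointer
	length   int
	deviceId int
}

// checkDevice rejects use of a slice whose data lives on a different GPU
// than the one currently set on this host thread.
func (d DeviceSlice) checkDevice(currentDevice int) error {
	if d.deviceId != currentDevice {
		return fmt.Errorf("slice is on device %d but device %d is active",
			d.deviceId, currentDevice)
	}
	return nil
}

func main() {
	s := DeviceSlice{deviceId: 1}
	fmt.Println(s.checkDevice(0)) // slice is on device 1 but device 0 is active
}
```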
hhh_QC
08ec0b1ff6 update go install source in Dockerfile (#428) 2024-03-10 10:47:08 +02:00
Jeremy Felder
fa219d9c95 Fix release flow with deploy key and caching (#425)
## Describe the changes

This PR fixes the release flow action
2024-03-10 08:57:35 +02:00
DmytroTym
0e84fb4b76 feat: add warmup for CudaStream (#422)
## Describe the changes

Add a non-blocking `warmup` function to `CudaStream` 

> when you run the benchmark (e.g. the msm example you have) the first
instance is always slow, with a constant overhead of 200~300ms of cuda
stream warmup. and I want to get rid of that in my application by
warming it up in parallel while my host does something else.
2024-03-07 19:11:34 +02:00
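A sketch of how such a warmup overlaps with host work; `warmUp` here merely models the one-time startup cost and is not the actual API:

```go
package main

import (
	"fmt"
	"time"
)

// warmUp models the non-blocking CudaStream warmup: it pays the one-time
// startup cost (quoted above as ~200-300ms) off the critical path.
func warmUp(done chan<- struct{}) {
	go func() {
		time.Sleep(250 * time.Millisecond) // stands in for the real overhead
		close(done)
	}()
}

func main() {
	warmed := make(chan struct{})
	warmUp(warmed) // kick off warmup without blocking the host

	fmt.Println("host prepares inputs concurrently...")
	<-warmed // by the first kernel launch, the overhead is already paid
	fmt.Println("first GPU call now runs without the warmup penalty")
}
```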
Alex Xiong
d8059a2a4e Merge pull request #1 from ingonyama-zk/feat/warmup
Warmup function added
2024-03-07 18:18:18 +08:00
DmytroTym
4a65758408 Merge branch 'main' into feat/warmup 2024-03-06 22:08:45 +02:00
Jeremy Felder
1abd2ef9c9 Bump rust crates' version
icicle-bls12-377@1.7.0
icicle-bls12-381@1.7.0
icicle-bn254@1.7.0
icicle-bw6-761@1.7.0
icicle-core@1.7.0
icicle-cuda-runtime@1.7.0
icicle-grumpkin@1.7.0

Generated by cargo-workspaces
2024-03-06 22:05:10 +02:00
Jeremy Felder
9d402df0cf Release flow CI (#423)
## Describe the changes

This PR:
- Moves common crate attributes to the workspace Cargo.toml. 
- Adds a manual release flow for bumping, tagging, and draft release
2024-03-06 21:41:48 +02:00
DmytroTym
7185657ff7 Warmup function 2024-03-06 18:13:23 +02:00
Alex Xiong
b22aa02e91 fix: cargo fmt 2024-03-06 13:10:12 +00:00
ImmanuelSegol
af6bfc9ab0 golang docs (#413)
## Describe the changes

This PR...

## Linked Issues

Resolves #

---------

Co-authored-by: Jeremy Felder <jeremy.felder1@gmail.com>
Co-authored-by: DmytroTym <dmytrotym1@gmail.com>
2024-03-06 08:59:32 -04:00
Alex Xiong
b108c71bdd feat: add rust api for cudaFreeAsync 2024-03-06 12:44:43 +00:00
ChickenLover
9fc083916d Small features (#415)
This PR is a compilation of small improvements:

 - Lock bindgen version for `icicle-cuda-runtime`
 - Add an error message when trying to build on Mac (or any non-Windows/Linux machine)
 - Add documentation and template files for adding a new curve
 - Add documentation on _params.cuh contents
 - Add a script to bump all the rust crate versions to the same version

Resolves #313
2024-03-06 13:48:34 +02:00
Jeremy Felder
87ccd62976 Fix go setup in CI (#420)
## Describe the changes

This PR adds the use of setup-go in the CI to ensure that `go` is
installed properly and caches dependencies and build outputs by default
2024-03-06 12:31:24 +02:00
Jeremy Felder
d8f2313a01 Fix the rust changed files glob pattern (#419)
## Describe the changes

This PR fixes the glob pattern of changed files for rust
2024-03-05 12:27:08 +02:00
ChickenLover
a2ae7a9e2f minor changes to cuda (#414)
## Describe the changes

 - Fix include statements to use absolute path
 - Remove stale comments and code parts
 - Fix test_kernels.cu bug
2024-03-04 16:52:03 +07:00
ImmanuelSegol
d98b851d62 fix-primitives-links (#411) 2024-03-03 13:42:28 +07:00
ImmanuelSegol
4d19ca0b98 Vec ops docs (#410)
* fix examples

* revert

* refactor

* refactor

* refactor

* refactor

* refactor
2024-02-29 13:26:17 -05:00
Otsar
2e20be56f7 Added badges (#409)
* Update overview.md

Changed and updated static badges

* Update README.md

Added badge to readme
2024-02-29 15:26:12 +02:00
ImmanuelSegol
afa61c64f4 fix docs base url (#408)
refactor
2024-02-29 14:18:53 +02:00
Jeremy Felder
7934e15768 Release v1.6.0 (#406)
Release v1.6.0

- Add vector operations to golang bindings #399 
- Add Pederson commitment example in c++ #397
- Update CI for faster/better flow #398 
- Fix dev docs CI #405
2024-02-29 08:27:10 +02:00
ImmanuelSegol
76939f34e0 fix docs deploy github action (#405) 2024-02-28 18:09:04 +02:00
Jeremy Felder
1c1b2bab64 CI: move to language specific flows (#398)
Updates the CI to:
- run per supported language
- conditional run logic
- pipelined jobs for failing fast
- additional parallelization
- run golang build on windows
- reuse the check-changed-files workflow
2024-02-28 18:09:03 +02:00
Stas
d90081926f Pedersen commitment example in c++ (#397)
* initial commit

* random elliptic points

* initial complete example

* public random seed to prevent knowing dlogs

* cleaned up code

* add README

* Update examples/c++/pedersen-commitment/README.md

Co-authored-by: Jeremy Felder <jeremy.felder1@gmail.com>

* updates to PR comments

* codespell compliance

* corrected terminology in README

---------

Co-authored-by: Jeremy Felder <jeremy.felder1@gmail.com>
2024-02-28 18:09:03 +02:00
Jeremy Felder
656dd18cf8 Add vector operations for golang bindings (#399) 2024-02-28 18:09:03 +02:00
Jeremy Felder
40309329fb Migrate docs website + improved docs (#389) (#403)
migrate docs website + improved docs (#389)

* Update README.md (#385)

* refactor

* refactor

* refactor

* rename task

* update codespell

* multi gpu docs (#391)

* Refactor

* refactor

* fix typo

* Apply suggestions from code review



* refactor

* refactor

---------

Co-authored-by: ImmanuelSegol <3ditds@gmail.com>
Co-authored-by: DmytroTym <dmytrotym1@gmail.com>
Co-authored-by: ChickenLover <Romangg81@gmail.com>
2024-02-28 14:40:04 +02:00
Jeremy Felder
e6035698b5 Release v1.5.0 (#393)
# Contents of this release

Examples: multi-gpu example #381
Examples: updated the example comparing Radix2 and MixedRadix NTTs #383 
Feat: add vector operations bindings to Rust #384 
Examples: update examples with new vec ops #388 
Feat: Grumpkin curve implementation #379 
Feat: mixed-radix NTT fast twiddles mode #382 
Docs: Update README.md #385 #387 
README: Update Hall of Fame section #394 
Examples: add rust poseidon example #392 
Feat: GoLang bindings for v1.x #386
2024-02-23 10:15:18 +02:00
Jeremy Felder
e8cd2d7a98 GoLang bindings for v1.x (#386) 2024-02-22 20:52:48 +02:00
ChickenLover
efda4de48f add rust poseidon example (#392)
add rust poseidon example
2024-02-22 19:47:40 +02:00
ChickenLover
402c9dfb53 Update Hall of Fame section (#394)
Add nonam3e as contributor
2024-02-22 19:39:58 +02:00
ChickenLover
9a6ab924c2 Update README.md (#385) (#387) 2024-02-22 11:39:15 +07:00
yshekel
275b2f4958 feature: mixed-radix NTT fast twiddles mode (#382)
- this mode allocates an additional 4N twiddle factors to achieve faster computation
- enabled by a flag for initDomain(); defaults to false.

Co-authored-by: hadaringonyama <hadar@ingonyama.com>
2024-02-22 00:02:02 +02:00
nonam3e
4b221e9665 Grumpkin curve implementation (#379) 2024-02-21 23:20:28 +07:00
ChickenLover
965bf757f9 update examples with new vec ops (#388) 2024-02-21 22:30:40 +07:00
ChickenLover
f9755980f0 add vector operations bindings to Rust (#384)
* add vector operations bindings to Rust
2024-02-21 21:17:10 +07:00
Stas
bb62e716b4 Temp/stas/muli gpu example (#381)
## Describe the changes

This PR adds Multi-GPU Poseidon example

## Linked Issues

Some minor on-device memory issues require attention from devs, please
help
2024-02-20 19:45:44 -06:00
stas
c046fd17c6 removed my comments from poseidon.cuh 2024-02-20 20:43:59 -05:00
stas
82d1ff4769 fixed spelling in poseidon.cuh 2024-02-20 20:40:45 -05:00
Stas
d1f19af64d Merge branch 'dev' into temp/stas/muli-gpu-example 2024-02-20 19:07:48 -06:00
stas
b1af193f6f fixed spelling 2024-02-20 19:14:25 -05:00
Stas
49c7fb0db1 updated example comparing Radix2 and MixedRadix NTTs (#383)
## Describe the changes

Update to cover new NTT algorithms
2024-02-20 18:05:39 -06:00
stas
4664cfded5 complied with reviewer's comments 2024-02-19 15:59:49 -05:00
ChickenLover
fc6badcb35 Update README.md (#385) 2024-02-19 18:54:10 +07:00
stas
fb9e5c8511 updated example comparing Radix2 and MixedRadix NTTs 2024-02-18 18:40:01 -05:00
Stas Polonsky
518a3ad9b6 ready for PR 2024-02-17 00:18:21 +00:00
Stas Polonsky
6681be549a fixed on-device memory issue 2024-02-16 19:43:58 +00:00
Stas Polonsky
319358427f cudaSetDevice in the thread function 2024-02-16 16:35:04 +00:00
Stas Polonsky
8dd52306dc update README 2024-02-15 23:07:33 +00:00
Stas Polonsky
418c3d844b completed example 2024-02-15 22:10:15 +00:00
DmytroTym
15a63cc549 Release v1.4.0 (#378)
## Contents of this release

[FEAT]: support for multi-device execution:
https://github.com/ingonyama-zk/icicle/pull/356
[FEAT]: full support for new mixed-radix NTT:
https://github.com/ingonyama-zk/icicle/pull/367,
https://github.com/ingonyama-zk/icicle/pull/368 and
https://github.com/ingonyama-zk/icicle/pull/371
[FEAT]: examples for Poseidon hash and tree builder based on it
(currently only on C++ side):
https://github.com/ingonyama-zk/icicle/pull/375
[PERF]: MSM performance upgrades & zero point handling:
https://github.com/ingonyama-zk/icicle/pull/372
2024-02-15 22:32:56 +02:00
ImmanuelSegol
275eaa9904 bump version 2024-02-15 19:36:18 +00:00
DmytroTym
a91397e2c1 MSM improvements (#372)
* Improved MSM

* Zero point handling in large buckets

* Fixed affine zero point conversion for arkworks

* cargo fmt

* Addressed comments

* MSM comments

* All zero scalars case handled

* clang format
2024-02-15 20:02:10 +02:00
ImmanuelSegol
29675bb40d executes without errors 2024-02-15 16:45:33 +00:00
ChickenLover
3b48af55d7 fix declarator (#376)
Co-authored-by: Leon Hibnik <107353745+LeonHibnik@users.noreply.github.com>
2024-02-15 22:46:57 +07:00
ImmanuelSegol
481f144dc8 debug 2024-02-15 15:11:20 +00:00
ImmanuelSegol
086d36dd42 Merge remote-tracking branch 'origin/dev' into temp/stas/muli-gpu-example 2024-02-15 15:03:41 +00:00
DmytroTym
6854dbf06a Fix conflicts main (#377)
This PR fixes the conflicts between main and dev
2024-02-15 15:37:15 +02:00
ChickenLover
0929161a26 Merge branch 'dev' into fix-conflicts-main 2024-02-15 16:10:09 +07:00
ChickenLover
4f471ffa2a remove rust toolchains 2024-02-15 16:08:22 +07:00
DmytroTym
27e85d400a Poseidon examples C++ only (#375)
https://github.com/ingonyama-zk/icicle/pull/365 but without Rust which
for now doesn't work.
2024-02-15 10:46:09 +02:00
Yuval Shekel
ba6c3ae59c merge NTT part 2024-02-15 10:28:02 +02:00
ChickenLover
fd08925ed4 merge WIP 2024-02-15 14:57:09 +07:00
ChickenLover
66018f2367 rename example folder 2024-02-15 14:13:18 +07:00
stas
62cf733c5f answers Roman's comments 2024-02-15 14:07:48 +07:00
DmytroTym
76c3b4ba01 Merge branch 'dev' into poseidon-examples-no-cuda 2024-02-15 08:27:28 +02:00
ImmanuelSegol
4d75fbac93 issue with init_optimized_poseidon_constants 2024-02-14 22:28:17 +00:00
VitaliiH
774250926c multi card support (#356)
multi-GPU support
2024-02-14 22:29:30 +01:00
DmytroTym
2008259adc Merge branch 'dev' into poseidon-examples-no-cuda 2024-02-14 21:10:25 +02:00
DmytroTym
303a3b8770 Temporarily removed Rust Poseidon example 2024-02-14 21:06:10 +02:00
yshekel
0d70a0c003 fix: verify NTT size is a power of two (#374) 2024-02-14 17:54:31 +02:00
yshekel
c9e1d96b65 fix: wrong type in comment (#373)
NTTConfig -> PoseidonConfig
2024-02-14 15:51:25 +02:00
yshekel
a02459c64d Mixed-radix NTT support all orderings (#371)
- Mixed-radix NTT orderings support
- radix-2 small refactor: split core logic to function and renamed ct_butterfly to dit
- testing both radix2 and mixed-radix algs for all ntt tests
2024-02-13 15:49:24 +02:00
yshekel
ae060313db Mixed radix NTT coset support (#368) 2024-02-12 18:30:09 +02:00
yshekel
e16ce1026d Mixed-radix NTT batch support (#367)
Co-authored-by: hadaringonyama <hadar@ingonyama.com>
2024-02-12 14:50:22 +02:00
Leon Hibnik
d84ffd2679 Release/1.3.0 (#370)
Release 1.3.0:
* [FEAT] Mixed-radix NTT design
* [FEAT] example paths changed
2024-02-09 13:53:27 +02:00
ChickenLover
a65f44ad31 fix versioning problems 2024-02-09 15:55:36 +07:00
ChickenLover
8c1750ea97 Feat/roman/display functions (#366)
* fix display and debug traits

* leave only one impl for printing scalars
2024-02-09 14:40:07 +07:00
stas
582107fc7c added c++ example Poseidon-hash 2024-02-08 17:36:56 -05:00
Jeremy Felder
b5923a1791 Add concurrency group to examples workflow (#361) 2024-02-08 20:43:12 +00:00
Jeremy Felder
18fdd059da Fix: examples path deps (#363)
Change rust example deps to use paths

Co-authored-by: Leon Hibnik <107353745+LeonHibnik@users.noreply.github.com>
2024-02-08 20:43:12 +00:00
yshekel
382bec4ad3 Mixed-radix NTT algorithm
Co-authored-by: hadaringonyama <hadar@ingonyama.com>
2024-02-08 20:43:12 +00:00
Jeremy Felder
d367a8c1e0 Bump for release 2024-02-08 20:43:12 +00:00
Jeremy Felder
3cbdfe7f07 Add concurrency group to examples workflow (#361) 2024-02-08 16:35:48 +02:00
Jeremy Felder
e77173f266 Fix: examples path deps (#363)
Change rust example deps to use paths

Co-authored-by: Leon Hibnik <107353745+LeonHibnik@users.noreply.github.com>
2024-02-08 16:23:44 +02:00
yshekel
3582df2669 Mixed-radix NTT algorithm
Co-authored-by: hadaringonyama <hadar@ingonyama.com>
2024-02-08 13:52:00 +02:00
ImmanuelSegol
04b1b3dda5 refactor: add a basic example 2024-02-07 20:07:21 -04:00
Jeremy Felder
b6dded89cd Release v1.2.0 (#364)
Release v1.2.0:

- [FEAT] Add Poseidon hash as primitive
- [FEAT] Add Merkle tree using poseidon hash
- [BUG] Fix NTT overflow when using large cosets
2024-02-07 17:01:09 +02:00
Jeremy Felder
5a138367f8 (chore): bump rust crate versions (#362)
bump rust crate versions
2024-02-07 14:49:54 +02:00
ChickenLover
a3fc01d88d Implement Poseidon and TreeBuilder (#352)
* BW scalar field is now the same as BLS base field

* add poseidon

* add merkle tree builder

* poseidon rust bindings

* implement rust bindings

* add doc comments

* remove global poseidon constants

* add custom constants API and script for generating new constants

* add the rest of the curves for poseidon

* add all the curves for real

* misname bls12-377

* typo

* partial rounds

* minor fixes

* small tweak for big performance boost

* add CHK_INIT_IF_RETURN

---------

Co-authored-by: DmytroTym <dmytrotym1@gmail.com>
2024-02-07 14:49:54 +02:00
DmytroTym
d84cab79fd Changed long to int64 2024-02-07 14:49:54 +02:00
DmytroTym
46d7b88f6e Fixed overflow in large coset NTTs 2024-02-07 14:49:54 +02:00
Jeremy Felder
b20ef93c2d Merge branch 'main' into dev 2024-02-07 14:34:59 +02:00
Jeremy Felder
6b1b735576 (chore): bump rust crate versions (#362)
bump rust crate versions
2024-02-07 14:29:21 +02:00
ChickenLover
b2eecd02af Implement Poseidon and TreeBuilder (#352)
* BW scalar field is now the same as BLS base field

* add poseidon

* add merkle tree builder

* poseidon rust bindings

* implement rust bindings

* add doc comments

* remove global poseidon constants

* add custom constants API and script for generating new constants

* add the rest of the curves for poseidon

* add all the curves for real

* misname bls12-377

* typo

* partial rounds

* minor fixes

* small tweak for big performance boost

* add CHK_INIT_IF_RETURN

---------

Co-authored-by: DmytroTym <dmytrotym1@gmail.com>
2024-02-07 00:31:49 +07:00
DmytroTym
b13d993f5d Fixed overflow in large coset NTTs (#358)
If the domain has size 2^17, an NTT on a size-2^16 coset generated by `-1` fails. This happens due to the index in `BatchMulKernel` overflowing; fixed by using `long` instead of `int`.
2024-02-05 18:53:26 +02:00
DmytroTym
4f6b4f7dcf Merge branch 'dev' into coset_overflow_fix 2024-02-05 16:44:06 +02:00
Jeremy Felder
19721cfab6 Add missed bump to dev (#359)
Add missed bump to dev
2024-02-05 15:44:37 +02:00
DmytroTym
3c068ae4e7 Changed long to int64 2024-02-05 14:17:00 +02:00
Jeremy Felder
bfd510b3bb Bump for release 2024-02-05 13:33:00 +02:00
DmytroTym
d2b9ec1908 Fixed overflow in large coset NTTs 2024-02-05 13:14:03 +02:00
Jeremy Felder
77a7613aa2 Release v1.1.0 (#357)
Release v1.1.0:
- Updated examples to use the new API
- [c++] Curve specific functions using macros
- [c++] Consolidate MSM and Batch MSM to single function
- [CI] Add codespell in CI
- [FIX] Windows rust build
- [FIX] G2 on rust bindings
- [FIX] Bw6 using bls12377
2024-01-31 20:30:44 +02:00
Jeremy Felder
5a96f9937d Bump for release 2024-01-31 16:34:14 +02:00
Stas
aaa3808c81 Update examples for new api (#355)
## Describe the changes

Make sure the examples comply with the new API
2024-01-31 08:59:21 +02:00
stas
759b7b26d6 Update examples to use latest API 2024-01-31 08:59:21 +02:00
Leon Hibnik
1874ade68a update readme links (#346)
Update README.md
2024-01-24 12:26:38 +02:00
DmytroTym
96fe5bf283 G2 fix and BW6 scalar field on the Rust side (#341)
* BW scalar field is now the same as BLS base field in Rust

* G2 fixed and added into Rust
2024-01-24 11:51:22 +02:00
Otsar
c1a32a9879 Update README.md (#339)
Added badge
2024-01-11 18:34:32 +02:00
824 changed files with 141287 additions and 24399 deletions

.codespellignore

@@ -1,3 +1,6 @@
inout
crate
lmit
mut
uint
dout

.github/changed-files.yml

@@ -1,8 +1,13 @@
golang:
  - 'goicicle/**/*.go'
  - wrappers/golang/**/*.go
  - wrappers/golang/**/*.h
  - wrappers/golang/**/*.tmpl
  - go.mod
  - .github/workflows/golang.yml
rust:
  - wrappers/rust
  - wrappers/rust/**/*
  - '!wrappers/rust/README.md'
  - .github/workflows/rust.yml
cpp:
  - icicle/**/*.cu
  - icicle/**/*.cuh
@@ -10,4 +15,11 @@ cpp:
  - icicle/**/*.hpp
  - icicle/**/*.c
  - icicle/**/*.h
  - icicle/CMakeLists.txt
  - icicle/CMakeLists.txt
  - .github/workflows/cpp_cuda.yml
  - icicle/cmake/Common.cmake
  - icicle/cmake/CurvesCommon.cmake
  - icicle/cmake/FieldsCommon.cmake
examples:
  - examples/**/*
  - .github/workflows/examples.yml

.github/workflows/check-changed-files.yml

@@ -0,0 +1,44 @@
name: Check Changed Files
on:
  workflow_call:
    outputs:
      golang:
        description: "Flag for if GoLang files changed"
        value: ${{ jobs.check-changed-files.outputs.golang }}
      rust:
        description: "Flag for if Rust files changed"
        value: ${{ jobs.check-changed-files.outputs.rust }}
      cpp_cuda:
        description: "Flag for if C++/CUDA files changed"
        value: ${{ jobs.check-changed-files.outputs.cpp_cuda }}
      examples:
        description: "Flag for if example files changed"
        value: ${{ jobs.check-changed-files.outputs.examples }}
jobs:
  check-changed-files:
    name: Check Changed Files
    runs-on: ubuntu-22.04
    outputs:
      golang: ${{ steps.changed_files.outputs.golang }}
      rust: ${{ steps.changed_files.outputs.rust }}
      cpp_cuda: ${{ steps.changed_files.outputs.cpp_cuda }}
      examples: ${{ steps.changed_files.outputs.examples }}
    steps:
      - name: Checkout Repo
        uses: actions/checkout@v4
      - name: Get all changed files
        id: changed-files-yaml
        uses: tj-actions/changed-files@v39
        # https://github.com/tj-actions/changed-files#input_files_yaml_from_source_file
        with:
          files_yaml_from_source_file: .github/changed-files.yml
      - name: Run Changed Files script
        id: changed_files
        # https://github.com/tj-actions/changed-files#outputs-
        run: |
          echo "golang=${{ steps.changed-files-yaml.outputs.golang_any_modified }}" >> "$GITHUB_OUTPUT"
          echo "rust=${{ steps.changed-files-yaml.outputs.rust_any_modified }}" >> "$GITHUB_OUTPUT"
          echo "cpp_cuda=${{ steps.changed-files-yaml.outputs.cpp_any_modified }}" >> "$GITHUB_OUTPUT"
          echo "examples=${{ steps.changed-files-yaml.outputs.examples_any_modified }}" >> "$GITHUB_OUTPUT"


@@ -4,17 +4,17 @@ on:
  pull_request:
    branches:
      - main
      - dev
      - V2
jobs:
  spelling-checker:
    name: Check Spelling
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/checkout@v4
      - uses: codespell-project/actions-codespell@v2
        with:
          # https://github.com/codespell-project/actions-codespell?tab=readme-ov-file#parameter-skip
          skip: ./**/target,./**/build
          skip: ./**/target,./**/build,./docs/*.js,./docs/*.json
          # https://github.com/codespell-project/actions-codespell?tab=readme-ov-file#parameter-ignore_words_file
          ignore_words_file: .codespellignore

.github/workflows/cpp_cuda.yml (new file, 91 lines)

@@ -0,0 +1,91 @@
name: C++/CUDA
on:
  pull_request:
    branches:
      - main
      - V2
  push:
    branches:
      - main
      - V2
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
jobs:
  check-changed-files:
    uses: ./.github/workflows/check-changed-files.yml
  check-format:
    name: Check Code Format
    runs-on: ubuntu-22.04
    needs: check-changed-files
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Check clang-format
        if: needs.check-changed-files.outputs.cpp_cuda == 'true'
        run: if [[ $(find ./ \( -path ./icicle/build -prune -o -path ./**/target -prune -o -path ./examples -prune \) -iname *.h -or -iname *.cuh -or -iname *.cu -or -iname *.c -or -iname *.cpp | xargs clang-format --dry-run -ferror-limit=1 -style=file 2>&1) ]]; then echo "Please run clang-format"; exit 1; fi
  test-linux-curve:
    name: Test on Linux
    runs-on: [self-hosted, Linux, X64, icicle]
    needs: [check-changed-files, check-format]
    strategy:
      matrix:
        curve:
          - name: bn254
            build_args: -DG2=ON -DECNTT=ON
          - name: bls12_381
            build_args: -DG2=ON -DECNTT=ON
          - name: bls12_377
            build_args: -DG2=ON -DECNTT=ON
          - name: bw6_761
            build_args: -DG2=ON -DECNTT=ON
          - name: grumpkin
            build_args:
    steps:
      - name: Checkout Repo
        uses: actions/checkout@v4
      - name: Build curve
        working-directory: ./icicle
        if: needs.check-changed-files.outputs.cpp_cuda == 'true'
        run: |
          mkdir -p build && rm -rf build/*
          cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=ON -DCURVE=${{ matrix.curve.name }} ${{ matrix.curve.build_args }} -S . -B build
          cmake --build build -j
      - name: Run C++ curve Tests
        working-directory: ./icicle/build/tests
        if: needs.check-changed-files.outputs.cpp_cuda == 'true'
        run: ctest
  test-linux-field:
    name: Test on Linux
    runs-on: [self-hosted, Linux, X64, icicle]
    needs: [check-changed-files, check-format]
    strategy:
      matrix:
        field:
          - name: babybear
            build_args: -DEXT_FIELD=ON
          - name: stark252
            build_args: -DEXT_FIELD=OFF
          - name: m31
            build_args: -DEXT_FIELD=ON
    steps:
      - name: Checkout Repo
        uses: actions/checkout@v4
      - name: Build field
        working-directory: ./icicle
        if: needs.check-changed-files.outputs.cpp_cuda == 'true'
        run: |
          mkdir -p build && rm -rf build/*
          cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=ON -DFIELD=${{ matrix.field.name }} ${{ matrix.field.build_args }} -S . -B build
          cmake --build build -j
      - name: Run C++ field Tests
        working-directory: ./icicle/build/tests
        if: needs.check-changed-files.outputs.cpp_cuda == 'true'
        run: ctest

.github/workflows/deploy-docs.yml (new file, 46 lines)

@@ -0,0 +1,46 @@
name: Deploy to GitHub Pages
on:
  push:
    branches:
      - main
    paths:
      - 'docs/**'
permissions:
  contents: write
jobs:
  deploy:
    name: Deploy to GitHub Pages
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          path: 'repo'
      - uses: actions/setup-node@v3
        with:
          node-version: 18
          cache: npm
          cache-dependency-path: ./repo/docs/package-lock.json
      - name: Install dependencies
        run: npm install --frozen-lockfile
        working-directory: ./repo/docs
      - name: Build website
        run: npm run build
        working-directory: ./repo/docs
      - name: Copy CNAME to build directory
        run: echo "dev.ingonyama.com" > ./build/CNAME
        working-directory: ./repo/docs
      - name: Deploy to GitHub Pages
        uses: peaceiris/actions-gh-pages@v3
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ./repo/docs/build
          user_name: github-actions[bot]
          user_email: 41898282+github-actions[bot]@users.noreply.github.com
        working-directory: ./repo/docs

.github/workflows/examples.yml vendored Normal file

@@ -0,0 +1,60 @@
# This workflow is a demo of how to run all examples in the Icicle repository.
# For each language directory (c++, Rust, etc.) the workflow
# (1) loops over all examples (msm, ntt, etc.) and
# (2) runs ./compile.sh and ./run.sh in each directory.
# The script ./compile.sh should compile the example and ./run.sh should run it.
# Each script should return 0 for success and 1 otherwise.

name: Examples
on:
  pull_request:
    branches:
      - main
      - V2
  push:
    branches:
      - main
      - V2

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  check-changed-files:
    uses: ./.github/workflows/check-changed-files.yml

  run-examples:
    runs-on: [self-hosted, Linux, X64, icicle, examples]
    needs: check-changed-files
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: c++ examples
        working-directory: ./examples/c++
        if: needs.check-changed-files.outputs.cpp_cuda == 'true' || needs.check-changed-files.outputs.examples == 'true'
        run: |
          # loop over all directories in the current directory
          for dir in $(find . -mindepth 1 -maxdepth 1 -type d); do
            if [ -d "$dir" ]; then
              echo "Running command in $dir"
              cd $dir
              ./compile.sh
              ./run.sh
              cd -
            fi
          done
      - name: Rust examples
        working-directory: ./examples/rust
        if: needs.check-changed-files.outputs.rust == 'true' || needs.check-changed-files.outputs.examples == 'true'
        run: |
          # loop over all directories in the current directory
          for dir in $(find . -mindepth 1 -maxdepth 1 -type d); do
            if [ -d "$dir" ]; then
              echo "Running command in $dir"
              cd $dir
              cargo run --release
              cd -
            fi
          done

.github/workflows/golang.yml vendored Normal file

@@ -0,0 +1,162 @@
name: GoLang
on:
  pull_request:
    branches:
      - main
      - V2
  push:
    branches:
      - main
      - V2

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  check-changed-files:
    uses: ./.github/workflows/check-changed-files.yml

  check-format:
    name: Check Code Format
    runs-on: ubuntu-22.04
    needs: check-changed-files
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Setup go
        uses: actions/setup-go@v5
        with:
          go-version: '1.20.0'
      - name: Check gofmt
        if: needs.check-changed-files.outputs.golang == 'true'
        run: if [[ $(go list ./... | xargs go fmt) ]]; then echo "Please run go fmt"; exit 1; fi

  build-curves-linux:
    name: Build and test curves on Linux
    runs-on: [self-hosted, Linux, X64, icicle]
    needs: [check-changed-files, check-format]
    strategy:
      matrix:
        curve:
          - name: bn254
            build_args: -g2 -ecntt
          - name: bls12_381
            build_args: -g2 -ecntt
          - name: bls12_377
            build_args: -g2 -ecntt
          - name: bw6_761
            build_args: -g2 -ecntt
          - name: grumpkin
            build_args:
    steps:
      - name: Checkout Repo
        uses: actions/checkout@v4
      - name: Setup go
        uses: actions/setup-go@v5
        with:
          go-version: '1.20.0'
      - name: Build
        working-directory: ./wrappers/golang
        if: needs.check-changed-files.outputs.golang == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
        # builds a single curve with the curve's specified build args
        run: ./build.sh -curve=${{ matrix.curve.name }} ${{ matrix.curve.build_args }}
      - name: Test
        working-directory: ./wrappers/golang/curves
        if: needs.check-changed-files.outputs.golang == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
        run: |
          CURVE=$(echo ${{ matrix.curve.name }} | sed -e 's/_//g')
          export CPATH=$CPATH:/usr/local/cuda/include
          go test ./$CURVE/tests -count=1 -failfast -p 2 -timeout 60m -v

  build-fields-linux:
    name: Build and test fields on Linux
    runs-on: [self-hosted, Linux, X64, icicle]
    needs: [check-changed-files, check-format]
    strategy:
      matrix:
        field:
          - name: babybear
            build_args: -field-ext
    steps:
      - name: Checkout Repo
        uses: actions/checkout@v4
      - name: Setup go
        uses: actions/setup-go@v5
        with:
          go-version: '1.20.0'
      - name: Build
        working-directory: ./wrappers/golang
        if: needs.check-changed-files.outputs.golang == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
        # builds a single field with the field's specified build args
        run: ./build.sh -field=${{ matrix.field.name }} ${{ matrix.field.build_args }}
      - name: Test
        working-directory: ./wrappers/golang/fields
        if: needs.check-changed-files.outputs.golang == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
        run: |
          FIELD=$(echo ${{ matrix.field.name }} | sed -e 's/_//g')
          export CPATH=$CPATH:/usr/local/cuda/include
          go test ./$FIELD/tests -count=1 -failfast -p 2 -timeout 60m -v

  build-hashes-linux:
    name: Build and test hashes on Linux
    runs-on: [self-hosted, Linux, X64, icicle]
    needs: [check-changed-files, check-format]
    strategy:
      matrix:
        hash:
          - name: keccak
            build_args:
    steps:
      - name: Checkout Repo
        uses: actions/checkout@v4
      - name: Setup go
        uses: actions/setup-go@v5
        with:
          go-version: '1.20.0'
      - name: Build
        working-directory: ./wrappers/golang
        if: needs.check-changed-files.outputs.golang == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
        # builds a single hash algorithm with the hash's specified build args
        run: ./build.sh -hash=${{ matrix.hash.name }} ${{ matrix.hash.build_args }}
      - name: Test
        working-directory: ./wrappers/golang/hash
        if: needs.check-changed-files.outputs.golang == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
        run: |
          HASH=$(echo ${{ matrix.hash.name }} | sed -e 's/_//g')
          export CPATH=$CPATH:/usr/local/cuda/include
          go test ./$HASH/tests -count=1 -failfast -p 2 -timeout 60m -v

  # TODO: bw6 on windows requires more memory than the standard runner has
  # Add a large runner and then enable this job
  # build-windows:
  #   name: Build on Windows
  #   runs-on: windows-2022
  #   needs: [check-changed-files, check-format]
  #   strategy:
  #     matrix:
  #       curve: [bn254, bls12_381, bls12_377, bw6_761]
  #   steps:
  #     - name: Checkout Repo
  #       uses: actions/checkout@v4
  #     - name: Setup go
  #       uses: actions/setup-go@v5
  #       with:
  #         go-version: '1.20.0'
  #     - name: Download and Install Cuda
  #       if: needs.check-changed-files.outputs.golang == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
  #       id: cuda-toolkit
  #       uses: Jimver/cuda-toolkit@v0.2.11
  #       with:
  #         cuda: '12.0.0'
  #         method: 'network'
  #         # https://docs.nvidia.com/cuda/archive/12.0.0/cuda-installation-guide-microsoft-windows/index.html
  #         sub-packages: '["cudart", "nvcc", "thrust", "visual_studio_integration"]'
  #     - name: Build libs
  #       if: needs.check-changed-files.outputs.golang == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
  #       working-directory: ./wrappers/golang
  #       env:
  #         CUDA_PATH: ${{ steps.cuda-toolkit.outputs.CUDA_PATH }}
  #       shell: pwsh
  #       run: ./build.ps1 ${{ matrix.curve }} ON # builds a single curve with G2 enabled


@@ -1,115 +0,0 @@
name: Build
on:
  pull_request:
    branches:
      - main
      - dev
  push:
    branches:
      - main
      - dev

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

env:
  CARGO_TERM_COLOR: always
  ARCH_TYPE: native

jobs:
  check-changed-files:
    name: Check Changed Files
    runs-on: ubuntu-22.04
    outputs:
      golang: ${{ steps.changed_files.outputs.golang }}
      rust: ${{ steps.changed_files.outputs.rust }}
      cpp_cuda: ${{ steps.changed_files.outputs.cpp_cuda }}
    steps:
      - name: Checkout Repo
        uses: actions/checkout@v3
      - name: Get all changed files
        id: changed-files-yaml
        uses: tj-actions/changed-files@v39
        # https://github.com/tj-actions/changed-files#input_files_yaml_from_source_file
        with:
          files_yaml_from_source_file: .github/changed-files.yml
      - name: Run Changed Files script
        id: changed_files
        # https://github.com/tj-actions/changed-files#outputs-
        run: |
          echo "golang=${{ steps.changed-files-yaml.outputs.golang_any_modified }}" >> "$GITHUB_OUTPUT"
          echo "rust=${{ steps.changed-files-yaml.outputs.rust_any_modified }}" >> "$GITHUB_OUTPUT"
          echo "cpp_cuda=${{ steps.changed-files-yaml.outputs.cpp_any_modified }}" >> "$GITHUB_OUTPUT"

  build-rust-linux:
    name: Build Rust on Linux
    runs-on: [self-hosted, Linux, X64, icicle]
    needs: check-changed-files
    steps:
      - name: Checkout Repo
        uses: actions/checkout@v3
      - name: Build Rust
        working-directory: ./wrappers/rust
        if: needs.check-changed-files.outputs.rust == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
        # Building from the root workspace will build all members of the workspace by default
        run: cargo build --release --verbose

  build-rust-windows:
    name: Build Rust on Windows
    runs-on: windows-2022
    needs: check-changed-files
    steps:
      - name: Checkout Repo
        uses: actions/checkout@v3
      - name: Download and Install Cuda
        if: needs.check-changed-files.outputs.rust == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
        id: cuda-toolkit
        uses: Jimver/cuda-toolkit@v0.2.11
        with:
          cuda: '12.0.0'
          method: 'network'
          # https://docs.nvidia.com/cuda/archive/12.0.0/cuda-installation-guide-microsoft-windows/index.html
          sub-packages: '["cudart", "nvcc", "thrust", "visual_studio_integration"]'
      - name: Build Rust Targets
        working-directory: ./wrappers/rust
        if: needs.check-changed-files.outputs.rust == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
        env:
          CUDA_PATH: ${{ steps.cuda-toolkit.outputs.CUDA_PATH }}
        # Building from the root workspace will build all members of the workspace by default
        run: cargo build --release --verbose

  # TODO: Reenable once Golang bindings for v1+ is finished
  # build-golang-linux:
  #   name: Build Golang on Linux
  #   runs-on: [self-hosted, Linux, X64, icicle]
  #   needs: check-changed-files
  #   steps:
  #     - name: Checkout Repo
  #       uses: actions/checkout@v3
  #     - name: Build CUDA libs
  #       if: needs.check-changed-files.outputs.golang == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
  #       run: make all
  #       working-directory: ./goicicle
  # TODO: Add once Golang make file supports building for Windows
  # build-golang-windows:
  #   name: Build Golang on Windows
  #   runs-on: windows-2022
  #   needs: check-changed-files
  #   steps:
  #     - name: Checkout Repo
  #       uses: actions/checkout@v3
  #     - name: Download and Install Cuda
  #       if: needs.check-changed-files.outputs.golang == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
  #       uses: Jimver/cuda-toolkit@v0.2.11
  #       with:
  #         cuda: '12.0.0'
  #         method: 'network'
  #         # https://docs.nvidia.com/cuda/archive/12.0.0/cuda-installation-guide-microsoft-windows/index.html
  #         sub-packages: '["cudart", "nvcc", "thrust"]'
  #     - name: Build cpp libs
  #       if: needs.check-changed-files.outputs.golang == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
  #       run: make all
  #       working-directory: ./goicicle


@@ -1,47 +0,0 @@
name: Format
on:
  pull_request:
    branches:
      - main
      - dev

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  formatting-rust:
    name: Check Rust Code Formatting
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Check rustfmt
        working-directory: ./wrappers/rust
        # "-name target -prune" removes searching in any directory named "target"
        # Formatting by single file is necessary due to generated files not being present
        # before building the project.
        # e.g. icicle-cuda-runtime/src/bindings.rs is generated and icicle-cuda-runtime/src/lib.rs includes that module
        # causing rustfmt to fail.
        run: if [[ $(find . -name target -prune -o -iname *.rs -print | xargs cargo fmt --check --) ]]; then echo "Please run cargo fmt"; exit 1; fi
      # - name: Check clippy
      #   run: cargo clippy --no-deps --all-features --all-targets

  formatting-golang:
    name: Check Golang Code Formatting
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Check gofmt
        run: if [[ $(go list ./... | xargs go fmt) ]]; then echo "Please run go fmt"; exit 1; fi

  formatting-cpp-cuda:
    name: Check C++/CUDA Code Formatting
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Check clang-format
        run: if [[ $(find ./ \( -path ./icicle/build -prune -o -path ./**/target -prune -o -path ./examples -prune \) -iname *.h -or -iname *.cuh -or -iname *.cu -or -iname *.c -or -iname *.cpp | xargs clang-format --dry-run -ferror-limit=1 -style=file 2>&1) ]]; then echo "Please run clang-format"; exit 1; fi


@@ -1,94 +0,0 @@
name: Test
on:
  pull_request:
    branches:
      - main
      - dev

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

env:
  CARGO_TERM_COLOR: always
  ARCH_TYPE: native

jobs:
  check-changed-files:
    name: Check Changed Files
    runs-on: ubuntu-22.04
    outputs:
      golang: ${{ steps.changed_files.outputs.golang }}
      rust: ${{ steps.changed_files.outputs.rust }}
      cpp_cuda: ${{ steps.changed_files.outputs.cpp_cuda }}
    steps:
      - name: Checkout Repo
        uses: actions/checkout@v3
      - name: Get all changed files
        id: changed-files-yaml
        uses: tj-actions/changed-files@v39
        # https://github.com/tj-actions/changed-files#input_files_yaml_from_source_file
        with:
          files_yaml_from_source_file: .github/changed-files.yml
      - name: Run Changed Files script
        id: changed_files
        # https://github.com/tj-actions/changed-files#outputs-
        run: |
          echo "golang=${{ steps.changed-files-yaml.outputs.golang_any_modified }}" >> "$GITHUB_OUTPUT"
          echo "rust=${{ steps.changed-files-yaml.outputs.rust_any_modified }}" >> "$GITHUB_OUTPUT"
          echo "cpp_cuda=${{ steps.changed-files-yaml.outputs.cpp_any_modified }}" >> "$GITHUB_OUTPUT"

  test-rust-linux:
    name: Test Rust on Linux
    runs-on: [self-hosted, Linux, X64, icicle]
    needs: check-changed-files
    steps:
      - name: Checkout Repo
        uses: actions/checkout@v3
      - name: Run Rust Tests
        working-directory: ./wrappers/rust
        if: needs.check-changed-files.outputs.rust == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
        # Running tests from the root workspace will run all workspace members' tests by default
        # TODO: remove test-threads once thread safety is finalized
        run: cargo test --release --verbose -- --test-threads=1

  test-cpp-linux:
    name: Test C++ on Linux
    runs-on: [self-hosted, Linux, X64, icicle]
    needs: check-changed-files
    strategy:
      matrix:
        curve: [bn254, bls12_381, bls12_377, bw6_761]
    steps:
      - name: Checkout Repo
        uses: actions/checkout@v3
      - name: Build C++
        working-directory: ./icicle
        if: needs.check-changed-files.outputs.cpp_cuda == 'true'
        run: |
          mkdir -p build
          cmake -DBUILD_TESTS=ON -DCMAKE_BUILD_TYPE=Release -DCURVE=${{ matrix.curve }} -S . -B build
          cmake --build build
      - name: Run C++ Tests
        working-directory: ./icicle/build
        if: needs.check-changed-files.outputs.cpp_cuda == 'true'
        run: ctest

  # TODO: Reenable once Golang bindings for v1+ is finished
  # test-golang-linux:
  #   name: Test Golang on Linux
  #   runs-on: [self-hosted, Linux, X64, icicle]
  #   needs: check-changed-files
  #   steps:
  #     - name: Checkout Repo
  #       uses: actions/checkout@v3
  #     - name: Build CUDA libs
  #       working-directory: ./goicicle
  #       if: needs.check-changed-files.outputs.golang == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
  #       run: make libbn254.so
  #     - name: Run Golang Tests
  #       if: needs.check-changed-files.outputs.golang == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
  #       run: |
  #         export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/goicicle
  #         go test ./goicicle/curves/bn254 -count=1

.github/workflows/release.yml vendored Normal file

@@ -0,0 +1,50 @@
name: Release
on:
  workflow_dispatch:
    inputs:
      releaseType:
        description: 'Release type'
        required: true
        default: 'minor'
        type: choice
        options:
          - patch
          - minor
          - major

jobs:
  release:
    name: Release
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          ssh-key: ${{ secrets.DEPLOY_KEY }}
      - name: Setup Cache
        id: cache
        uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/bin/
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
          key: ${{ runner.os }}-cargo-${{ hashFiles('~/.cargo/bin/cargo-workspaces') }}
      - name: Install cargo-workspaces
        if: steps.cache.outputs.cache-hit != 'true'
        run: cargo install cargo-workspaces
      - name: Bump rust crate versions, commit, and tag
        working-directory: wrappers/rust
        # https://github.com/pksunkara/cargo-workspaces?tab=readme-ov-file#version
        run: |
          git config user.name release-bot
          git config user.email release-bot@ingonyama.com
          cargo workspaces version ${{ inputs.releaseType }} -y --no-individual-tags -m "Bump rust crates' version"
      - name: Create draft release
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          LATEST_TAG=$(git describe --tags --abbrev=0)
          gh release create $LATEST_TAG --generate-notes -d --verify-tag -t "Release $LATEST_TAG"

.github/workflows/rust.yml vendored Normal file

@@ -0,0 +1,112 @@
name: Rust
on:
  pull_request:
    branches:
      - main
      - V2
  push:
    branches:
      - main
      - V2

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  check-changed-files:
    uses: ./.github/workflows/check-changed-files.yml

  check-format:
    name: Check Code Format
    runs-on: ubuntu-22.04
    needs: check-changed-files
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Check rustfmt
        if: needs.check-changed-files.outputs.rust == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
        working-directory: ./wrappers/rust
        # "-name target -prune" removes searching in any directory named "target"
        # Formatting by single file is necessary due to generated files not being present
        # before building the project.
        # e.g. icicle-cuda-runtime/src/bindings.rs is generated and icicle-cuda-runtime/src/lib.rs includes that module
        # causing rustfmt to fail.
        run: if [[ $(find . -path ./icicle-curves/icicle-curve-template -prune -o -name target -prune -o -iname *.rs -print | xargs cargo fmt --check --) ]]; then echo "Please run cargo fmt"; exit 1; fi

  build-linux:
    name: Build on Linux
    runs-on: [self-hosted, Linux, X64, icicle]
    needs: [check-changed-files, check-format]
    steps:
      - name: Checkout Repo
        uses: actions/checkout@v4
      - name: Build
        working-directory: ./wrappers/rust
        if: needs.check-changed-files.outputs.rust == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
        # Building from the root workspace will build all members of the workspace by default
        run: cargo build --release --verbose

  test-linux:
    name: Test on Linux
    runs-on: [self-hosted, Linux, X64, icicle]
    needs: [check-changed-files, build-linux]
    steps:
      - name: Checkout Repo
        uses: actions/checkout@v4
      - name: Run tests
        working-directory: ./wrappers/rust
        if: needs.check-changed-files.outputs.rust == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
        # Running tests from the root workspace will run all workspace members' tests by default
        # We need to limit the number of threads to avoid running out of memory on weaker machines
        # ignored tests are polynomial tests. Since they conflict with NTT tests, they are executed separately
        run: |
          cargo test --workspace --exclude icicle-babybear --exclude icicle-stark252 --exclude icicle-m31 --release --verbose --features=g2 -- --test-threads=2 --ignored
          cargo test --workspace --exclude icicle-babybear --exclude icicle-stark252 --exclude icicle-m31 --release --verbose --features=g2 -- --test-threads=2
      - name: Run baby bear tests
        working-directory: ./wrappers/rust/icicle-fields/icicle-babybear
        if: needs.check-changed-files.outputs.rust == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
        run: |
          cargo test --release --verbose -- --ignored
          cargo test --release --verbose
      - name: Run stark252 tests
        working-directory: ./wrappers/rust/icicle-fields/icicle-stark252
        if: needs.check-changed-files.outputs.rust == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
        run: |
          cargo test --release --verbose -- --ignored
          cargo test --release --verbose
      - name: Run m31 tests
        working-directory: ./wrappers/rust/icicle-fields/icicle-m31
        if: needs.check-changed-files.outputs.rust == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
        run: |
          cargo test --release --verbose -- --ignored
          cargo test --release --verbose

  # build-windows:
  #   name: Build on Windows
  #   runs-on: windows-2022
  #   needs: check-changed-files
  #   steps:
  #     - name: Checkout Repo
  #       uses: actions/checkout@v4
  #     - name: Download and Install Cuda
  #       if: needs.check-changed-files.outputs.rust == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
  #       id: cuda-toolkit
  #       uses: Jimver/cuda-toolkit@v0.2.11
  #       with:
  #         cuda: '12.0.0'
  #         method: 'network'
  #         # https://docs.nvidia.com/cuda/archive/12.0.0/cuda-installation-guide-microsoft-windows/index.html
  #         sub-packages: '["cudart", "nvcc", "thrust", "visual_studio_integration"]'
  #     - name: Build targets
  #       working-directory: ./wrappers/rust
  #       if: needs.check-changed-files.outputs.rust == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
  #       env:
  #         CUDA_PATH: ${{ steps.cuda-toolkit.outputs.CUDA_PATH }}
  #         CUDA_ARCH: 50 # Using CUDA_ARCH=50 env variable since the CI machines have no GPUs
  #       # Building from the root workspace will build all members of the workspace by default
  #       run: cargo build --release --verbose

.github/workflows/test-deploy-docs.yml vendored Normal file

@@ -0,0 +1,29 @@
name: Test Deploy to GitHub Pages
on:
  pull_request:
    branches:
      - main
    paths:
      - 'docs/**'

jobs:
  test-deploy:
    name: Test deployment of docs website
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          path: 'repo'
      - uses: actions/setup-node@v3
        with:
          node-version: 18
          cache: npm
          cache-dependency-path: ./repo/docs/package-lock.json
      - name: Install dependencies
        run: npm install --frozen-lockfile
        working-directory: ./repo/docs
      - name: Test build website
        run: npm run build
        working-directory: ./repo/docs

.gitignore vendored

@@ -8,6 +8,7 @@
*.so
*.nsys-rep
*.ncu-rep
*.sage.py
**/target
**/.vscode
**/.*lock*csv#
@@ -16,4 +17,4 @@
**/Cargo.lock
**/icicle/build/
**/wrappers/rust/icicle-cuda-runtime/src/bindings.rs
**/build
**/build*


@@ -15,7 +15,7 @@ ENV PATH="/root/.cargo/bin:${PATH}"
# Install Golang
ENV GOLANG_VERSION 1.21.1
RUN curl -L https://golang.org/dl/go${GOLANG_VERSION}.linux-amd64.tar.gz | tar -xz -C /usr/local
RUN curl -L https://go.dev/dl/go${GOLANG_VERSION}.linux-amd64.tar.gz | tar -xz -C /usr/local
ENV PATH="/usr/local/go/bin:${PATH}"
# Set the working directory in the container


@@ -1,20 +1,22 @@
# ICICLE
**<div align="center">ICICLE is a library for ZK acceleration using CUDA-enabled GPUs.</div>**
<div align="center">ICICLE is a library for ZK acceleration using CUDA-enabled GPUs.</div>
<p align="center">
<img alt="ICICLE" width="300" height="300" src="https://user-images.githubusercontent.com/2446179/223707486-ed8eb5ab-0616-4601-8557-12050df8ccf7.png"/>
</p>
<p align="center">
<a href="https://discord.gg/EVVXTdt6DF">
<img src="https://img.shields.io/discord/1063033227788423299?logo=discord" alt="Chat with us on Discord">
</a>
<a href="https://twitter.com/intent/follow?screen_name=Ingo_zk">
<img src="https://img.shields.io/twitter/follow/Ingo_zk?style=social&logo=twitter" alt="Follow us on Twitter">
<a href="https://github.com/ingonyama-zk/icicle/releases">
<img src="https://img.shields.io/github/v/release/ingonyama-zk/icicle" alt="GitHub Release">
</a>
</p>
## Background
Zero Knowledge Proofs (ZKPs) are considered one of the greatest achievements of modern cryptography. Accordingly, ZKPs are expected to disrupt a number of industries and will usher in an era of trustless and privacy-preserving services and infrastructure.
@@ -43,12 +45,12 @@ ICICLE is a CUDA implementation of general functions widely used in ZKP.
- [GCC](https://gcc.gnu.org/install/download.html) version 9 or newer; the latest version is recommended.
- Any Nvidia GPU (which supports CUDA Toolkit version 12.0 or above).
> [!NOTE]
> It is possible to use CUDA 11 for cards which dont support CUDA 12, however we dont officially support this version and in the future there may be issues.
> [!NOTE]
> It is possible to use CUDA 11 for cards which don't support CUDA 12, however we don't officially support this version and in the future there may be issues.
### Accessing Hardware
If you don't have access to a Nvidia GPU we have some options for you.
If you don't have access to an Nvidia GPU we have some options for you.
Check out [Google Colab](https://colab.google/). Google Colab offers a free [T4 GPU](https://www.nvidia.com/en-us/data-center/tesla-t4/) instance and ICICLE can be used with it; reference this guide for setting up your [Google Colab workspace][GOOGLE-COLAB-ICICLE].
@@ -71,7 +73,7 @@ Running ICICLE via Rust bindings is highly recommended and simple:
- Clone this repo
- go to our [Rust bindings][ICICLE-RUST]
- Enter a [curve](./wrappers/rust/icicle-curves) implementation
- run `cargo build --release` to build or `cargo test -- --test-threads=1` to build and execute tests
- run `cargo build --release` to build or `cargo test` to build and execute tests
If you want to compile and run the core ICICLE C++ tests, just follow these steps:
- Clone this repo
@@ -113,7 +115,11 @@ This will ensure our custom hooks are run and will make it easier to follow our
- [Robik](https://github.com/robik75), for his ongoing support and mentorship
- [liuxiao](https://github.com/liuxiaobleach), for being a top notch bug smasher
- [gkigiermo](https://github.com/gkigiermo), for making it intuitive to use ICICLE in Google Colab.
- [gkigiermo](https://github.com/gkigiermo), for making it intuitive to use ICICLE in Google Colab
- [nonam3e](https://github.com/nonam3e), for adding Grumpkin curve support into ICICLE
- [alxiong](https://github.com/alxiong), for adding warmup for CudaStream
- [cyl19970726](https://github.com/cyl19970726), for updating go install source in Dockerfile
- [PatStiles](https://github.com/PatStiles), for adding Stark252 field
## Help & Support
@@ -138,14 +144,14 @@ See [LICENSE-MIT][LMIT] for details.
[HOOKS_DOCS]: https://git-scm.com/docs/githooks
[HOOKS_PATH]: ./scripts/hooks/
[CMAKELISTS]: https://github.com/ingonyama-zk/icicle/blob/f0e6b465611227b858ec4590f4de5432e892748d/icicle/CMakeLists.txt#L28
[GOOGLE-COLAB-ICICLE]: https://github.com/gkigiermo/rust-cuda-colab
[GOOGLE-COLAB-ICICLE]: https://dev.ingonyama.com/icicle/colab-instructions
[GRANT_PROGRAM]: https://medium.com/@ingonyama/icicle-for-researchers-grants-challenges-9be1f040998e
[ICICLE-CORE]: ./icicle/
[ICICLE-RUST]: ./wrappers/rust/
[ICICLE-GO]: ./goicicle/
[ICICLE-GO]: ./wrappers/golang/
[ICICLE-CORE-README]: ./icicle/README.md
[ICICLE-RUST-README]: ./wrappers/rust/README.md
[ICICLE-GO-README]: ./goicicle/README.md
[ICICLE-GO-README]: ./wrappers/golang/README.md
[documentation]: https://dev.ingonyama.com/icicle/overview
[examples]: ./examples/

docs/.codespellignore Normal file

@@ -0,0 +1 @@
ICICLE

docs/.gitignore vendored Normal file

@@ -0,0 +1,17 @@
.docusaurus/
node_modules/
yarn.lock
.DS_Store
# tex build artifacts
.aux
.bbl
.bcf
.blg
.fdb_latexmk
.fls
.log
.out
.xml
.gz
.toc

docs/.prettierignore Normal file

@@ -0,0 +1,17 @@
.docusaurus/
node_modules/
yarn.lock
.DS_Store
# tex build artifacts
.aux
.bbl
.bcf
.blg
.fdb_latexmk
.fls
.log
.out
.xml
.gz
.toc

docs/.prettierrc Normal file

@@ -0,0 +1,10 @@
{
"semi": false,
"singleQuote": true,
"trailingComma": "es5",
"printWidth": 80,
"tabWidth": 2,
"useTabs": false,
"proseWrap": "preserve",
"endOfLine": "lf"
}

docs/CNAME Normal file

@@ -0,0 +1 @@
dev.ingonyama.com

docs/README.md Normal file

@@ -0,0 +1,39 @@
# Website
This website is built using [Docusaurus 2](https://docusaurus.io/), a modern static website generator.
### Installation
```
$ npm i
```
### Local Development
```
$ npm start
```
This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server.
### Build
```
$ npm run build
```
This command generates static content into the `build` directory and can be served using any static content hosting service.
### Deployment
Using SSH:
```
$ USE_SSH=true npm run deploy
```
Not using SSH:
```
$ GIT_USER=<Your GitHub username> npm run deploy
```

docs/babel.config.js Normal file

@@ -0,0 +1,3 @@
module.exports = {
presets: [require.resolve('@docusaurus/core/lib/babel/preset')],
};

docs/docs/ZKContainers.md Normal file

@@ -0,0 +1,12 @@
# ZKContainer
Developing ZK provers with ICICLE gives developers the ability to scale a prover across many machines and many GPUs. To make this possible we developed the ZKContainer.
## What is a ZKContainer?
A ZKContainer is a standardized, optimized and secure Docker container that we configured with ICICLE applications in mind. A developer using our ZKContainer can deploy an ICICLE application on a single machine or on a thousand GPU machines in a data center with minimal compatibility concerns.
ZKContainer has been used by Ingonyama clients to achieve scalability across large data centers.
We suggest you read our [article](https://medium.com/@ingonyama/product-announcement-zk-containers-0e2a1f2d0a2b) regarding ZKContainer to understand the benefits of using it.
![ZKContainer inside a ZK data center](../static/img/architecture-zkcontainer.png)


@@ -0,0 +1,23 @@
# Contributor's Guide
We welcome all contributions with open arms. At Ingonyama we take a village approach, believing it takes many hands and minds to build an ecosystem.
## Contributing to ICICLE
- Make suggestions or report bugs via [GitHub issues](https://github.com/ingonyama-zk/icicle/issues)
- Contribute to ICICLE by opening a [pull request](https://github.com/ingonyama-zk/icicle/pulls).
- Contribute to our [documentation](https://github.com/ingonyama-zk/icicle/tree/main/docs) and [examples](https://github.com/ingonyama-zk/icicle/tree/main/examples).
- Ask questions on Discord
### Opening a pull request
When opening a [pull request](https://github.com/ingonyama-zk/icicle/pulls) please keep the following in mind.
- `Clear Purpose` - The pull request should solve a single issue and be free of unrelated changes.
- `Clear description` - If the pull request is for a new feature, describe what you built, why you added it, and how best to test it. For bug fixes please describe the issue and the solution.
- `Consistent style` - Rust and Golang code should be linted by the official linters (`gofmt` and `rustfmt`) and maintain a proper style. For CUDA and C++ code we use [`clang-format`](https://github.com/ingonyama-zk/icicle/blob/main/.clang-format), [here](https://github.com/ingonyama-zk/icicle/blob/605c25f9d22135c54ac49683b710fe2ce06e2300/.github/workflows/main-format.yml#L46) you can see how we run it.
- `Minimal Tests` - please add tests which cover basic usage of your changes.
## Questions?
Find us on [Discord](https://discord.gg/6vYrE7waPj).

docs/docs/grants.md Normal file

@@ -0,0 +1,23 @@
# Ingonyama Grant programs
Ingonyama understands the importance of supporting and fostering a vibrant community of researchers and builders to advance ZK. To encourage progress, we are not only developing in the open but also sharing resources with researchers and builders through various programs.
## ICICLE ZK-GPU Ecosystem Grant
Ingonyama invites researchers and practitioners to collaborate in advancing ZK acceleration. We are allocating $100,000 for grants to support this initiative.
### Bounties & Grants
Eligibility for grants includes:
1. **Students**: Utilize ICICLE in your research.
2. **Performance Improvement**: Enhance the performance of accelerated primitives in ICICLE.
3. **Protocol Porting**: Migrate existing ZK protocols to ICICLE.
4. **New Primitives**: Contribute new primitives to ICICLE.
5. **Benchmarking**: Compare ZK benchmarks against ICICLE.
## Contact
For questions or submissions: [grants@ingonyama.com](mailto:grants@ingonyama.com)
**Read the full article [here](https://www.ingonyama.com/blog/icicle-for-researchers-grants-challenges)**


@@ -0,0 +1,138 @@
# Run ICICLE on Google Colab
Google Colab lets you use a GPU free of charge: an Nvidia T4 GPU with 16 GB of memory, capable of running the latest CUDA (tested on CUDA 12.2).
Since Colab can execute shell commands, a user can also install a framework and load git repositories into the Colab space.
## Prepare Colab environment
The first thing to do in a notebook is to set the runtime type to a T4 GPU.
- In the upper corner, click on the dropdown menu and select "Change runtime type"
![Change runtime](../../static/img/colab_change_runtime.png)
- In the window select "T4 GPU" and press Save
![T4 GPU](../../static/img/t4_gpu.png)
Installing Rust is rather simple; just execute the following command:
```sh
!apt install rustc cargo
```
To test the installation of Rust:
```sh
!rustc --version
!cargo --version
```
A successful installation will print the rustc and cargo versions; a faulty installation will look like this:
```sh
/bin/bash: line 1: rustc: command not found
/bin/bash: line 1: cargo: command not found
```
Now we will check the environment:
```sh
!nvcc --version
!gcc --version
!cmake --version
!nvidia-smi
```
A correct environment should run the `nvidia-smi` command with no bash errors and report a **Tesla T4** GPU:
```sh
nvcc: NVIDIA (R) Cuda compiler driver
Copyright (c) 2005-2023 NVIDIA Corporation
Built on Tue_Aug_15_22:02:13_PDT_2023
Cuda compilation tools, release 12.2, V12.2.140
Build cuda_12.2.r12.2/compiler.33191640_0
gcc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0
Copyright (C) 2021 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
cmake version 3.27.9
CMake suite maintained and supported by Kitware (kitware.com/cmake).
Wed Jan 17 13:10:18 2024
+---------------------------------------------------------------------------------------+
| NVIDIA-SMI 535.104.05 Driver Version: 535.104.05 CUDA Version: 12.2 |
|-----------------------------------------+----------------------+----------------------+
| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|=========================================+======================+======================|
| 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 |
| N/A 39C P8 9W / 70W | 0MiB / 15360MiB | 0% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
+---------------------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=======================================================================================|
| No running processes found |
+---------------------------------------------------------------------------------------+
```
## Cloning ICICLE and running test
Now we are ready to clone the ICICLE repository:
```sh
!git clone https://github.com/ingonyama-zk/icicle.git
```
We can now browse the repository and run tests to check the runtime environment:
```sh
!ls -la
%cd icicle
```
Let's run a test!
Navigate to `icicle/wrappers/rust/icicle-curves/icicle-bn254` and run `cargo test`:
```sh
%cd wrappers/rust/icicle-curves/icicle-bn254/
!cargo test --release
```
:::note
Compiling the first time may take a while
:::
Test run should end like this:
```sh
running 15 tests
test curve::tests::test_ark_point_convert ... ok
test curve::tests::test_ark_scalar_convert ... ok
test curve::tests::test_affine_projective_convert ... ok
test curve::tests::test_point_equality ... ok
test curve::tests::test_field_convert_montgomery ... ok
test curve::tests::test_scalar_equality ... ok
test curve::tests::test_points_convert_montgomery ... ok
test msm::tests::test_msm ... ok
test msm::tests::test_msm_skewed_distributions ... ok
test ntt::tests::test_ntt ... ok
test ntt::tests::test_ntt_arbitrary_coset ... ok
test msm::tests::test_msm_batch has been running for over 60 seconds
test msm::tests::test_msm_batch ... ok
test ntt::tests::test_ntt_coset_from_subgroup ... ok
test ntt::tests::test_ntt_device_async ... ok
test ntt::tests::test_ntt_batch ... ok
test result: ok. 15 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 99.39s
```
Voilà, ICICLE in Colab!

docs/docs/icicle/core.md Normal file

@@ -0,0 +1,196 @@
# ICICLE Core
ICICLE Core is a library written in C++/CUDA. All the ICICLE primitives are implemented within ICICLE Core.
The Core is split into logical modules that can be compiled into static libraries using different [strategies](#compilation-strategies). You can then [link](#linking) these libraries with your C++ project or write your own [bindings](#writing-new-bindings-for-icicle) for other programming languages. If you want to use ICICLE with existing bindings please refer to the [Rust](/icicle/rust-bindings) or [Golang](/icicle/golang-bindings) bindings documentation.
## Supported curves, fields and operations
### Supported curves and operations
| Operation\Curve | [bn254](https://neuromancer.sk/std/bn/bn254) | [bls12-377](https://neuromancer.sk/std/bls/BLS12-377) | [bls12-381](https://neuromancer.sk/std/bls/BLS12-381) | [bw6-761](https://eprint.iacr.org/2020/351) | grumpkin |
| --- | :---: | :---: | :---: | :---: | :---: |
| [MSM][MSM_DOCS] | ✅ | ✅ | ✅ | ✅ | ✅ |
| G2 | ✅ | ✅ | ✅ | ✅ | ❌ |
| [NTT][NTT_DOCS] | ✅ | ✅ | ✅ | ✅ | ❌ |
| ECNTT | ✅ | ✅ | ✅ | ✅ | ❌ |
| [VecOps][VECOPS_CODE] | ✅ | ✅ | ✅ | ✅ | ✅ |
| [Polynomials][POLY_DOCS] | ✅ | ✅ | ✅ | ✅ | ❌ |
| [Poseidon](primitives/poseidon) | ✅ | ✅ | ✅ | ✅ | ✅ |
| [Merkle Tree](primitives/poseidon#the-tree-builder) | ✅ | ✅ | ✅ | ✅ | ✅ |
### Supported fields and operations
| Operation\Field | [babybear](https://eprint.iacr.org/2023/824.pdf) | [Stark252](https://docs.starknet.io/documentation/architecture_and_concepts/Cryptography/p-value/) |
| --- | :---: | :---: |
| [VecOps][VECOPS_CODE] | ✅ | ✅ |
| [Polynomials][POLY_DOCS] | ✅ | ✅ |
| [NTT][NTT_DOCS] | ✅ | ✅ |
| Extension Field | ✅ | ❌ |
### Supported hashes
| Hash | Sizes |
| --- | :---: |
| Keccak | 256, 512 |
## Compilation strategies
Most of the codebase is curve/field agnostic, which means it can be compiled for different curves and fields. When you build ICICLE Core you choose a single curve or field. If you need multiple curves or fields, you compile ICICLE once per curve or field that is needed. It's that simple. Currently, the following choices are supported:
- [Field mode][COMPILE_FIELD_MODE] - used for STARK fields like BabyBear / Mersenne / Goldilocks. Includes field arithmetic, NTT, Poseidon, Extension fields and other primitives.
- [Curve mode][COMPILE_CURVE_MODE] - used for SNARK curves like BN254 / BLS curves / Grumpkin / etc. Curve mode is built upon field mode, so it includes everything that field mode does. It also includes curve operations / MSM / ECNTT / G2 and other curve-related primitives.
:::info
If you only want to use a curve's scalar or base field, you still need to use curve mode. You can disable MSM with [options](#compilation-options)
:::
### Compiling for a field
You can compile ICICLE for a field using this command:
```sh
cd icicle
mkdir -p build
cmake -DFIELD=<FIELD> -S . -B build
cmake --build build -j
```
This command will output `libingo_field_<FIELD>.a` into `build/lib`.
### Compiling for a curve
:::note
Field related primitives will be compiled for the scalar field of the curve
:::
You can compile ICICLE for a SNARK curve using this command:
```sh
cd icicle
mkdir -p build
cmake -DCURVE=<CURVE> -S . -B build
cmake --build build -j
```
Where `<CURVE>` can be one of `bn254`/`bls12_377`/`bls12_381`/`bw6_761`/`grumpkin`.
This command will output both `libingo_curve_<CURVE>.a` and `libingo_field_<CURVE>.a` into `build/lib`.
### Compilation options
There exist multiple options that allow you to customize your build or enable additional functionality.
#### EXT_FIELD
Used only in [field mode][COMPILE_FIELD_MODE] to add an Extension field. Adds all supported field operations for the extension field.
Default: `OFF`
Usage: `-DEXT_FIELD=ON`
#### G2
Used only in [curve mode][COMPILE_CURVE_MODE] to add G2 definitions. Also adds G2 MSM.
Default: `OFF`
Usage: `-DG2=ON`
#### ECNTT
Used only in [curve mode][COMPILE_CURVE_MODE] to add ECNTT function.
Default: `OFF`
Usage: `-DECNTT=ON`
#### MSM
Used only in [curve mode][COMPILE_CURVE_MODE] to add MSM function. As MSM takes a lot of time to build, you can disable it with this option to reduce compilation time.
Default: `ON`
Usage: `-DMSM=OFF`
#### BUILD_HASH
Can be used in any mode to build a hash library. Currently it only includes the Keccak hash function, but more are coming.
Default: `OFF`
Usage: `-DBUILD_HASH=ON`
#### BUILD_TESTS
Can be used in any mode to include tests runner binary.
Default: `OFF`
Usage: `-DBUILD_TESTS=ON`
#### BUILD_BENCHMARKS
Can be used in any mode to include benchmarks runner binary.
Default: `OFF`
Usage: `-DBUILD_BENCHMARKS=ON`
#### DEVMODE
Can be used in any mode to include debug symbols in the build.
Default: `OFF`
Usage: `-DDEVMODE=ON`
## Linking
To link ICICLE with your project you first need to compile ICICLE with options of your choice. After that you can use CMake `target_link_libraries` to link with the generated static libraries and `target_include_directories` to include ICICLE headers (located in `icicle/include`).
Refer to our [c++ examples](https://github.com/ingonyama-zk/icicle/tree/main/examples/c%2B%2B) for more info. Take a look at this [CMakeLists.txt](https://github.com/ingonyama-zk/icicle/blob/main/examples/c%2B%2B/msm/CMakeLists.txt#L22)
## Writing new bindings for ICICLE
Since ICICLE Core is written in CUDA / C++, it's really simple to generate static libraries. These static libraries can be installed on any system and called by higher level languages such as Golang.
Static libraries can be loaded into memory once and used by multiple programs, reducing memory usage and potentially improving performance. They also allow you to separate functionality into distinct modules, so your static library may include only the specific features you want to use.
Let's review the [Golang bindings][GOLANG_BINDINGS], since they are a fairly verbose example of using static libraries (compared to Rust, which hides it rather well). Golang has a library named `CGO` which can be used to link static libraries. Here's a basic example on how you can use cgo to link these libraries:
```go
/*
#cgo LDFLAGS: -L/path/to/shared/libs -lbn254 -lbls12_381 -lbls12_377 -lbw6_761
#include "icicle.h" // make sure you use the correct header file(s)
*/
import "C"

import "unsafe"

func main() {
	// Now you can call the C functions from the ICICLE libraries.
	// Note that C function calls are prefixed with 'C.' in Go code.
	// (p and affine are assumed to be defined elsewhere.)
	out := (*C.BN254_projective_t)(unsafe.Pointer(p))
	in := (*C.BN254_affine_t)(unsafe.Pointer(affine))

	C.projective_from_affine_bn254(out, in)
}
```
The comments on the first line tell `CGO` which libraries to import as well as which header files to include. You can then call methods which are part of the static library and defined in the header file; `C.projective_from_affine_bn254` is an example.
If you wish to create your own bindings for a language of your choice we suggest you start by investigating how you can call static libraries.
<!-- Begin Links -->
[GOLANG_BINDINGS]: golang-bindings.md
[COMPILE_CURVE_MODE]: #compiling-for-a-curve
[COMPILE_FIELD_MODE]: #compiling-for-a-field
[NTT_DOCS]: primitives/ntt
[MSM_DOCS]: primitives/msm
[POLY_DOCS]: polynomials/overview
[VECOPS_CODE]: https://github.com/ingonyama-zk/icicle/blob/main/icicle/include/vec_ops/vec_ops.cuh
<!-- End Links -->


@@ -0,0 +1,136 @@
# Golang bindings
Golang bindings allow you to use ICICLE as a golang library.
The source code for all Golang packages can be found [here](https://github.com/ingonyama-zk/icicle/tree/main/wrappers/golang).
The Golang bindings are comprised of multiple packages.
[`core`](https://github.com/ingonyama-zk/icicle/tree/main/wrappers/golang/core) which defines all shared methods and structures, such as configuration structures, or memory slices.
[`cuda-runtime`](https://github.com/ingonyama-zk/icicle/tree/main/wrappers/golang/cuda_runtime) which defines abstractions for CUDA methods for allocating memory, initializing and managing streams, and `DeviceContext` which enables users to define and keep track of devices.
Each supported curve, field, and hash has its own package which you can find in the respective directories [here](https://github.com/ingonyama-zk/icicle/tree/main/wrappers/golang). If your project uses BN254 you only need to import that single package named [`bn254`](https://github.com/ingonyama-zk/icicle/tree/main/wrappers/golang/curves/bn254).
## Using ICICLE Golang bindings in your project
To add ICICLE to your `go.mod` file, run:
```bash
go get github.com/ingonyama-zk/icicle
```
If you want to use a specific branch
```bash
go get github.com/ingonyama-zk/icicle@<branch_name>
```
For a specific commit
```bash
go get github.com/ingonyama-zk/icicle@<commit_id>
```
To build the shared libraries you can run [this](https://github.com/ingonyama-zk/icicle/tree/main/wrappers/golang/build.sh) script:
```sh
./build.sh [-curve=<curve>] [-field=<field>] [-hash=<hash>] [-cuda_version=<version>] [-g2] [-ecntt] [-devmode]
curve - The name of the curve to build or "all" to build all supported curves
field - The name of the field to build or "all" to build all supported fields
hash - The name of the hash to build or "all" to build all supported hashes
-g2 - Optional - build with G2 enabled
-ecntt - Optional - build with ECNTT enabled
-devmode - Optional - build in devmode
-help - Optional - Displays usage information
```
:::note
If more than one curve, field, or hash is supplied, only the last one supplied will be built
:::
To build ICICLE libraries for all supported curves with G2 and ECNTT enabled:
```bash
./build.sh -curve=all -g2 -ecntt
```
If you wish to build for a specific curve, for example bn254, without G2 or ECNTT enabled:
``` bash
./build.sh -curve=bn254
```
Now you can import ICICLE into your project
```go
import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/ingonyama-zk/icicle/v2/wrappers/golang/core"
	cr "github.com/ingonyama-zk/icicle/v2/wrappers/golang/cuda_runtime"
)
...
```
## Running tests
To run all tests, for all curves:
```bash
go test ./... -count=1
```
If you wish to run tests for a specific curve:
```bash
go test <path_to_curve> -count=1
```
## How do Golang bindings work?
The libraries produced from the CUDA code compilation are used to bind Golang to ICICLE's CUDA code.
1. These libraries (named `libingo_curve_<curve>.a` and `libingo_field_<curve>.a`) can be imported in your Go project to leverage the GPU accelerated functionalities provided by ICICLE.
2. In your Go project, you can use `cgo` to link these libraries. Here's a basic example on how you can use `cgo` to link these libraries:
```go
/*
#cgo LDFLAGS: -L/path/to/shared/libs -lingo_curve_bn254 -lingo_field_bn254 -lstdc++ -lm
#include "icicle.h" // make sure you use the correct header file(s)
*/
import "C"

func main() {
	// Now you can call the C functions from the ICICLE libraries.
	// Note that C function calls are prefixed with 'C.' in Go code.
}
```
Replace `/path/to/shared/libs` with the actual path where the shared libraries are located on your system.
## Supported curves, fields and operations
### Supported curves and operations
| Operation\Curve | bn254 | bls12_377 | bls12_381 | bw6-761 | grumpkin |
| --- | :---: | :---: | :---: | :---: | :---: |
| MSM | ✅ | ✅ | ✅ | ✅ | ✅ |
| G2 | ✅ | ✅ | ✅ | ✅ | ❌ |
| NTT | ✅ | ✅ | ✅ | ✅ | ❌ |
| ECNTT | ✅ | ✅ | ✅ | ✅ | ❌ |
| VecOps | ✅ | ✅ | ✅ | ✅ | ✅ |
| Polynomials | ✅ | ✅ | ✅ | ✅ | ❌ |
### Supported fields and operations
| Operation\Field | babybear |
| --- | :---: |
| VecOps | ✅ |
| Polynomials | ✅ |
| NTT | ✅ |
| Extension Field | ✅ |


@@ -0,0 +1,92 @@
# ECNTT
## ECNTT Method
The `ECNtt[T any]()` function performs the Elliptic Curve Number Theoretic Transform (EC-NTT) on the input points slice, using the provided `dir` (direction) and `cfg` (configuration), and stores the results in the `results` slice.
```go
func ECNtt[T any](points core.HostOrDeviceSlice, dir core.NTTDir, cfg *core.NTTConfig[T], results core.HostOrDeviceSlice) core.IcicleError
```
### Parameters
- **`points`**: A slice of elliptic curve points (in projective coordinates) that will be transformed. The slice can be stored on the host or the device, as indicated by the `core.HostOrDeviceSlice` type.
- **`dir`**: The direction of the EC-NTT transform, either `core.KForward` or `core.KInverse`.
- **`cfg`**: A pointer to an `NTTConfig` object, containing configuration options for the NTT operation.
- **`results`**: A slice that will store the transformed elliptic curve points (in projective coordinates). The slice can be stored on the host or the device, as indicated by the `core.HostOrDeviceSlice` type.
### Return Value
- **`IcicleError`**: A `core.IcicleError` value, which will be `core.IcicleErrorCode(0)` if the EC-NTT operation was successful, or an error if something went wrong.
## NTT Configuration (NTTConfig)
The `NTTConfig` structure holds configuration parameters for the NTT operation, allowing customization of its behavior to optimize performance based on the specifics of your protocol.
```go
type NTTConfig[T any] struct {
	Ctx                cr.DeviceContext
	CosetGen           T
	BatchSize          int32
	ColumnsBatch       bool
	Ordering           Ordering
	areInputsOnDevice  bool
	areOutputsOnDevice bool
	IsAsync            bool
	NttAlgorithm       NttAlgorithm
}
```
### Fields
- **`Ctx`**: Device context containing details like device ID and stream ID.
- **`CosetGen`**: Coset generator used for coset (i)NTTs, defaulting to no coset being used.
- **`BatchSize`**: The number of NTTs to compute in one operation, defaulting to 1.
- **`ColumnsBatch`**: If true the function will compute the NTTs over the columns of the input matrix and not over the rows. Defaults to `false`.
- **`Ordering`**: Ordering of inputs and outputs (`KNN`, `KNR`, `KRN`, `KRR`), affecting how data is arranged.
- **`areInputsOnDevice`**: Indicates if input scalars are located on the device.
- **`areOutputsOnDevice`**: Indicates if results are stored on the device.
- **`IsAsync`**: Controls whether the NTT operation runs asynchronously.
- **`NttAlgorithm`**: Explicitly select the NTT algorithm. ECNTT supports the `Radix2` algorithm.
### Default Configuration
Use `GetDefaultNTTConfig` to obtain a default configuration, customizable as needed.
```go
func GetDefaultNTTConfig[T any](cosetGen T) NTTConfig[T]
```
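
For illustration, here is a minimal sketch of tuning a few of the documented fields before calling `ECNtt`. The values are arbitrary examples, and the unqualified `GetDefaultNttConfig` is assumed to come from the curve package, as in the full example below:
```go
// A sketch of customizing NTTConfig; the field values are illustrative only.
cfg := GetDefaultNttConfig()
cfg.BatchSize = 16      // compute 16 same-size EC-NTTs in a single operation
cfg.Ordering = core.KNR // natural-order inputs, bit-reversed outputs
cfg.IsAsync = true      // launch asynchronously on the stream in cfg.Ctx
```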
## ECNTT Example
```go
package main

import (
	"github.com/ingonyama-zk/icicle/v2/wrappers/golang/core"
)

func main() {
	// Obtain the default NTT configuration.
	cfg := GetDefaultNttConfig()

	// Define the size of the input scalars.
	size := 1 << 18

	// Generate points for the ECNTT operation.
	points := GenerateProjectivePoints(size)

	// Set the direction of the NTT (forward or inverse).
	dir := core.KForward

	// Allocate memory for the results of the NTT operation.
	results := make(core.HostSlice[Projective], size)

	// Perform the NTT operation and check the returned IcicleError.
	err := ECNtt(points, dir, &cfg, results)
	if err.IcicleErrorCode != core.IcicleErrorCode(0) {
		panic("ECNTT operation failed")
	}
}
```


@@ -0,0 +1,94 @@
# Keccak
## Keccak Example
```go
package main

import (
	"encoding/hex"

	"github.com/ingonyama-zk/icicle/v2/wrappers/golang/core"
	cr "github.com/ingonyama-zk/icicle/v2/wrappers/golang/cuda_runtime"
	"github.com/ingonyama-zk/icicle/v2/wrappers/golang/hash/keccak"
)

func createHostSliceFromHexString(hexString string) core.HostSlice[uint8] {
	byteArray, err := hex.DecodeString(hexString)
	if err != nil {
		panic("Not a hex string")
	}
	return core.HostSliceFromElements([]uint8(byteArray))
}

func main() {
	input := createHostSliceFromHexString("1725b6")
	outHost256 := make(core.HostSlice[uint8], 32)

	cfg := keccak.GetDefaultHashConfig()
	e := keccak.Keccak256(input, int32(input.Len()), 1, outHost256, &cfg)
	if e.CudaErrorCode != cr.CudaSuccess {
		panic("Keccak256 hashing failed")
	}

	outHost512 := make(core.HostSlice[uint8], 64)
	e = keccak.Keccak512(input, int32(input.Len()), 1, outHost512, &cfg)
	if e.CudaErrorCode != cr.CudaSuccess {
		panic("Keccak512 hashing failed")
	}

	numberOfBlocks := 3
	outHostBatch256 := make(core.HostSlice[uint8], 32*numberOfBlocks)
	e = keccak.Keccak256(input, int32(input.Len()/numberOfBlocks), int32(numberOfBlocks), outHostBatch256, &cfg)
	if e.CudaErrorCode != cr.CudaSuccess {
		panic("Keccak256 batch hashing failed")
	}
}
```
## Keccak Methods
```go
func Keccak256(input core.HostOrDeviceSlice, inputBlockSize, numberOfBlocks int32, output core.HostOrDeviceSlice, config *HashConfig) core.IcicleError
func Keccak512(input core.HostOrDeviceSlice, inputBlockSize, numberOfBlocks int32, output core.HostOrDeviceSlice, config *HashConfig) core.IcicleError
```
### Parameters
- **`input`**: A slice containing the input data for the Keccak256 hash function. It can reside in either host memory or device memory.
- **`inputBlockSize`**: An integer specifying the size of the input data for a single hash.
- **`numberOfBlocks`**: An integer specifying the number of results in the hash batch.
- **`output`**: A slice where the resulting hash will be stored. This slice can be in host or device memory.
- **`config`**: A pointer to a `HashConfig` object, which contains various configuration options for the Keccak256 operation.
### Return Value
- **`IcicleError`**: Returns a `core.IcicleError` indicating the success or failure of the Keccak256/Keccak512 operation.
## HashConfig
The `HashConfig` structure holds configuration parameters for the Keccak256/Keccak512 operation, allowing customization of its behavior to optimize performance based on the specifics of the operation or the underlying hardware.
```go
type HashConfig struct {
	Ctx                cr.DeviceContext
	areInputsOnDevice  bool
	areOutputsOnDevice bool
	IsAsync            bool
}
```
### Fields
- **`Ctx`**: Device context containing details like device id and stream.
- **`areInputsOnDevice`**: Indicates if input data is located on the device.
- **`areOutputsOnDevice`**: Indicates if output hash is stored on the device.
- **`IsAsync`**: If true, runs the Keccak256/Keccak512 operation asynchronously.
### Default Configuration
Use `GetDefaultHashConfig` to obtain a default configuration, which can then be customized as needed.
```go
func GetDefaultHashConfig() HashConfig
```
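
As an illustration, here is a sketch of customizing the default config to hash asynchronously on a dedicated stream. It assumes the imports and the `input`/`outHost256` setup from the example above, and borrows the stream handling shown in the MSM docs:
```go
// Sketch: run Keccak256 asynchronously on a dedicated CUDA stream.
stream, _ := cr.CreateStream()

cfg := keccak.GetDefaultHashConfig()
cfg.Ctx.Stream = &stream
cfg.IsAsync = true

e := keccak.Keccak256(input, int32(input.Len()), 1, outHost256, &cfg)
if e.CudaErrorCode != cr.CudaSuccess {
	panic("Keccak256 hashing failed")
}

// Wait for the hash to complete before reading outHost256.
cr.SynchronizeStream(&stream)
```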


@@ -0,0 +1,99 @@
# MSM Precomputation
To understand the theory behind the MSM precomputation technique, refer to Niall Emmart's [talk](https://youtu.be/KAWlySN7Hm8?feature=shared&t=1734).
## Core package
### MSM PrecomputePoints
`PrecomputePoints` and `G2PrecomputePoints` exist for all supported curves.
#### Description
This function extends each provided base point $P$ with its multiples $2^l P, 2^{2l} P, \ldots, 2^{(\text{precompute\_factor} - 1) \cdot l} P$, where $l$ is a level of precomputation determined by the `precompute_factor`. The extended set of points facilitates faster MSM computations by allowing the MSM algorithm to leverage precomputed multiples of base points, reducing the number of point additions required during the computation.
The precomputation process is crucial for optimizing MSM operations, especially when dealing with large sets of points and scalars. By precomputing and storing multiples of the base points, the MSM function can more efficiently compute the scalar-point multiplications.
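To see why this helps, consider the standard windowed decomposition that such a precomputation enables (a generic illustration of the technique, not the exact kernel logic). With $k$ chunks of $l$ bits each, where $k$ is the `precompute_factor`, a scalar $s$ splits as:

$$
s = \sum_{i=0}^{k-1} s_i \cdot 2^{il}, \quad 0 \le s_i < 2^l
\qquad\Longrightarrow\qquad
s \cdot P = \sum_{i=0}^{k-1} s_i \cdot \left(2^{il} P\right)
$$

Since every $2^{il} P$ is already stored, the MSM only has to combine small $l$-bit scalars with precomputed points, which is where the reduction in point additions comes from.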
#### `PrecomputePoints`
Precomputes points for MSM by extending each base point with its multiples.
```go
func PrecomputePoints(points core.HostOrDeviceSlice, msmSize int, cfg *core.MSMConfig, outputBases core.DeviceSlice) cr.CudaError
```
##### Parameters
- **`points`**: A slice of the original affine points to be extended with their multiples.
- **`msmSize`**: The size of a single MSM, used to determine optimal parameters.
- **`cfg`**: The MSM configuration parameters.
- **`outputBases`**: The device slice allocated for storing the extended points.
##### Example
```go
package main

import (
	"log"

	"github.com/ingonyama-zk/icicle/v2/wrappers/golang/core"
	cr "github.com/ingonyama-zk/icicle/v2/wrappers/golang/cuda_runtime"
	bn254 "github.com/ingonyama-zk/icicle/v2/wrappers/golang/curves/bn254"
)

func main() {
	cfg := bn254.GetDefaultMSMConfig()
	points := bn254.GenerateAffinePoints(1024)
	var precomputeFactor int32 = 8

	var precomputeOut core.DeviceSlice
	precomputeOut.Malloc(points[0].Size()*points.Len()*int(precomputeFactor), points[0].Size())

	err := bn254.PrecomputePoints(points, 1024, &cfg, precomputeOut)
	if err != cr.CudaSuccess {
		log.Fatalf("PrecomputePoints failed: %v", err)
	}
}
```
#### `G2PrecomputePoints`
This method is the same as `PrecomputePoints` but for G2 points. Extends each G2 curve base point with its multiples for optimized MSM computations.
```go
func G2PrecomputePoints(points core.HostOrDeviceSlice, msmSize int, cfg *core.MSMConfig, outputBases core.DeviceSlice) cr.CudaError
```
##### Parameters
- **`points`**: A slice of the original affine points to be extended with their multiples.
- **`msmSize`**: The size of a single MSM, used to determine optimal parameters.
- **`cfg`**: The MSM configuration parameters.
- **`outputBases`**: The device slice allocated for storing the extended points.
##### Example
```go
package main

import (
    "log"

    "github.com/ingonyama-zk/icicle/v2/wrappers/golang/core"
    cr "github.com/ingonyama-zk/icicle/v2/wrappers/golang/cuda_runtime"
    g2 "github.com/ingonyama-zk/icicle/v2/wrappers/golang/curves/bn254/g2"
)

func main() {
    cfg := g2.G2GetDefaultMSMConfig()
    points := g2.G2GenerateAffinePoints(1024)
    var precomputeFactor int32 = 8
    var precomputeOut core.DeviceSlice
    precomputeOut.Malloc(points[0].Size()*points.Len()*int(precomputeFactor), points[0].Size())

    err := g2.G2PrecomputePoints(points, 1024, &cfg, precomputeOut)
    if err != cr.CudaSuccess {
        log.Fatalf("G2PrecomputePoints failed: %v", err)
    }
}
```


@@ -0,0 +1,198 @@
# MSM
## MSM Example
```go
package main

import (
    "github.com/ingonyama-zk/icicle/v2/wrappers/golang/core"
    cr "github.com/ingonyama-zk/icicle/v2/wrappers/golang/cuda_runtime"
    "github.com/ingonyama-zk/icicle/v2/wrappers/golang/curves/bn254"
    bn254_msm "github.com/ingonyama-zk/icicle/v2/wrappers/golang/curves/bn254/msm"
)

func main() {
    // Obtain the default MSM configuration.
    cfg := core.GetDefaultMSMConfig()

    // Define the size of the problem, here 2^18.
    size := 1 << 18

    // Generate scalars and points for the MSM operation.
    scalars := bn254.GenerateScalars(size)
    points := bn254.GenerateAffinePoints(size)

    // Create a CUDA stream for asynchronous operations.
    stream, _ := cr.CreateStream()
    var p bn254.Projective

    // Allocate memory on the device for the result of the MSM operation.
    var out core.DeviceSlice
    _, e := out.MallocAsync(p.Size(), p.Size(), stream)
    if e != cr.CudaSuccess {
        panic(e)
    }

    // Set the CUDA stream in the MSM configuration.
    cfg.Ctx.Stream = &stream
    cfg.IsAsync = true

    // Perform the MSM operation.
    e = bn254_msm.Msm(scalars, points, &cfg, out)
    if e != cr.CudaSuccess {
        panic(e)
    }

    // Allocate host memory for the results and copy the results from the device.
    outHost := make(core.HostSlice[bn254.Projective], 1)
    cr.SynchronizeStream(&stream)
    outHost.CopyFromDevice(&out)

    // Free the device memory allocated for the results.
    out.Free()
}
```
## MSM Method
```go
func Msm(scalars core.HostOrDeviceSlice, points core.HostOrDeviceSlice, cfg *core.MSMConfig, results core.HostOrDeviceSlice) cr.CudaError
```
### Parameters
- **`scalars`**: A slice containing the scalars for multiplication. It can reside either in host memory or device memory.
- **`points`**: A slice containing the points to be multiplied with scalars. Like scalars, these can also be in host or device memory.
- **`cfg`**: A pointer to an `MSMConfig` object, which contains various configuration options for the MSM operation.
- **`results`**: A slice where the results of the MSM operation will be stored. This slice can be in host or device memory.
### Return Value
- **`CudaError`**: Returns a CUDA error code indicating the success or failure of the MSM operation.
## MSMConfig
The `MSMConfig` structure holds configuration parameters for the MSM operation, allowing customization of its behavior to optimize performance based on the specifics of the operation or the underlying hardware.
```go
type MSMConfig struct {
    Ctx                      cr.DeviceContext
    PrecomputeFactor         int32
    C                        int32
    Bitsize                  int32
    LargeBucketFactor        int32
    batchSize                int32
    areScalarsOnDevice       bool
    AreScalarsMontgomeryForm bool
    arePointsOnDevice        bool
    ArePointsMontgomeryForm  bool
    areResultsOnDevice       bool
    IsBigTriangle            bool
    IsAsync                  bool
}
```
### Fields
- **`Ctx`**: Device context containing details like device id and stream.
- **`PrecomputeFactor`**: Controls the number of extra points to pre-compute.
- **`C`**: Window bitsize, a key parameter in the "bucket method" for MSM.
- **`Bitsize`**: Number of bits of the largest scalar.
- **`LargeBucketFactor`**: Sensitivity to frequently occurring buckets.
- **`batchSize`**: Number of results to compute in one batch.
- **`areScalarsOnDevice`**: Indicates if scalars are located on the device.
- **`AreScalarsMontgomeryForm`**: True if scalars are in Montgomery form.
- **`arePointsOnDevice`**: Indicates if points are located on the device.
- **`ArePointsMontgomeryForm`**: True if point coordinates are in Montgomery form.
- **`areResultsOnDevice`**: Indicates if results are stored on the device.
- **`IsBigTriangle`**: If `true`, MSM will use large triangle accumulation; if `false`, bucket accumulation will be used. Default value: `false`.
- **`IsAsync`**: If true, runs MSM asynchronously.
### Default Configuration
Use `GetDefaultMSMConfig` to obtain a default configuration, which can then be customized as needed.
```go
func GetDefaultMSMConfig() MSMConfig
```
## How do I toggle between the supported algorithms?
When creating your MSM config you may state which algorithm you wish to use. `cfg.IsBigTriangle = true` will activate large triangle accumulation and `cfg.IsBigTriangle = false` will activate iterative reduction.
```go
...
// Obtain the default MSM configuration.
cfg := GetDefaultMSMConfig()
cfg.IsBigTriangle = true
...
```
## How do I toggle between MSM modes?
Toggling between MSM modes occurs automatically based on the number of results you are expecting from the `MSM` function.
The number of results is interpreted from the size of `var out core.DeviceSlice`. Thus it's important when allocating memory for `var out core.DeviceSlice` to make sure that you are allocating `<number of results> × <size of a single point>`.
```go
...
batchSize := 3
var p G2Projective
var out core.DeviceSlice
out.Malloc(batchSize*p.Size(), p.Size())
...
```
## Parameters for optimal performance
Please refer to the [primitive description](../primitives/msm#choosing-optimal-parameters)
## Support for G2 group
To activate G2 support, first make sure you are building the static libraries with the G2 feature enabled, as described in the [Golang building instructions](../golang-bindings.md#using-icicle-golang-bindings-in-your-project).
Then you may import the `g2` package of the specified curve.
```go
import (
    "github.com/ingonyama-zk/icicle/v2/wrappers/golang/curves/bn254/g2"
)
```
This package includes `G2Projective` and `G2Affine` points as well as a `G2Msm` method.
```go
package main

import (
    "github.com/ingonyama-zk/icicle/v2/wrappers/golang/core"
    bn254 "github.com/ingonyama-zk/icicle/v2/wrappers/golang/curves/bn254"
    g2 "github.com/ingonyama-zk/icicle/v2/wrappers/golang/curves/bn254/g2"
)

func main() {
    cfg := core.GetDefaultMSMConfig()
    size := 1 << 12
    batchSize := 3
    totalSize := size * batchSize
    scalars := bn254.GenerateScalars(totalSize)
    points := g2.G2GenerateAffinePoints(totalSize)

    var p g2.G2Projective
    var out core.DeviceSlice
    out.Malloc(batchSize*p.Size(), p.Size())
    g2.G2Msm(scalars, points, &cfg, out)
}
```
`G2Msm` works the same way as the regular MSM; the difference is that it operates on G2 points.


@@ -0,0 +1,155 @@
# Multi GPU APIs
To learn more about the theory of multi-GPU programming, refer to [this part](../multi-gpu.md) of the documentation.
Here we will cover the core multi-GPU APIs and an [example](#a-multi-gpu-example).
## A Multi GPU example
In this example we will show how to:
1. Fetch the number of devices installed on a machine.
2. Launch a thread for every GPU and set an active device per thread.
3. Execute an MSM on each GPU.
```go
package main

import (
    "fmt"
    "sync"

    "github.com/ingonyama-zk/icicle/v2/wrappers/golang/core"
    cr "github.com/ingonyama-zk/icicle/v2/wrappers/golang/cuda_runtime"
    bn254 "github.com/ingonyama-zk/icicle/v2/wrappers/golang/curves/bn254"
)

func main() {
    numDevices, _ := cr.GetDeviceCount()
    fmt.Println("There are ", numDevices, " devices available")
    wg := sync.WaitGroup{}

    for i := 0; i < numDevices; i++ {
        wg.Add(1)
        // RunOnDevice makes sure each MSM runs on a single thread
        cr.RunOnDevice(i, func(args ...any) {
            defer wg.Done()
            cfg := bn254.GetDefaultMSMConfig()
            cfg.IsAsync = true
            for _, power := range []int{10, 18} {
                size := 1 << power // 2^power

                // generate random scalars
                scalars := bn254.GenerateScalars(size)
                points := bn254.GenerateAffinePoints(size)

                // create a stream and allocate result pointer
                stream, _ := cr.CreateStream()
                var p bn254.Projective
                var out core.DeviceSlice
                out.MallocAsync(p.Size(), p.Size(), stream)

                // assign stream to device context
                cfg.Ctx.Stream = &stream

                // execute MSM
                bn254.Msm(scalars, points, &cfg, out)

                // read result from device
                outHost := make(core.HostSlice[bn254.Projective], 1)
                outHost.CopyFromDeviceAsync(&out, stream)
                out.FreeAsync(stream)

                // sync the stream
                cr.SynchronizeStream(&stream)
            }
        })
    }
    wg.Wait()
}
```
This example demonstrates a basic pattern for distributing tasks across multiple GPUs. The `RunOnDevice` function ensures that each goroutine is executed on its designated GPU and a corresponding thread.
## Device Management API
To streamline device management, the `cuda_runtime` package offers methods for dealing with devices.
### `RunOnDevice`
Runs a given function on a specific GPU device, ensuring that all CUDA calls within the function are executed on the selected device.
In Go, most concurrency is handled via goroutines. However, there is no guarantee that a goroutine stays on a specific host thread.
`RunOnDevice` was designed to solve this problem and to ensure that a goroutine stays on a specific host thread.
`RunOnDevice` locks a goroutine into a specific host thread, sets a current GPU device, runs a provided function, and unlocks the goroutine from the host thread after the provided function finishes.
While the goroutine is locked to the host thread, the Go runtime will not assign other goroutines to that host thread.
**Parameters:**
- **`deviceId int`**: The ID of the device on which to run the provided function. Device IDs start from 0.
- **`funcToRun func(args ...any)`**: The function to be executed on the specified device.
- **`args ...any`**: Arguments to be passed to `funcToRun`.
**Behavior:**
- The function `funcToRun` is executed in a new goroutine that is locked to a specific OS thread to ensure that all CUDA calls within the function target the specified device.
:::note
Any goroutines launched within `funcToRun` are not automatically bound to the same GPU device. If necessary, `RunOnDevice` should be called again within such goroutines with the same `deviceId`.
:::
**Example:**
```go
RunOnDevice(0, func(args ...any) {
    fmt.Println("This runs on GPU 0")
    // CUDA-related operations here will target GPU 0
}, nil)
```
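Per the note above, a goroutine launched inside `funcToRun` must be re-bound to the device explicitly; here is a minimal sketch of that pattern:
```go
cr.RunOnDevice(1, func(args ...any) {
    // This code is bound to device 1.
    // Additional concurrent work must be bound to the same device again:
    cr.RunOnDevice(1, func(args ...any) {
        // Also runs on device 1, on its own locked host thread.
    })
})
```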
### `SetDevice`
Sets the active device for the current host thread. All subsequent CUDA calls made from this thread will target the specified device.
:::warning
This function should not be used directly in conjunction with goroutines. If you want to run multi-GPU scenarios with goroutines you should use [RunOnDevice](#runondevice).
:::
**Parameters:**
- **`device int`**: The ID of the device to set as the current device.
**Returns:**
- **`CudaError`**: Error code indicating the success or failure of the operation.
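A minimal sketch, assuming the `cr` import alias for the `cuda_runtime` package used throughout these docs:
```go
if err := cr.SetDevice(0); err != cr.CudaSuccess {
    panic("failed to set the active CUDA device")
}
// All CUDA calls made from this host thread now target device 0.
```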
### `GetDeviceCount`
Retrieves the number of CUDA-capable devices available on the host.
**Returns:**
- **`(int, CudaError)`**: The number of devices and an error code indicating the success or failure of the operation.
### `GetDevice`
Gets the ID of the currently active device for the calling host thread.
**Returns:**
- **`(int, CudaError)`**: The ID of the current device and an error code indicating the success or failure of the operation.
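A minimal sketch combining the two queries above, again assuming the `cr` import alias:
```go
numDevices, err := cr.GetDeviceCount()
if err != cr.CudaSuccess {
    panic("failed to query the device count")
}
currentDevice, _ := cr.GetDevice()
fmt.Printf("%d device(s) available, current device id: %d\n", numDevices, currentDevice)
```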
### `GetDeviceFromPointer`
Retrieves the device associated with a given pointer.
**Parameters:**
- **`ptr unsafe.Pointer`**: Pointer to query.
**Returns:**
- **`int`**: The device ID associated with the memory pointed to by `ptr`.
This documentation should provide a clear understanding of how to effectively manage multiple GPUs in Go applications using CUDA, with a particular emphasis on the `RunOnDevice` function for executing tasks on specific GPUs.


@@ -0,0 +1,151 @@
# NTT
## NTT Example
```go
package main

import (
    "github.com/ingonyama-zk/icicle/v2/wrappers/golang/core"
    cr "github.com/ingonyama-zk/icicle/v2/wrappers/golang/cuda_runtime"
    bn254 "github.com/ingonyama-zk/icicle/v2/wrappers/golang/curves/bn254"

    "github.com/consensys/gnark-crypto/ecc/bn254/fr/fft"
)

func init() {
    cfg := bn254.GetDefaultNttConfig()
    initDomain(18, cfg)
}

func initDomain[T any](largestTestSize int, cfg core.NTTConfig[T]) core.IcicleError {
    rouMont, _ := fft.Generator(uint64(1 << largestTestSize))
    rou := rouMont.Bits()
    rouIcicle := bn254.ScalarField{}
    rouIcicle.FromLimbs(rou[:])
    e := bn254.InitDomain(rouIcicle, cfg.Ctx, false)
    return e
}

func main() {
    // Obtain the default NTT configuration with a predefined coset generator.
    cfg := bn254.GetDefaultNttConfig()

    // Define the size of the input scalars.
    size := 1 << 18

    // Generate scalars for the NTT operation.
    scalars := bn254.GenerateScalars(size)

    // Set the direction of the NTT (forward or inverse).
    dir := core.KForward

    // Allocate memory for the results of the NTT operation.
    results := make(core.HostSlice[bn254.ScalarField], size)

    // Perform the NTT operation.
    err := bn254.Ntt(scalars, dir, &cfg, results)
    if err.CudaErrorCode != cr.CudaSuccess {
        panic("NTT operation failed")
    }
}
```
## NTT Method
```go
func Ntt[T any](scalars core.HostOrDeviceSlice, dir core.NTTDir, cfg *core.NTTConfig[T], results core.HostOrDeviceSlice) core.IcicleError
```
### Parameters
- **`scalars`**: A slice containing the input scalars for the transform. It can reside either in host memory or device memory.
- **`dir`**: The direction of the NTT operation (`KForward` or `KInverse`).
- **`cfg`**: A pointer to an `NTTConfig` object, containing configuration options for the NTT operation.
- **`results`**: A slice where the results of the NTT operation will be stored. This slice can be in host or device memory.
### Return Value
- **`IcicleError`**: Returns an `IcicleError` indicating the success or failure of the NTT operation.
## NTT Configuration (NTTConfig)
The `NTTConfig` structure holds configuration parameters for the NTT operation, allowing customization of its behavior to optimize performance based on the specifics of your protocol.
```go
type NTTConfig[T any] struct {
    Ctx                cr.DeviceContext
    CosetGen           T
    BatchSize          int32
    ColumnsBatch       bool
    Ordering           Ordering
    areInputsOnDevice  bool
    areOutputsOnDevice bool
    IsAsync            bool
    NttAlgorithm       NttAlgorithm
}
```
### Fields
- **`Ctx`**: Device context containing details like device ID and stream ID.
- **`CosetGen`**: Coset generator used for coset (i)NTTs, defaulting to no coset being used.
- **`BatchSize`**: The number of NTTs to compute in one operation, defaulting to 1.
- **`ColumnsBatch`**: If `true`, the NTTs are computed over the columns of the input matrix rather than over the rows. Defaults to `false`.
- **`Ordering`**: Ordering of inputs and outputs (`KNN`, `KNR`, `KRN`, `KRR`, `KMN`, `KNM`), affecting how data is arranged.
- **`areInputsOnDevice`**: Indicates if input scalars are located on the device.
- **`areOutputsOnDevice`**: Indicates if results are stored on the device.
- **`IsAsync`**: Controls whether the NTT operation runs asynchronously.
- **`NttAlgorithm`**: Explicitly select the NTT algorithm. Default value: Auto (the implementation selects radix-2 or mixed-radix algorithm based on heuristics).
### Default Configuration
Use `GetDefaultNTTConfig` to obtain a default configuration, customizable as needed.
```go
func GetDefaultNTTConfig[T any](cosetGen T) NTTConfig[T]
```
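For example, here is a minimal sketch of customizing the returned configuration for a batched, column-order NTT; the field names come from the struct above, while the reading of `KNR` as natural-order inputs with bit-reversed outputs is an assumption.
```go
cfg := bn254.GetDefaultNttConfig()
cfg.BatchSize = 16      // compute 16 NTTs in a single call
cfg.ColumnsBatch = true // transform the columns of the input matrix instead of the rows
cfg.Ordering = core.KNR // assumed: natural-order inputs, bit-reversed outputs
```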
### Initializing the NTT Domain
Before performing NTT operations, the NTT domain must be initialized; this only needs to be done once per GPU, since the twiddle factors are cached.
```go
func InitDomain(primitiveRoot ScalarField, ctx cr.DeviceContext, fastTwiddles bool) core.IcicleError
```
This function initializes the domain with a given primitive root, optionally using fast twiddle factors to optimize the computation.
### Releasing the domain
The `ReleaseDomain` function is responsible for releasing the resources associated with a specific domain in the CUDA device context.
```go
func ReleaseDomain(ctx cr.DeviceContext) core.IcicleError
```
### Parameters
- **`ctx`**: a reference to the `DeviceContext` object, which represents the CUDA device context.
### Return Value
The function returns a `core.IcicleError`, which represents the result of the operation. On success, the returned error carries the code `core.IcicleErrorCode(0)`.
### Example
```go
import (
    "github.com/ingonyama-zk/icicle/v2/wrappers/golang/core"
    bn254 "github.com/ingonyama-zk/icicle/v2/wrappers/golang/curves/bn254"
)

func example() {
    cfg := bn254.GetDefaultNttConfig()
    err := bn254.ReleaseDomain(cfg.Ctx)
    if err.IcicleErrorCode != core.IcicleErrorCode(0) {
        // Handle the error
    }
}
```


@@ -0,0 +1,188 @@
# Vector Operations
## Overview
Icicle exposes a number of vector operations which a user can use:
* The VecOps API provides efficient vector operations such as addition, subtraction, and multiplication.
* The MatrixTranspose API allows a user to perform a transpose on a vector representation of a matrix.
## VecOps API Documentation
### Example
#### Vector addition
```go
package main

import (
    "github.com/ingonyama-zk/icicle/v2/wrappers/golang/core"
    cr "github.com/ingonyama-zk/icicle/v2/wrappers/golang/cuda_runtime"
    bn254 "github.com/ingonyama-zk/icicle/v2/wrappers/golang/curves/bn254"
)

func main() {
    testSize := 1 << 12
    a := bn254.GenerateScalars(testSize)
    b := bn254.GenerateScalars(testSize)
    out := make(core.HostSlice[bn254.ScalarField], testSize)
    cfg := core.DefaultVecOpsConfig()

    // Perform vector addition
    err := bn254.VecOp(a, b, out, cfg, core.Add)
    if err != cr.CudaSuccess {
        panic("Vector addition failed")
    }
}
```
#### Vector Subtraction
```go
package main

import (
    "github.com/ingonyama-zk/icicle/v2/wrappers/golang/core"
    cr "github.com/ingonyama-zk/icicle/v2/wrappers/golang/cuda_runtime"
    bn254 "github.com/ingonyama-zk/icicle/v2/wrappers/golang/curves/bn254"
)

func main() {
    testSize := 1 << 12
    a := bn254.GenerateScalars(testSize)
    b := bn254.GenerateScalars(testSize)
    out := make(core.HostSlice[bn254.ScalarField], testSize)
    cfg := core.DefaultVecOpsConfig()

    // Perform vector subtraction
    err := bn254.VecOp(a, b, out, cfg, core.Sub)
    if err != cr.CudaSuccess {
        panic("Vector subtraction failed")
    }
}
```
#### Vector Multiplication
```go
package main

import (
    "github.com/ingonyama-zk/icicle/v2/wrappers/golang/core"
    cr "github.com/ingonyama-zk/icicle/v2/wrappers/golang/cuda_runtime"
    bn254 "github.com/ingonyama-zk/icicle/v2/wrappers/golang/curves/bn254"
)

func main() {
    testSize := 1 << 12
    a := bn254.GenerateScalars(testSize)
    b := bn254.GenerateScalars(testSize)
    out := make(core.HostSlice[bn254.ScalarField], testSize)
    cfg := core.DefaultVecOpsConfig()

    // Perform vector multiplication
    err := bn254.VecOp(a, b, out, cfg, core.Mul)
    if err != cr.CudaSuccess {
        panic("Vector multiplication failed")
    }
}
```
### VecOps Method
```go
func VecOp(a, b, out core.HostOrDeviceSlice, config core.VecOpsConfig, op core.VecOps) (ret cr.CudaError)
```
#### Parameters
- **`a`**: The first input vector.
- **`b`**: The second input vector.
- **`out`**: The output vector where the result of the operation will be stored.
- **`config`**: A `VecOpsConfig` object containing various configuration options for the vector operations.
- **`op`**: The operation to perform, specified as one of the constants (`Sub`, `Add`, `Mul`) from the `VecOps` type.
#### Return Value
- **`CudaError`**: Returns a CUDA error code indicating the success or failure of the vector operation.
### VecOpsConfig
The `VecOpsConfig` structure holds configuration parameters for the vector operations, allowing customization of its behavior.
```go
type VecOpsConfig struct {
    Ctx              cr.DeviceContext
    isAOnDevice      bool
    isBOnDevice      bool
    isResultOnDevice bool
    IsAsync          bool
}
```
#### Fields
- **Ctx**: Device context containing details like device ID and stream ID.
- **isAOnDevice**: Indicates if vector `a` is located on the device.
- **isBOnDevice**: Indicates if vector `b` is located on the device.
- **isResultOnDevice**: Specifies where the result vector should be stored (device or host memory).
- **IsAsync**: Controls whether the vector operation runs asynchronously.
#### Default Configuration
Use `DefaultVecOpsConfig` to obtain a default configuration, customizable as needed.
```go
func DefaultVecOpsConfig() VecOpsConfig
```
## MatrixTranspose API Documentation
This section describes the functionality of the `TransposeMatrix` function used for matrix transposition.
The function takes a matrix represented as a 1D slice and transposes it, storing the result in another 1D slice.
### Function
```go
func TransposeMatrix(in, out core.HostOrDeviceSlice, columnSize, rowSize int, ctx cr.DeviceContext, onDevice, isAsync bool) (ret core.IcicleError)
```
### Parameters
- **`in`**: The input matrix is a `core.HostOrDeviceSlice`, stored as a 1D slice.
- **`out`**: The output matrix is a `core.HostOrDeviceSlice`, which will be the transpose of the input matrix, stored as a 1D slice.
- **`columnSize`**: The number of columns in the input matrix.
- **`rowSize`**: The number of rows in the input matrix.
- **`ctx`**: The device context `cr.DeviceContext` to be used for the matrix transpose operation.
- **`onDevice`**: Indicates whether the input and output slices are stored on the device (GPU) or the host (CPU).
- **`isAsync`**: Indicates whether the matrix transpose operation should be executed asynchronously.
### Return Value
The function returns a `core.IcicleError` value, which represents the result of the matrix transpose operation. If the operation is successful, the returned value will be `0`.
### Example Usage
```go
var input = make(core.HostSlice[ScalarField], 20)
var output = make(core.HostSlice[ScalarField], 20)

// Populate the input matrix
// ...

// Get device context
ctx, _ := cr.GetDefaultDeviceContext()

// Transpose the matrix
err := TransposeMatrix(input, output, 5, 4, ctx, false, false)
if err.IcicleErrorCode != core.IcicleErrorCode(0) {
    // Handle the error
}

// Use the transposed matrix
// ...
```
In this example, the `TransposeMatrix` function is used to transpose a 5x4 matrix stored in a 1D slice. The input and output slices are stored on the host (CPU), and the operation is executed synchronously.

BIN docs/docs/icicle/image.png (new binary file, 35 KiB; not shown)


@@ -0,0 +1,97 @@
# ICICLE integrated provers
ICICLE has been used by companies and projects such as [Celer Network](https://github.com/celer-network), [Consensys Gnark](https://github.com/Consensys/gnark), [EZKL](https://blog.ezkl.xyz/post/acceleration/), [ZKWASM](https://twitter.com/DelphinusLab/status/1762604988797513915) and others to accelerate their ZK proving pipeline.
Many of these integrations have been a collaboration between Ingonyama and the integrating company. We have learned a lot about designing GPU-based ZK provers.
If you're interested in understanding these integrations better or learning how you can use ICICLE to accelerate your existing ZK proving pipeline this is the place for you.
## A primer to building your own integrations
Let's illustrate an ICICLE integration, so you can understand the core API and design overview of ICICLE.
![ICICLE architecture](../../static/img/architecture-high-level.png)
Engineers usually use a cryptographic library to implement their ZK protocols. These libraries implement efficient primitives which are used as building blocks for the protocol; ICICLE is such a library. The difference is that ICICLE is designed from the start to run on GPUs; the Rust and Golang APIs abstract away all low level CUDA details. Our goal was to allow developers with no GPU experience to quickly get started with ICICLE.
A developer may use ICICLE with two main approaches in mind.
1. Drop-in replacement approach.
2. End-to-End GPU replacement approach.
The first approach for GPU-accelerating your prover with ICICLE is quick to implement, but it has limitations, such as reduced memory optimization and limited protocol tuning for GPUs. It's a solid starting point, but those committed to fully leveraging GPU acceleration should consider a more comprehensive approach.
An end-to-end GPU replacement means performing the entire ZK proof on the GPU. This approach will reduce latency to a minimum, and it requires you to change the way you implement the protocol to be more GPU-friendly, taking full advantage of GPU acceleration. Redesigning your prover this way may take more engineering effort, but we promise you that it's worth it!
## Using ICICLE integrated provers
Here we cover how a developer can run existing circuits on ICICLE integrated provers.
### Gnark
[Gnark](https://github.com/Consensys/gnark) officially supports GPU proving with ICICLE. Currently only Groth16 on curve `BN254` is supported. This means that if you are currently using Gnark to write your circuits you can enjoy GPU acceleration without making many changes.
:::info
Currently ICICLE has been merged to Gnark [master branch](https://github.com/Consensys/gnark), however the [latest release](https://github.com/Consensys/gnark/releases/tag/v0.9.1) is from October 2023.
:::
Make sure your golang circuit project has `gnark` as a dependency and that you are using the master branch for now.
```
go get github.com/consensys/gnark@master
```
You should see two indirect dependencies added.
```
...
github.com/ingonyama-zk/icicle v0.1.0 // indirect
github.com/ingonyama-zk/iciclegnark v0.1.1 // indirect
...
```
:::info
As you may notice, we are using ICICLE v0.1 here, since Golang bindings are only supported in ICICLE v0.1 for the time being.
:::
To switch over to ICICLE proving, make sure to change the backend you are using; below is an example of how this should be done.
```
// toggle on
proofIci, err := groth16.Prove(ccs, pk, secretWitness, backend.WithIcicleAcceleration())
// toggle off
proof, err := groth16.Prove(ccs, pk, secretWitness)
```
Now that you have enabled the `WithIcicleAcceleration` backend, simply change the way you run your circuits to:
```
go run -tags=icicle main.go
```
Your logs should look something like this if everything went as expected.
```
13:12:05 INF compiling circuit
13:12:05 INF parsed circuit inputs nbPublic=1 nbSecret=1
13:12:05 INF building constraint builder nbConstraints=3
13:12:05 DBG precomputing proving key in GPU acceleration=icicle backend=groth16 curve=bn254 nbConstraints=3
13:12:05 DBG constraint system solver done nbConstraints=3 took=0.070259
13:12:05 DBG prover done acceleration=icicle backend=groth16 curve=bn254 nbConstraints=3 took=80.356684
13:12:05 DBG verifier done backend=groth16 curve=bn254 took=1.843888
```
`acceleration=icicle` indicates that the prover is running in acceleration mode with ICICLE.
You can reference the [Gnark docs](https://github.com/Consensys/gnark?tab=readme-ov-file#gpu-support) for further information.
### Halo2
A [Halo2](https://github.com/zkonduit/halo2) fork has been integrated with ICICLE for GPU acceleration. This means that you can run your existing Halo2 circuits with GPU acceleration just by activating a feature flag.
To turn on GPU acceleration, enable the `icicle_gpu` [feature flag](https://github.com/zkonduit/halo2/blob/3d7b5e61b3052680ccb279e05bdcc21dd8a8fedf/halo2_proofs/Cargo.toml#L102).
This feature flag will seamlessly toggle on GPU acceleration for you.
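Assuming your project depends on the fork's `halo2_proofs` crate, enabling the flag at build time might look like the following sketch; check the fork's documentation for the exact workflow.
```sh
cargo build --release --features halo2_proofs/icicle_gpu
```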


@@ -0,0 +1,247 @@
# Getting started with ICICLE
This guide is oriented towards developers who want to start writing code with the ICICLE libraries. If you just want to run your existing ZK circuits on a GPU, refer to [this guide](./integrations.md#using-icicle-integrations).
## ICICLE repository overview
![ICICLE API overview](../../static/img/apilevels.png)
The diagram above displays the general architecture of ICICLE and the API layers that exist. The CUDA API, which we also call ICICLE Core, is the lowest level; it comprises CUDA kernels which implement all primitives such as MSM, as well as C++ wrappers which expose these methods for different curves.
ICICLE Core compiles into a static library. This library can be used with our official Golang and Rust wrappers or linked with your C++ project. You can also implement a wrapper for it in any other language.
Based on this dependency architecture, the ICICLE repository has three main sections:
- [ICICLE Core](#icicle-core)
- [ICICLE Rust bindings](#icicle-rust-and-golang-bindings)
- [ICICLE Golang bindings](#icicle-rust-and-golang-bindings)
### ICICLE Core
[ICICLE Core](/icicle/core) is a library that directly works with GPU by defining CUDA kernels and algorithms that invoke them. It contains code for [fast field arithmetic](https://github.com/ingonyama-zk/icicle/tree/main/icicle/include/field/field.cuh), cryptographic primitives used in ZK such as [NTT](https://github.com/ingonyama-zk/icicle/tree/main/icicle/src/ntt/), [MSM](https://github.com/ingonyama-zk/icicle/tree/main/icicle/src/msm/), [Poseidon Hash](https://github.com/ingonyama-zk/icicle/tree/main/icicle/src/poseidon/), [Polynomials](https://github.com/ingonyama-zk/icicle/tree/main/icicle/src/polynomials/) and others.
ICICLE Core would typically be compiled into a static library and either used in a third party language such as Rust or Golang, or linked with your own C++ project.
### ICICLE Rust and Golang bindings
- [ICICLE Rust bindings](/icicle/rust-bindings)
- [ICICLE Golang bindings](/icicle/golang-bindings)
These bindings allow you to easily use ICICLE in a Rust or Golang project. Setting up the Golang bindings requires a few extra steps compared to the Rust bindings, which utilize the `cargo build` tool.
## Running ICICLE
This guide assumes that you have a Linux or Windows machine with an Nvidia GPU installed. If you don't have access to an Nvidia GPU you can access one for free on [Google Colab](https://colab.google/).
:::info note
ICICLE can only run on Linux or Windows. **MacOS is not supported**.
:::
### Prerequisites
- NVCC (version 12.0 or newer)
- cmake 3.18 and above
- GCC - version 9 or newer is recommended.
- Any Nvidia GPU
- Linux or Windows operating system.
#### Optional Prerequisites
- Docker, latest version.
- [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/index.html)
If you don't wish to install these prerequisites you can follow this tutorial using a [ZK-Container](https://github.com/ingonyama-zk/icicle/blob/main/Dockerfile) (docker container). To learn more about using ZK-Containers [read this](../ZKContainers.md).
### Setting up ICICLE and running tests
The objective of this guide is to make sure you can run the ICICLE Core, Rust and Golang tests. Achieving this will ensure you know how to setup ICICLE and run an ICICLE program. For simplicity, we will be using the ICICLE docker container as our environment, however, you may install the prerequisites on your machine and [skip](#icicle-core-1) the docker section.
#### Setting up environment with Docker
Let's begin by cloning the ICICLE repository:
```sh
git clone https://github.com/ingonyama-zk/icicle
```
We will proceed to build the docker image [found here](https://github.com/ingonyama-zk/icicle/blob/main/Dockerfile):
```sh
docker build -t icicle-demo .
docker run -it --runtime=nvidia --gpus all --name icicle_container icicle-demo
```
- `-it` runs the container in interactive mode with a terminal.
- `--gpus all` allocates all available GPUs to the container. You can also specify which GPUs to use if you don't want to allocate all of them.
- `--runtime=nvidia` uses the NVIDIA runtime, which is necessary for GPU support.
To read more about these settings reference this [article](https://developer.nvidia.com/nvidia-container-runtime).
If you accidentally close your terminal and want to reconnect, just run:
```sh
docker exec -it icicle_container bash
```
Let's make sure that we have the correct CUDA version before proceeding:
```sh
nvcc --version
```
You should see something like this
```sh
nvcc: NVIDIA (R) Cuda compiler driver
Copyright (c) 2005-2023 NVIDIA Corporation
Built on Tue_Aug_15_22:02:13_PDT_2023
Cuda compilation tools, release 12.2, V12.2.140
Build cuda_12.2.r12.2/compiler.33191640_0
```
Make sure the release version is at least 12.0.
#### ICICLE Core
ICICLE Core is found under [`<project_root>/icicle`](https://github.com/ingonyama-zk/icicle/tree/main/icicle). To build and run the tests first:
```sh
cd icicle
```
For this example, we are going to compile ICICLE for the `bn254` curve. However, other compilation strategies are supported.
```sh
mkdir -p build
cmake -S . -B build -DCURVE=bn254 -DBUILD_TESTS=ON
cmake --build build -j
```
The `-DBUILD_TESTS` option compiles the tests; without this flag, `ctest` won't work.
The `-DCURVE` option tells the compiler which curve to build. You can find a list of supported curves [here](https://github.com/ingonyama-zk/icicle/tree/main/icicle/cmake/CurvesCommon.cmake#L2).
The output in `build` folder should include the static libraries for the compiled curve.
To run the tests:
```sh
cd build/tests
ctest
```
#### ICICLE Rust
The Rust bindings work by first compiling the CUDA static libraries, as seen [here](https://github.com/ingonyama-zk/icicle/blob/main/wrappers/rust/icicle-curves/icicle-bn254/build.rs). The compilation of the CUDA and Rust code is handled entirely by the Rust build toolchain.
Similar to ICICLE Core, here we also have to compile per curve.
Let's compile the `bn254` curve:
```sh
cd wrappers/rust/icicle-curves/icicle-bn254
```
Now let's build our library:
```sh
cargo build --release
```
This may take a couple of minutes since we are compiling both the CUDA and Rust code.
To run the tests
```sh
cargo test
```
We also include some benchmarks
```sh
cargo bench
```
#### ICICLE Golang
The Golang bindings require compiling ICICLE Core first. We supply a [build script](https://github.com/ingonyama-zk/icicle/blob/main/wrappers/golang/build.sh) to help build what you need.
Script usage:
```sh
./build.sh [-curve=<curve>] [-field=<field>] [-hash=<hash>] [-cuda_version=<version>] [-g2] [-ecntt] [-devmode]
curve - The name of the curve to build or "all" to build all supported curves
field - The name of the field to build or "all" to build all supported fields
hash - The name of the hash to build or "all" to build all supported hashes
-g2 - Optional - build with G2 enabled
-ecntt - Optional - build with ECNTT enabled
-devmode - Optional - build in devmode
```
:::note
If more than one curve, field, or hash is supplied, only the last one supplied will be built.
:::
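For example, to build the `bn254` curve with G2 and ECNTT enabled:
```sh
./build.sh -curve=bn254 -g2 -ecntt
```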
Once the library has been built, you can use and test the Golang bindings.
To test a specific curve, field, or hash, change to its directory and then run:
```sh
go test ./tests -count=1 -failfast -timeout 60m -p 2 -v
```
You will be able to see each test that runs, how long it takes, and whether it passed or failed.
### Running ICICLE examples
ICICLE examples can be found [here](https://github.com/ingonyama-zk/icicle/tree/main/examples). These examples cover some simple use cases in C++, Rust, and Golang.
Let's run one of our C++ examples, in this case the [MSM example](https://github.com/ingonyama-zk/icicle/blob/main/examples/c%2B%2B/msm/example.cu):
```sh
cd examples/c++/msm
./compile.sh
./run.sh
```
:::tip
Read through the compile.sh and CMakeLists.txt to understand how to link your own C++ project with ICICLE
:::
#### Running with Docker
In each example directory, ZK-Container files are located in the `.devcontainer` subdirectory.
```sh
msm/
└── .devcontainer
    ├── devcontainer.json
    └── Dockerfile
```
Now let's build our Docker image and run the test inside it. Make sure you have installed the [optional prerequisites](#optional-prerequisites).
```sh
docker build -t icicle-example-msm -f .devcontainer/Dockerfile .
```
Let's start and enter the container:
```sh
docker run -it --rm --gpus all -v .:/icicle-example icicle-example-msm
```
Inside the container you can run the same commands:
```sh
./compile.sh
./run.sh
```
You can now experiment with our other examples; perhaps try running a Rust or Golang example next.


@@ -0,0 +1,61 @@
# Multi GPU with ICICLE
:::info
If you are looking for the Multi GPU API documentation refer [here](./rust-bindings/multi-gpu.md) for Rust and [here](./golang-bindings/multi-gpu.md) for Golang.
:::
One common challenge with Zero-Knowledge computation is managing the large input sizes. It's not uncommon to encounter circuits surpassing 2^25 constraints, pushing the capabilities of even advanced GPUs to their limits. To effectively scale and process such large circuits, leveraging multiple GPUs in tandem becomes a necessity.
Multi-GPU programming involves developing software to operate across multiple GPU devices. Let's first explore different approaches to multi-GPU programming, then we will cover how ICICLE allows you to easily develop your ZK computations to run across many GPUs.
## Approaches to Multi GPU programming
There are many [different strategies](https://github.com/NVIDIA/multi-gpu-programming-models) available for implementing multi-GPU programming; however, they can be split into two categories.
### GPU Server approach
This approach usually involves one or more CPUs opening threads to read from / write to multiple GPUs. You can think of it as a scaled-up Host-Device model.
![Multi GPU server approach](image.png)
This approach won't let us tackle larger computation sizes, but it will allow us to run multiple computations that we wouldn't be able to load onto a single GPU.
For example, let's say you had to compute two MSMs of size 2^26 on a GPU with 16GB of VRAM: you would normally have to perform them one after the other. However, if you double the number of GPUs in your system, you can run them in parallel.
### Inter GPU approach
This is a more sophisticated approach to multi-GPU computation. Using technologies such as [GPUDirect, NCCL, NVSHMEM](https://www.nvidia.com/en-us/on-demand/session/gtcspring21-cwes1084/) and NVLink, it's possible to combine multiple GPUs and split a computation among different devices.
This approach requires redesigning the algorithm at the software level to be compatible with splitting amongst devices. In some cases, to lower latency to a minimum, special inter GPU connections would be installed on a server to allow direct communication between multiple GPUs.
## Writing ICICLE Code for Multi GPUs
The approach we have taken for the moment is a GPU Server approach; we assume you have a machine with multiple GPUs and you wish to run some computation on each GPU.
To dive deeper and learn about the API check out the docs for our different ICICLE API
- [Rust Multi GPU APIs](./rust-bindings/multi-gpu.md)
- [Golang Multi GPU APIs](./golang-bindings/multi-gpu.md)
- C++ Multi GPU APIs
## Best practices
- Never hardcode device IDs. If you want your software to take advantage of all GPUs on a machine, use methods such as `get_device_count` to support an arbitrary number of GPUs.
- Launch one CPU thread per GPU. To avoid [nasty errors](https://developer.nvidia.com/blog/cuda-pro-tip-always-set-current-device-avoid-multithreading-bugs/) and hard-to-read code, we suggest creating a dedicated thread for every GPU. Within a CPU thread you should be able to launch as many tasks as you wish for a GPU, as long as they all run on the same GPU ID. This will make your code much more manageable, easier to read, and performant.
## ZKContainer support for multi GPUs
Multi GPU support should work with ZK-Containers by simply defining which devices the docker container should interact with:
```sh
docker run -it --gpus '"device=0,2"' zk-container-image
```
If you wish to expose all GPUs
```sh
docker run --gpus all zk-container-image
```


@@ -0,0 +1,58 @@
# What is ICICLE?
[![GitHub Release](https://img.shields.io/github/v/release/ingonyama-zk/icicle)](https://github.com/ingonyama-zk/icicle/releases)
[ICICLE](https://github.com/ingonyama-zk/icicle) is a cryptography library for ZK using GPUs. ICICLE implements blazing fast cryptographic primitives such as EC operations, MSM, NTT, Poseidon hash and more on GPU.
ICICLE allows developers with minimal GPU experience to effortlessly accelerate their ZK applications; from our experiments, even the most naive implementation may yield a 10X improvement in proving times.
ICICLE has been used by many leading ZK companies such as [Celer Network](https://github.com/celer-network), [Gnark](https://github.com/Consensys/gnark) and others to accelerate their ZK proving pipeline.
## Don't have access to a GPU?
We understand that not all developers have access to a GPU and we don't want this to limit anyone from developing with ICICLE.
Here are some ways we can help you gain access to GPUs:
:::note
If none of the following options suit your needs, contact us on [telegram](https://t.me/RealElan) for assistance. We're committed to ensuring that a lack of a GPU doesn't become a bottleneck for you. If you need help with setup or any other issues, we're here to help you.
:::
### Grants
At Ingonyama we are interested in accelerating the progress of ZK and cryptography. If you are an engineer, developer, or academic researcher, we invite you to check out [our grant program](https://www.ingonyama.com/blog/icicle-for-researchers-grants-challenges). We will give you access to GPUs and even pay you to do your dream research!
### Google Colab
This is a great way to get started with ICICLE instantly. Google Colab offers free GPU access to an NVIDIA T4 instance with 16 GB of memory, which should be enough for experimenting and even prototyping with ICICLE.
For an extensive guide on how to setup Google Colab with ICICLE refer to [this article](./colab-instructions.md).
### Vast.ai
[Vast.ai](https://vast.ai/) is a global GPU marketplace where you can rent many different types of GPUs by the hour for [competitive pricing](https://vast.ai/pricing). They provide on-demand and interruptible rentals depending on your need or use case; you can learn more about their rental types [here](https://vast.ai/faq#rental-types).
## What can you do with ICICLE?
[ICICLE](https://github.com/ingonyama-zk/icicle) can be used in the same way you would use any other cryptography library. While developing and integrating ICICLE into many proof systems, we found some use case categories:
### Circuit developers
If you are a circuit developer and are experiencing bottlenecks while running your circuits, an ICICLE integrated prover may be the solution.
ICICLE has been integrated into a number of popular ZK provers including [Gnark prover](https://github.com/Consensys/gnark) and [Halo2](https://github.com/zkonduit/halo2). This means that you can enjoy GPU acceleration for your existing circuits immediately without writing a single line of code by simply switching on the GPU prover flag!
### Integrating into existing ZK provers
From our collaborations we have learned that it's possible to accelerate a specific part of your prover to solve a specific bottleneck.
ICICLE can be used to accelerate specific parts of your prover without completely rewriting your ZK prover.
### Developing your own ZK provers
If your goal is to build a ZK prover from the ground up, ICICLE is an ideal tool for creating a highly optimized and scalable ZK prover. A key benefit of using GPUs with ICICLE is the ability to scale your ZK prover efficiently across multiple machines within a data center.
### Developing proof of concepts
ICICLE is also ideal for developing small prototypes. ICICLE has Golang and Rust bindings, so you can easily develop a library implementing a specific primitive using ICICLE. An example would be developing a KZG commitment library using ICICLE.


@@ -0,0 +1,27 @@
@startuml
skinparam componentStyle uml2
' Define Components
component "C++ Template\nComponent" as CppTemplate {
[Parameterizable Interface]
}
component "C API Wrapper\nComponent" as CApiWrapper {
[C API Interface]
}
component "Rust Code\nComponent" as RustCode {
[Macro Interface\n(Template Instantiation)]
}
' Define Artifact
artifact "Static Library\n«artifact»" as StaticLib
' Connections
CppTemplate -down-> CApiWrapper : Instantiates
CApiWrapper .down.> StaticLib : Compiles into
RustCode -left-> StaticLib : Links against\nand calls via FFI
' Notes
note right of CppTemplate : Generic C++\ntemplate implementation
note right of CApiWrapper : Exposes C API for FFI\nto Rust/Go
note right of RustCode : Uses macros to\ninstantiate templates
@enduml


@@ -0,0 +1,86 @@
@startuml
' Define Interface for Polynomial Backend Operations
interface IPolynomialBackend {
+add()
+subtract()
+multiply()
+divide()
+evaluate()
}
' Define Interface for Polynomial Context (State Management)
interface IPolynomialContext {
+initFromCoeffs()
+initFromEvals()
+getCoeffs()
+getEvals()
}
' PolynomialAPI now uses two strategies: Backend and Context
class PolynomialAPI {
-backendStrategy: IPolynomialBackend
-contextStrategy: IPolynomialContext
-setBackendStrategy(IPolynomialBackend)
-setContextStrategy(IPolynomialContext)
+add()
+subtract()
+multiply()
+divide()
+evaluate()
}
' Backend Implementations
class GPUPolynomialBackend implements IPolynomialBackend {
#gpuResources: Resource
+add()
+subtract()
+multiply()
+divide()
+evaluate()
}
class ZPUPolynomialBackend implements IPolynomialBackend {
#zpuResources: Resource
+add()
+subtract()
+multiply()
+divide()
+evaluate()
}
class TracerPolynomialBackend implements IPolynomialBackend {
#traceData: Data
+add()
+subtract()
+multiply()
+divide()
+evaluate()
}
' Context Implementations (Placeholder for actual implementation)
class GPUContext implements IPolynomialContext {
+initFromCoeffs()
+initFromEvals()
+getCoeffs()
+getEvals()
}
class ZPUContext implements IPolynomialContext {
+initFromCoeffs()
+initFromEvals()
+getCoeffs()
+getEvals()
}
class TracerContext implements IPolynomialContext {
+initFromCoeffs()
+initFromEvals()
+getCoeffs()
+getEvals()
}
' Relationships
PolynomialAPI o-- IPolynomialBackend : uses
PolynomialAPI o-- IPolynomialContext : uses
@enduml


@@ -0,0 +1,388 @@
# Polynomial API Overview
:::note
Read our paper on the Polynomials API in ICICLE v2 by clicking [here](https://eprint.iacr.org/2024/973).
:::
## Introduction
The Polynomial API offers a robust framework for polynomial operations within a computational environment. It's designed for flexibility and efficiency, supporting a broad range of operations like arithmetic, evaluation, and manipulation, all while abstracting from the computation and storage specifics. This enables adaptability to various backend technologies, employing modern C++ practices.
## Key Features
### Backend Agnostic Architecture
Our API is structured to be independent of any specific computational backend. While a CUDA backend is currently implemented, the architecture facilitates easy integration of additional backends. This capability allows users to perform polynomial operations without the need to tailor their code to specific hardware, enhancing code portability and scalability.
### Templating in the Polynomial API
The Polynomial API is designed with a templated structure to accommodate different data types for coefficients, the domain, and images. This flexibility allows the API to be adapted for various computational needs and types of data.
```cpp
template <typename Coeff, typename Domain = Coeff, typename Image = Coeff>
class Polynomial {
  // Polynomial class definition
};
```
In this template:
- **`Coeff`**: Represents the type of the coefficients of the polynomial.
- **`Domain`**: Specifies the type for the input values over which the polynomial is evaluated. By default, it is the same as the type of the coefficients but can be specified separately to accommodate different computational contexts.
- **`Image`**: Defines the type of the output values of the polynomial. This is typically the same as the coefficients.
#### Default instantiation
```cpp
extern template class Polynomial<scalar_t>;
```
#### Extended use cases
The templated nature of the Polynomial API also supports more complex scenarios. For example, coefficients and images could be points on an elliptic curve (EC points), which are useful in cryptographic applications and advanced algebraic structures. This approach allows the API to be extended easily to support new algebraic constructions without modifying the core implementation.
### Supported Operations
The Polynomial class encapsulates a polynomial, providing a variety of operations:
- **Construction**: Create polynomials from coefficients or evaluations on roots-of-unity domains.
- **Arithmetic Operations**: Perform addition, subtraction, multiplication, and division.
- **Evaluation**: Directly evaluate polynomials at specific points or across a domain.
- **Manipulation**: Features like slicing polynomials, adding or subtracting monomials inplace, and computing polynomial degrees.
- **Memory Access**: Access internal states or obtain device-memory views of polynomials.
## Usage
This section outlines how to use the Polynomial API in C++. Bindings for Rust and Go are detailed under the Bindings sections.
### Backend Initialization
Initialization with an appropriate factory is required to configure the computational context and backend.
```cpp
#include "polynomials/polynomials.h"
#include "polynomials/cuda_backend/polynomial_cuda_backend.cuh"
// Initialize with a CUDA backend
Polynomial::initialize(std::make_shared<CUDAPolynomialFactory>());
```
:::note
Initialization of a factory must be done per linked curve or field.
:::
### Construction
Polynomials can be constructed from coefficients, from evaluations on roots-of-unity domains, or by cloning existing polynomials.
```cpp
// Construction
static Polynomial from_coefficients(const Coeff* coefficients, uint64_t nof_coefficients);
static Polynomial from_rou_evaluations(const Image* evaluations, uint64_t nof_evaluations);
// Clone the polynomial
Polynomial clone() const;
```
Example:
```cpp
auto p_from_coeffs = Polynomial_t::from_coefficients(coeff /* :scalar_t* */, nof_coeffs);
auto p_from_rou_evals = Polynomial_t::from_rou_evaluations(rou_evals /* :scalar_t* */, nof_evals);
auto p_cloned = p.clone(); // p_cloned and p do not share memory
```
:::note
The coefficients or evaluations may be allocated either on host or device memory. In both cases the memory is copied to the backend device.
:::
### Arithmetic
Constructed polynomials can be used for various arithmetic operations:
```cpp
// Addition
Polynomial operator+(const Polynomial& rhs) const;
Polynomial& operator+=(const Polynomial& rhs); // inplace addition
// Subtraction
Polynomial operator-(const Polynomial& rhs) const;
// Multiplication
Polynomial operator*(const Polynomial& rhs) const;
Polynomial operator*(const Domain& scalar) const; // scalar multiplication
// Division A(x) = B(x)Q(x) + R(x)
std::pair<Polynomial, Polynomial> divide(const Polynomial& rhs) const; // returns (Q(x), R(x))
Polynomial operator/(const Polynomial& rhs) const; // returns quotient Q(x)
Polynomial operator%(const Polynomial& rhs) const; // returns remainder R(x)
Polynomial divide_by_vanishing_polynomial(uint64_t degree) const; // division by the vanishing polynomial V(x)=X^N-1
```
#### Example
Given polynomials $A(x)$, $B(x)$, $C(x)$ and the vanishing polynomial $V(x)$:
$$
H(x)=\frac{A(x) \cdot B(x) - C(x)}{V(x)} \quad \text{where} \quad V(x) = X^{N}-1
$$
```cpp
auto H = (A*B-C).divide_by_vanishing_polynomial(N);
```
### Evaluation
Evaluate polynomials at arbitrary domain points, across a domain or on a roots-of-unity domain.
```cpp
Image operator()(const Domain& x) const; // evaluate f(x)
void evaluate(const Domain* x, Image* evals /*OUT*/) const;
void evaluate_on_domain(Domain* domain, uint64_t size, Image* evals /*OUT*/) const; // caller allocates memory
void evaluate_on_rou_domain(uint64_t domain_log_size, Image* evals /*OUT*/) const; // caller allocates memory
```
Example:
```cpp
Coeff x = rand();
Image f_x = f(x); // evaluate f at x
// evaluate f(x) on a domain
uint64_t domain_size = ...;
auto domain = /*build domain*/; // host or device memory
auto evaluations = std::make_unique<scalar_t[]>(domain_size); // can be device memory too
f.evaluate_on_domain(domain, domain_size, evaluations);
// evaluate f(x) on roots of unity domain
uint64_t domain_log_size = ...;
auto evaluations_rou_domain = std::make_unique<scalar_t[]>(1 << domain_log_size); // can be device memory too
f.evaluate_on_rou_domain(domain_log_size, evaluations_rou_domain);
```
### Manipulations
Beyond arithmetic, the API supports efficient polynomial manipulations:
#### Monomials
```cpp
// Monomial operations
Polynomial& add_monomial_inplace(Coeff monomial_coeff, uint64_t monomial = 0);
Polynomial& sub_monomial_inplace(Coeff monomial_coeff, uint64_t monomial = 0);
```
The ability to add or subtract monomials directly and in-place is an efficient way to manipulate polynomials.
Example:
```cpp
f.add_monomial_inplace(scalar_t::from(5)); // f(x) += 5
f.sub_monomial_inplace(scalar_t::from(3), 8); // f(x) -= 3x^8
```
#### Computing the degree of a Polynomial
```cpp
// Degree computation
int64_t degree();
```
The degree of a polynomial is a fundamental characteristic that describes the highest power of the variable in the polynomial expression with a non-zero coefficient.
The `degree()` function in the API returns the degree of the polynomial, corresponding to the highest exponent with a non-zero coefficient.
- For the polynomial $f(x) = x^5 + 2x^3 + 4$, the degree is 5 because the highest power of $x$ with a non-zero coefficient is 5.
- For a scalar value such as a constant term (e.g., $f(x) = 7$), the degree is considered 0, as it corresponds to $x^0$.
- The degree of the zero polynomial, $f(x) = 0$, where there are no non-zero coefficients, is defined as -1. This special case often represents an "empty" or undefined state in many mathematical contexts.
Example:
```cpp
auto f = /*some expression*/;
auto degree_of_f = f.degree();
```
#### Slicing
```cpp
// Slicing and selecting even or odd components.
Polynomial slice(uint64_t offset, uint64_t stride, uint64_t size = 0 /*0 means take all elements*/);
Polynomial even();
Polynomial odd();
```
The Polynomial API provides methods for slicing polynomials and selecting specific components, such as even or odd indexed terms. Slicing allows extracting specific sections of a polynomial based on an offset, stride, and size.
The following examples demonstrate folding a polynomial's even and odd parts and arbitrary slicing:
```cpp
// folding a polynomial's even and odd parts with randomness
auto x = rand();
auto even = f.even();
auto odd = f.odd();
auto fold_poly = even + odd * x;
// arbitrary slicing (first quarter)
auto first_quarter = f.slice(0 /*offset*/, 1 /*stride*/, f.degree()/4 /*size*/);
```
### Memory access (copy/view)
Access to the polynomial's internal state can be vital for operations like commitment schemes or when more efficient custom operations are necessary. This can be done either by copying or viewing the polynomial.
#### Copying
Copies the polynomial coefficients to either host or device allocated memory.
:::note
Copying to host memory is backend agnostic while copying to device memory requires the memory to be allocated on the corresponding backend.
:::
```cpp
Coeff get_coeff(uint64_t idx) const; // copy single coefficient to host
uint64_t copy_coeffs(Coeff* coeffs, uint64_t start_idx, uint64_t end_idx) const;
```
Example:
```cpp
auto coeffs_device = /*allocate CUDA or host memory*/;
f.copy_coeffs(coeffs_device, 0/*start*/, f.degree());
MSMConfig cfg = msm::defaultMSMConfig();
cfg.are_points_on_device = true; // assuming copy to device memory
auto rv = msm::MSM(coeffs_device, points, msm_size, cfg, results);
```
#### Views
The Polynomial API supports efficient data handling through the use of memory views. These views provide direct access to the polynomial's internal state without the need to copy data. This feature is particularly useful for operations that require direct access to device memory, enhancing both performance and memory efficiency.
##### What is a Memory View?
A memory view is essentially a pointer to data stored in device memory. By providing a direct access pathway to the data, it eliminates the need for data duplication, thus conserving both time and system resources. This is especially beneficial in high-performance computing environments where data size and operation speed are critical factors.
##### Applications of Memory Views
Memory views are extremely versatile and can be employed in various computational contexts such as:
- **Commitments**: Views can be used to commit polynomial states in cryptographic schemes, such as Multi-Scalar Multiplications (MSM).
- **External Computations**: They allow external functions or algorithms to utilize the polynomial's data directly, facilitating operations outside the core polynomial API. This is useful for custom operations that are not covered by the API.
##### Obtaining and Using Views
To create and use views within the Polynomial API, functions are provided to obtain pointers to both coefficients and evaluation data. Here's how they are generally structured:
```cpp
// Obtain a view of the polynomial's coefficients
std::tuple<IntegrityPointer<Coeff>, uint64_t /*size*/, uint64_t /*device_id*/> get_coefficients_view();
```
Example usage:
```cpp
auto [coeffs_view, size, device_id] = polynomial.get_coefficients_view();
// Use coeffs_view in a computational routine that requires direct access to polynomial coefficients
// Example: Passing the view to a GPU-accelerated function
gpu_accelerated_function(coeffs_view.get(),...);
```
##### Integrity-Pointer: Managing Memory Views
Within the Polynomial API, memory views are managed through a specialized tool called the Integrity-Pointer. This pointer type is designed to safeguard operations by monitoring the validity of the memory it points to. It can detect if the memory has been modified or released, thereby preventing unsafe access to stale or non-existent data.
The Integrity-Pointer not only acts as a regular pointer but also provides additional functionality to ensure the integrity of the data it references. Here are its key features:
```cpp
// Checks whether the pointer is still considered valid
bool isValid() const;
// Retrieves the raw pointer or nullptr if pointer is invalid
const T* get() const;
// Dereferences the pointer. Throws exception if the pointer is invalid.
const T& operator*() const;
//Provides access to the member of the pointed-to object. Throws exception if the pointer is invalid.
const T* operator->() const;
```
Consider the following case:
```cpp
auto [coeff_view, size, device] = f.get_coefficients_view();
// Use the coefficients view to perform external operations
commit_to_polynomial(coeff_view.get(), size);
// Modification of the original polynomial
f += g; // Any operation that modifies 'f' potentially invalidates 'coeff_view'
// Check if the view is still valid before using it further
if (coeff_view.isValid()) {
perform_additional_computation(coeff_view.get(), size);
} else {
handle_invalid_data();
}
```
## Multi-GPU Support with CUDA Backend
The Polynomial API includes comprehensive support for multi-GPU environments, a crucial feature for leveraging the full computational power of systems equipped with multiple NVIDIA GPUs. This capability is part of the API's CUDA backend, which is designed to efficiently manage polynomial computations across different GPUs.
### Setting the CUDA Device
Like other components of the ICICLE framework, the Polynomial API allows explicit setting of the current CUDA device:
```cpp
cudaSetDevice(int deviceID);
```
This function sets the active CUDA device. All subsequent operations that allocate or deal with polynomial data will be performed on this device.
### Allocation Consistency
Polynomials are always allocated on the current CUDA device at the time of their creation. It is crucial to ensure that the device context is correctly set before initiating any operation that involves memory allocation:
```cpp
// Set the device before creating polynomials
cudaSetDevice(0);
Polynomial p1 = Polynomial::from_coefficients(coeffs, size);
cudaSetDevice(1);
Polynomial p2 = Polynomial::from_coefficients(coeffs, size);
```
### Matching Devices for Operations
When performing operations that result in the creation of new polynomials (such as addition or multiplication), it is imperative that both operands are on the same CUDA device. If the operands reside on different devices, an exception is thrown:
```cpp
// Ensure both operands are on the same device
cudaSetDevice(0);
auto p3 = p1 + p2; // Throws an exception if p1 and p2 are not on the same device
```
### Device-Agnostic Operations
Operations that do not involve the creation of new polynomials, such as computing the degree of a polynomial or performing in-place modifications, can be executed regardless of the current device setting:
```cpp
// 'degree' and in-place operations do not depend on the current device setting
int deg = p1.degree();
p1 += p2; // Valid if p1 and p2 are on the same device, throws otherwise
```
### Error Handling
The API is designed to throw exceptions if operations are attempted across polynomials that are not located on the same GPU. This ensures that all polynomial operations are performed consistently and without data integrity issues due to device mismatches.
### Best Practices
To maximize the performance and avoid runtime errors in a multi-GPU setup, always ensure that:
- The CUDA device is set correctly before polynomial allocation.
- Operations involving new polynomial creation are performed with operands on the same device.
By adhering to these guidelines, developers can effectively harness the power of multiple GPUs to handle large-scale polynomial computations efficiently.

Binary image files not shown (4 images added).

View File

@@ -0,0 +1,79 @@
# Keccak
[Keccak](https://keccak.team/files/Keccak-implementation-3.2.pdf) is a cryptographic hash function designed by Guido Bertoni, Joan Daemen, Michaël Peeters, and Gilles Van Assche. It was selected as the winner of the NIST hash function competition, becoming the basis for the [SHA-3 standard](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf).
Keccak operates on a message input of any length and produces a fixed-size hash output. The hash function is built upon the sponge construction, which involves absorbing the input data followed by squeezing out the hash value.
At its core, Keccak consists of a permutation function operating on a state array. The permutation function employs a round function that operates iteratively on the state array. Each round consists of five main steps (a sketch of the first step follows this list):
- **Theta:** This step introduces diffusion by performing a bitwise XOR operation between the state and a linear combination of its neighboring columns.
- **Rho:** This step performs bit rotation operations on each lane of the state array.
- **Pi:** This step rearranges the positions of the lanes in the state array.
- **Chi:** This step applies a nonlinear mixing operation to each lane of the state array.
- **Iota:** This step introduces a round constant to the state array.
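For intuition, here is a minimal sketch of the Theta step over the 5x5 state of 64-bit lanes, written in Rust for illustration only; the ICICLE kernels implement the full permutation on the GPU.
```rust
// Theta step of the Keccak-f permutation on a 5x5 state of 64-bit lanes.
fn theta(state: &mut [[u64; 5]; 5]) {
    // Column parity: XOR of the five lanes in each column x.
    let mut c = [0u64; 5];
    for x in 0..5 {
        c[x] = state[x][0] ^ state[x][1] ^ state[x][2] ^ state[x][3] ^ state[x][4];
    }
    // D[x] mixes the two neighboring columns, one of them rotated by 1 bit.
    let mut d = [0u64; 5];
    for x in 0..5 {
        d[x] = c[(x + 4) % 5] ^ c[(x + 1) % 5].rotate_left(1);
    }
    // XOR D[x] into every lane of column x.
    for x in 0..5 {
        for y in 0..5 {
            state[x][y] ^= d[x];
        }
    }
}

fn main() {
    let mut state = [[0u64; 5]; 5];
    state[0][0] = 1;
    theta(&mut state);
    // After Theta, the single set bit has diffused into neighboring columns.
    println!("{:?}", state[1][0]);
}
```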
## Keccak vs Sha3
There is some [confusion](https://www.cybertest.com/blog/keccak-vs-sha3) between what is called `Keccak` and `Sha3`. In ICICLE we support both. `Keccak256` refers to the original Keccak-256 used in Ethereum (which pads messages with `0x01`), while `Sha3-256` refers to the NIST-standardized SHA3-256 (which pads with `0x06`).
## Using Keccak
ICICLE Keccak supports batch hashing, which can be utilized for constructing a merkle tree or running multiple hashes in parallel.
### Supported Bindings
- [Golang](https://github.com/ingonyama-zk/icicle/tree/main/wrappers/golang/hash/keccak)
- [Rust](https://github.com/ingonyama-zk/icicle/tree/main/wrappers/rust/icicle-hash)
### Example usage
This is an example of running 1024 Keccak-256 hashes in parallel, where input strings are of size 136 bytes:
```rust
use icicle_core::hash::HashConfig;
use icicle_cuda_runtime::memory::HostSlice;
use icicle_hash::keccak::keccak256;
let config = HashConfig::default();
let input_block_len = 136;
let number_of_hashes = 1024;
let preimages = vec![1u8; number_of_hashes * input_block_len];
let mut digests = vec![0u8; number_of_hashes * 32];
let preimages_slice = HostSlice::from_slice(&preimages);
let digests_slice = HostSlice::from_mut_slice(&mut digests);
keccak256(
preimages_slice,
input_block_len as u32,
number_of_hashes as u32,
digests_slice,
&config,
)
.unwrap();
```
### Merkle Tree
You can build a keccak merkle tree using the corresponding functions:
```rust
use icicle_core::tree::{merkle_tree_digests_len, TreeBuilderConfig};
use icicle_cuda_runtime::memory::HostSlice;
use icicle_hash::keccak::build_keccak256_merkle_tree;
let mut config = TreeBuilderConfig::default();
config.arity = 2;
let height = 22;
let input_block_len = 136;
let leaves = vec![1u8; (1 << height) * input_block_len];
let mut digests = vec![0u64; merkle_tree_digests_len((height + 1) as u32, 2, 1)];
let leaves_slice = HostSlice::from_slice(&leaves);
let digests_slice = HostSlice::from_mut_slice(&mut digests);
build_keccak256_merkle_tree(leaves_slice, digests_slice, height, input_block_len, &config).unwrap();
```
In the example above, a binary tree of height 22 is built. Each leaf is a 136-byte array. The leaves and digests are laid out in flat arrays. You can also use Keccak-512 via the `build_keccak512_merkle_tree` function.

View File

@@ -0,0 +1,195 @@
# MSM - Multi scalar multiplication
MSM stands for multi-scalar multiplication. It is defined as:
$$
MSM(a, G) = \sum_{j=0}^{n-1} a_j G_j
$$
where:
$G_j \in G$ - points from an elliptic curve group
$a_0, \ldots, a_{n-1}$ - scalars
$MSM(a, G) \in G$ - a single elliptic curve (EC) point
In words, MSM is a sum of scalar-point multiplications. The core operations are modular multiplication and elliptic curve point addition. Each scalar-point multiplication can be computed independently and the products then summed, making MSM inherently parallelizable.
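To make the definition concrete, here is a minimal reference sketch. It is generic over the group operation, with a toy additive group of integers mod a small prime standing in for the elliptic curve group; all names here are illustrative, not ICICLE API.
```rust
// Naive MSM straight from the definition: sum_j a_j * G_j.
// `add` is the group operation; `zero` the identity. Double-and-add
// performs the scalar multiplication a_j * G_j.
fn scalar_mul<G: Copy>(mut scalar: u64, mut base: G, zero: G, add: &impl Fn(G, G) -> G) -> G {
    let mut acc = zero;
    while scalar > 0 {
        if scalar & 1 == 1 {
            acc = add(acc, base);
        }
        base = add(base, base); // doubling
        scalar >>= 1;
    }
    acc
}

fn msm_naive<G: Copy>(scalars: &[u64], points: &[G], zero: G, add: &impl Fn(G, G) -> G) -> G {
    scalars
        .iter()
        .zip(points)
        .fold(zero, |acc, (&a, &g)| add(acc, scalar_mul(a, g, zero, add)))
}

fn main() {
    // Toy stand-in group: integers mod 97 under addition.
    let add = |a: u64, b: u64| (a + b) % 97;
    let (scalars, points) = ([3u64, 5, 7], [10u64, 20, 30]);
    // 3*10 + 5*20 + 7*30 = 340 ≡ 49 (mod 97)
    assert_eq!(msm_naive(&scalars, &points, 0, &add), 49);
}
```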
Accelerating MSM is crucial to a ZK protocol's performance due to the [large percentage of proving run time](https://hackmd.io/@0xMonia/SkQ6-oRz3#Hardware-acceleration-in-action) it accounts for.
You can learn more about how MSMs work from this [video](https://www.youtube.com/watch?v=Bl5mQA7UL2I) and from our resource list on [Ingopedia](https://www.ingonyama.com/ingopedia/msm).
## Supported Bindings
- [Golang](../golang-bindings/msm.md)
- [Rust](../rust-bindings/msm.md)
## Algorithm description
We follow the bucket method algorithm. The GPU implementation consists of four phases (a simplified sketch follows the list):
1. Preparation phase - The scalars are split into smaller scalars of `c` bits each. These are the bucket indices. The points are grouped according to their corresponding bucket index and the buckets are sorted by size.
2. Accumulation phase - Each bucket accumulates all of its points using a single thread. More than one thread is assigned to large buckets, in proportion to their size. A bucket is considered large if its size is above the large bucket threshold that is determined by the `large_bucket_factor` parameter. The large bucket threshold is the expected average bucket size times the `large_bucket_factor` parameter.
3. Buckets Reduction phase - bucket results are multiplied by their corresponding bucket number and each bucket module is reduced to a small number of final results. By default, this is done by an iterative algorithm which is highly parallel. Setting `is_big_triangle` to `true` will switch this phase to the running sum algorithm described in the above YouTube talk which is much less parallel.
4. Final accumulation phase - The final results from the last phase are accumulated using the double-and-add algorithm.
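The following single-threaded sketch mirrors these four phases over the same kind of toy additive group (u64 mod a prime); bucket sorting and the parallel work distribution of the real GPU implementation are omitted.
```rust
// Bucket-method MSM over a toy additive group, one thread, no sorting.
// Phases: (1) split scalars into c-bit windows, (2) accumulate buckets,
// (3) reduce each window with a running sum, (4) combine windows.
fn bucket_msm(scalars: &[u64], points: &[u64], c: u32, bitsize: u32, modulus: u64) -> u64 {
    let add = |a: u64, b: u64| (a + b) % modulus;
    let num_windows = (bitsize + c - 1) / c;
    let mut window_sums = Vec::new();
    for w in 0..num_windows {
        // Phases 1+2: route each point into the bucket given by its c-bit sub-scalar.
        let mut buckets = vec![0u64; 1 << c];
        for (&s, &p) in scalars.iter().zip(points) {
            let idx = ((s >> (w * c)) & ((1u64 << c) - 1)) as usize;
            if idx != 0 {
                buckets[idx] = add(buckets[idx], p);
            }
        }
        // Phase 3: window_sum = sum_i i * buckets[i], via a running sum.
        let (mut running, mut window_sum) = (0u64, 0u64);
        for &b in buckets[1..].iter().rev() {
            running = add(running, b);
            window_sum = add(window_sum, running);
        }
        window_sums.push(window_sum);
    }
    // Phase 4: combine windows top-down, doubling c times between windows.
    window_sums.iter().rev().fold(0u64, |mut acc, &ws| {
        for _ in 0..c {
            acc = add(acc, acc); // acc *= 2^c overall
        }
        add(acc, ws)
    })
}

fn main() {
    // 3*10 + 5*20 = 130 in the toy group (modulus large enough not to wrap).
    assert_eq!(bucket_msm(&[3, 5], &[10, 20], 2, 4, 1_000_003), 130);
}
```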
## Batched MSM
The MSM supports batch mode - running multiple MSMs in parallel. As long as there is enough memory available, it's always better to use batch mode than to run single MSMs serially. We support both batches of MSMs that share the same points and batches that use different points.
## MSM configuration
```cpp
/**
* @struct MSMConfig
* Struct that encodes MSM parameters to be passed into the [MSM](@ref MSM) function. The intended use of this struct
* is to create it using [default_msm_config](@ref default_msm_config) function and then you'll hopefully only need to
* change a small number of default values for each of your MSMs.
*/
struct MSMConfig {
device_context::DeviceContext ctx; /**< Details related to the device such as its id and stream id. */
int points_size; /**< Number of points in the MSM. If a batch of MSMs needs to be computed, this should be
* a number of different points. So, if each MSM re-uses the same set of points, this
* variable is set equal to the MSM size. And if every MSM uses a distinct set of
* points, it should be set to the product of MSM size and [batch_size](@ref
* batch_size). Default value: 0 (meaning it's equal to the MSM size). */
int precompute_factor; /**< The number of extra points to pre-compute for each point. See the
* [precompute_msm_points](@ref precompute_msm_points) function, `precompute_factor` passed
* there needs to be equal to the one used here. Larger values decrease the
* number of computations to make, on-line memory footprint, but increase the static
* memory footprint. Default value: 1 (i.e. don't pre-compute). */
int c; /**< \f$ c \f$ value, or "window bitsize" which is the main parameter of the "bucket
* method" that we use to solve the MSM problem. As a rule of thumb, larger value
* means more on-line memory footprint but also more parallelism and less computational
* complexity (up to a certain point). Currently pre-computation is independent of
* \f$ c \f$, however in the future value of \f$ c \f$ here and the one passed into the
* [precompute_msm_points](@ref precompute_msm_points) function will need to be identical.
* Default value: 0 (the optimal value of \f$ c \f$ is chosen automatically). */
int bitsize; /**< Number of bits of the largest scalar. Typically equals the bitsize of scalar field,
* but if a different (better) upper bound is known, it should be reflected in this
* variable. Default value: 0 (set to the bitsize of scalar field). */
int large_bucket_factor; /**< Variable that controls how sensitive the algorithm is to the buckets that occur
* very frequently. Useful for efficient treatment of non-uniform distributions of
* scalars and "top windows" with few bits. Can be set to 0 to disable separate
* treatment of large buckets altogether. Default value: 10. */
int batch_size; /**< The number of MSMs to compute. Default value: 1. */
bool are_scalars_on_device; /**< True if scalars are on device and false if they're on host. Default value:
* false. */
bool are_scalars_montgomery_form; /**< True if scalars are in Montgomery form and false otherwise. Default value:
* true. */
bool are_points_on_device; /**< True if points are on device and false if they're on host. Default value: false. */
bool are_points_montgomery_form; /**< True if coordinates of points are in Montgomery form and false otherwise.
* Default value: true. */
bool are_results_on_device; /**< True if the results should be on device and false if they should be on host. If set
* to false, `is_async` won't take effect because a synchronization is needed to
* transfer results to the host. Default value: false. */
bool is_big_triangle; /**< Whether to do "bucket accumulation" serially. Decreases computational complexity
* but also greatly decreases parallelism, so only suitable for large batches of MSMs.
* Default value: false. */
bool is_async; /**< Whether to run the MSM asynchronously. If set to true, the MSM function will be
* non-blocking and you'd need to synchronize it explicitly by running
* `cudaStreamSynchronize` or `cudaDeviceSynchronize`. If set to false, the MSM
* function will block the current CPU thread. */
};
```
## Choosing optimal parameters
`is_big_triangle` should be `false` in almost all cases. It might provide better results only for very small MSMs (smaller than $2^8$) with a large batch (larger than 100), but this should be tested per scenario.
Large buckets exist in two cases:
1. When the scalar distribution isn't uniform.
2. When `c` does not divide the scalar bit-size.
A `large_bucket_factor` of 10 yields good results in most cases, but it's best to fine-tune this parameter per `c` and per scalar distribution.
The two most important parameters for performance are `c` and the `precompute_factor`. They affect the number of EC additions as well as the memory size. When the points are not known in advance, we cannot use precomputation. In this case, the best `c` value is usually around $\log_2(\text{msmSize}) - 4$. However, in most protocols the points are known in advance and precomputation can be used, unless limited by memory. Usually it's best to use maximum precomputation (such that we end up with only a single bucket module) combined with a `c` value around $\log_2(\text{msmSize}) - 1$.
## Memory usage estimation
The main memory requirements of the MSM are the following:
- Scalars - `sizeof(scalar_t) * msm_size * batch_size`
- Scalar indices - `~6 * sizeof(unsigned) * nof_bucket_modules * msm_size * batch_size`
- Points - `sizeof(affine_t) * msm_size * precomp_factor * batch_size`
- Buckets - `sizeof(projective_t) * nof_bucket_modules * 2^c * batch_size`
where `nof_bucket_modules = ceil(ceil(bitsize / c) / precompute_factor)`
During the MSM computation, memory for scalars and scalar indices is allocated first; the indices are then freed, and points and buckets are allocated. A good estimate for the required memory is therefore:
$\max(scalars + scalarIndices,\; scalars + points + buckets)$
This gives a good approximation within 10% of the actual required memory for most cases.
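These formulas are easy to turn into a quick estimator. Below is a hedged sketch (not an ICICLE API): the per-element sizes `scalar_size`, `affine_size` and `projective_size` are values you supply for your curve, e.g. from `sizeof(scalar_t)` and friends.
```rust
// Rough MSM memory estimate in bytes, following the formulas above.
fn estimate_msm_memory(
    msm_size: u64,
    batch_size: u64,
    c: u32,
    bitsize: u64,
    precompute_factor: u64,
    scalar_size: u64,     // sizeof(scalar_t)
    affine_size: u64,     // sizeof(affine_t)
    projective_size: u64, // sizeof(projective_t)
) -> u64 {
    let nof_bucket_modules = bitsize.div_ceil(c as u64).div_ceil(precompute_factor);
    let scalars = scalar_size * msm_size * batch_size;
    let indices = 6 * 4 * nof_bucket_modules * msm_size * batch_size; // sizeof(unsigned) == 4
    let points = affine_size * msm_size * precompute_factor * batch_size;
    let buckets = projective_size * nof_bucket_modules * (1u64 << c) * batch_size;
    // Peak is either the indexing stage or the accumulation stage.
    (scalars + indices).max(scalars + points + buckets)
}

fn main() {
    // Example: 2^22 MSM, c = 17, 253-bit scalars, no precomputation, BLS12-377-like sizes.
    let bytes = estimate_msm_memory(1 << 22, 1, 17, 253, 1, 32, 96, 144);
    println!("~{:.2} GB", bytes as f64 / 1e9);
}
```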
## Example parameters
Here is a useful table showing optimal parameters for different MSMs. They are optimal for the BLS12-377 curve running on an NVIDIA GeForce RTX 3090 Ti. This is the configuration used:
```cpp
msm::MSMConfig config = {
ctx, // DeviceContext
N, // points_size
precomp_factor, // precompute_factor
user_c, // c
0, // bitsize
10, // large_bucket_factor
batch_size, // batch_size
false, // are_scalars_on_device
false, // are_scalars_montgomery_form
true, // are_points_on_device
false, // are_points_montgomery_form
true, // are_results_on_device
false, // is_big_triangle
true // is_async
};
```
Here are the parameters and the results for the different cases:
| MSM size (log2) | Batch size | Precompute factor | c | Memory estimation (GB) | Actual memory (GB) | Single MSM time (ms) |
| --- | --- | --- | --- | --- | --- | --- |
| 10 | 1 | 1 | 9 | 0.00227 | 0.00277 | 9.2 |
| 10 | 1 | 23 | 11 | 0.00259 | 0.00272 | 1.76 |
| 10 | 1000 | 1 | 7 | 0.94 | 1.09 | 0.051 |
| 10 | 1000 | 23 | 11 | 2.59 | 2.74 | 0.025 |
| 15 | 1 | 1 | 11 | 0.011 | 0.019 | 9.9 |
| 15 | 1 | 16 | 16 | 0.061 | 0.065 | 2.4 |
| 15 | 100 | 1 | 11 | 1.91 | 1.92 | 0.84 |
| 15 | 100 | 19 | 14 | 6.32 | 6.61 | 0.56 |
| 18 | 1 | 1 | 14 | 0.128 | 0.128 | 14.4 |
| 18 | 1 | 15 | 17 | 0.40 | 0.42 | 5.9 |
| 22 | 1 | 1 | 17 | 1.64 | 1.65 | 68 |
| 22 | 1 | 13 | 21 | 5.67 | 5.94 | 54 |
| 24 | 1 | 1 | 18 | 6.58 | 6.61 | 232 |
| 24 | 1 | 7 | 21 | 12.4 | 13.4 | 199 |
The optimal values can vary per GPU and per curve. It is best to try a few combinations until you get the best results for your specific case.

View File

@@ -0,0 +1,159 @@
# NTT - Number Theoretic Transform
The Number Theoretic Transform (NTT) is a variant of the Fourier Transform used over finite fields, particularly those of integers modulo a prime number. NTT operates in a discrete domain and is used primarily in applications requiring modular arithmetic, such as cryptography and polynomial multiplication.
NTT is defined similarly to the Discrete Fourier Transform (DFT), but instead of using complex roots of unity, it uses roots of unity within a finite field. The definition hinges on the properties of the finite field, specifically the existence of a primitive root of unity of order $N$ (where $N$ is typically a power of 2), and the modulo operation is performed with respect to a specific prime number that supports these roots.
Formally, given a sequence of integers $a_0, a_1, ..., a_{N-1}$, the NTT of this sequence is another sequence of integers $A_0, A_1, ..., A_{N-1}$, computed as follows:
$$
A_k = \sum_{n=0}^{N-1} a_n \cdot \omega^{nk} \mod p
$$
where:
- $N$ is the size of the input sequence and is a power of 2,
- $p$ is a prime number such that $p = kN + 1$ for some integer $k$, ensuring that $p$ supports the existence of $N$th roots of unity,
- $\omega$ is a primitive $N$th root of unity modulo $p$, meaning $\omega^N \equiv 1 \mod p$ and no smaller positive power of $\omega$ is congruent to 1 modulo $p$,
- $k$ ranges from 0 to $N-1$, and it indexes the output sequence.
NTT is particularly useful because it enables efficient polynomial multiplication under modulo arithmetic, crucial for algorithms in cryptographic protocols and other areas requiring fast modular arithmetic operations.
There is also INTT, the inverse of NTT. INTT takes an NTT output sequence and reconstructs the original sequence.
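A direct way to internalize the definition is an $O(N^2)$ evaluation of the sum above. The following toy sketch uses the small prime $p = 17$ with $N = 4$ and primitive 4th root of unity $\omega = 4$, chosen for illustration only.
```rust
// Naive O(N^2) NTT straight from the definition: A_k = Σ a_n ω^{nk} mod p.
fn pow_mod(mut base: u64, mut exp: u64, p: u64) -> u64 {
    let mut acc = 1u64;
    base %= p;
    while exp > 0 {
        if exp & 1 == 1 {
            acc = acc * base % p;
        }
        base = base * base % p;
        exp >>= 1;
    }
    acc
}

fn ntt_naive(a: &[u64], omega: u64, p: u64) -> Vec<u64> {
    let n = a.len() as u64;
    (0..n)
        .map(|k| {
            (0..n)
                .map(|j| a[j as usize] * pow_mod(omega, j * k, p) % p)
                .sum::<u64>()
                % p
        })
        .collect()
}

fn main() {
    // p = 17, N = 4: ω = 4 satisfies ω^4 ≡ 1 (mod 17) while ω^2 = 16 ≠ 1,
    // so it is a primitive 4th root of unity.
    println!("{:?}", ntt_naive(&[1, 2, 3, 4], 4, 17)); // [10, 7, 15, 6]
}
```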
## Using NTT
### Supported Bindings
- [Golang](../golang-bindings/ntt.md)
- [Rust](../rust-bindings/ntt.md)
### Examples
- [Rust API examples](https://github.com/ingonyama-zk/icicle/blob/d84ffd2679a4cb8f8d1ac2ad2897bc0b95f4eeeb/examples/rust/ntt/src/main.rs#L1)
- [C++ API examples](https://github.com/ingonyama-zk/icicle/blob/d84ffd2679a4cb8f8d1ac2ad2897bc0b95f4eeeb/examples/c%2B%2B/ntt/example.cu#L1)
### Ordering
The `Ordering` enum defines how inputs and outputs are arranged for the NTT operation, offering flexibility in handling data according to different algorithmic needs or compatibility requirements. It primarily affects the sequencing of data points for the transform, which can influence both performance and the compatibility with certain algorithmic approaches. The available ordering options are:
- **`kNN` (Natural-Natural):** Both inputs and outputs are in their natural order. This is the simplest form of ordering, where data is processed in the sequence it is given, without any rearrangement.
- **`kNR` (Natural-Reversed):** Inputs are in natural order, while outputs are in bit-reversed order. This ordering is typically used in algorithms that benefit from having the output in a bit-reversed pattern.
- **`kRN` (Reversed-Natural):** Inputs are in bit-reversed order, and outputs are in natural order. This is often used with the Cooley-Tukey FFT algorithm.
- **`kRR` (Reversed-Reversed):** Both inputs and outputs are in bit-reversed order.
- **`kNM` (Natural-Mixed):** Inputs are provided in their natural order, while outputs are arranged in a digit-reversed (mixed) order. This ordering is good for mixed radix NTT operations, where the mixed or digit-reversed ordering of outputs is a generalization of the bit-reversal pattern seen in simpler, radix-2 cases.
- **`kMN` (Mixed-Natural):** Inputs are in a digit-reversed (mixed) order, while outputs are restored to their natural order. This ordering would primarily be used for mixed-radix NTTs.
Choosing an algorithm is heavily dependent on your use case. For example, Cooley-Tukey will often use `kRN` and Gentleman-Sande often uses `kNR`.
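For intuition, "bit-reversed order" means element $i$ moves to the position given by reversing the $\log_2 N$ bits of $i$. A small sketch:
```rust
// Indices of an array of length 2^log_n in bit-reversed order.
fn bit_reversed_indices(log_n: u32) -> Vec<usize> {
    (0..1usize << log_n)
        .map(|i| i.reverse_bits() >> (usize::BITS - log_n))
        .collect()
}

fn main() {
    // For N = 8: [0, 4, 2, 6, 1, 5, 3, 7]
    println!("{:?}", bit_reversed_indices(3));
}
```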
### Modes
NTT also supports two modes: `Batch NTT` and `Single NTT`.
Deciding whether to use `batch NTT` vs `single NTT` is highly dependent on your application and use case.
#### Single NTT
Single NTT will launch a single NTT computation.
Choose this mode when your application requires processing individual NTT operations in isolation.
#### Batch NTT Mode
Batch NTT allows you to run many NTTs with a single API call. Batch NTT mode can significantly reduce read/write times as well as computation overhead by executing multiple NTT operations in parallel. Batch mode may also offer better utilization of computational resources (memory and compute).
## Supported algorithms
Our NTT implementation supports two algorithms: `radix-2` and `mixed-radix`.
### Radix 2
At its core, the Radix-2 NTT algorithm divides the problem into smaller sub-problems, leveraging "divide and conquer" to reduce the overall computational complexity. The algorithm operates on sequences whose lengths are powers of two; a recursive sketch follows the four steps below.
1. **Input Preparation:**
The input is a sequence of integers $a_0, a_1, \ldots, a_{N-1}$, where $N$ is a power of two.
2. **Recursive Decomposition:**
The algorithm recursively divides the input sequence into smaller sequences. At each step, it separates the sequence into even-indexed and odd-indexed elements, forming two subsequences that are then processed independently.
3. **Butterfly Operations:**
The core computational element of the Radix-2 NTT is the "butterfly" operation, which combines pairs of elements from the sequences obtained in the decomposition step.
Each butterfly operation involves multiplication by a "twiddle factor," which is a root of unity in the finite field, and addition or subtraction of the results, all performed modulo the prime modulus.
$$
X_k = (A_k + B_k \cdot W^k) \mod p
$$
$X_k$ - The output of the butterfly operation for the $k$-th element
$A_k$ - an element from the even-indexed subset
$B_k$ - an element from the odd-indexed subset
$p$ - prime modulus
$k$ - The index of the current operation within the butterfly or the transform stage
The twiddle factors are precomputed to save runtime and improve performance.
4. **Bit-Reversal Permutation:**
A final step involves rearranging the output sequence into the correct order. Due to the halving process in the decomposition steps, the elements of the transformed sequence are initially in a bit-reversed order. A bit-reversal permutation is applied to obtain the final sequence in natural order.
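As a concrete illustration of steps 1–3, here is a compact recursive sketch over a toy prime field (u64 arithmetic with a small modulus assumed so products don't overflow; illustrative only, not the ICICLE kernel). In this recursive form the output comes out in natural order; the bit-reversal permutation of step 4 appears when the same algorithm is implemented iteratively in place.
```rust
// Recursive radix-2 NTT over u64 mod p. `omega` must be a primitive
// n-th root of unity mod p for n = a.len(), a power of two.
fn ntt_radix2(a: &[u64], omega: u64, p: u64) -> Vec<u64> {
    let n = a.len();
    if n == 1 {
        return a.to_vec();
    }
    // Step 2: even/odd decomposition; sub-transforms use omega^2.
    let evens: Vec<u64> = a.iter().step_by(2).copied().collect();
    let odds: Vec<u64> = a.iter().skip(1).step_by(2).copied().collect();
    let e = ntt_radix2(&evens, omega * omega % p, p);
    let o = ntt_radix2(&odds, omega * omega % p, p);
    // Step 3: butterflies X_k = A_k + W^k B_k and X_{k+n/2} = A_k - W^k B_k (mod p).
    let mut out = vec![0u64; n];
    let mut w = 1u64; // twiddle factor W^k
    for k in 0..n / 2 {
        let t = w * o[k] % p;
        out[k] = (e[k] + t) % p;
        out[k + n / 2] = (e[k] + p - t) % p;
        w = w * omega % p;
    }
    out
}

fn main() {
    // Same toy parameters as the naive example: p = 17, ω = 4, N = 4.
    println!("{:?}", ntt_radix2(&[1, 2, 3, 4], 4, 17)); // [10, 7, 15, 6]
}
```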
### Mixed Radix
The Mixed Radix NTT algorithm extends the concepts of the Radix-2 algorithm by allowing the decomposition of the input sequence based on various factors of its length. Specifically, ICICLE's implementation splits the input into blocks of size 16, 32, or 64, whereas radix-2 always splits until it ends with NTTs of size 2. This approach offers enhanced flexibility and efficiency, especially for input sizes that are composite numbers, by leveraging the "divide and conquer" strategy across multiple radices.
The NTT blocks in Mixed Radix are implemented more efficiently, based on the Winograd NTT, and memory and register usage is also better than in Radix-2.
Mixed Radix can reduce the number of stages required to compute for large inputs.
1. **Input Preparation:**
The input to the Mixed Radix NTT is a sequence of integers $a_0, a_1, \ldots, a_{N-1}$, where $N$ is not strictly required to be a power of two. Instead, $N$ can be any composite number, ideally factorized into primes or powers of primes.
2. **Factorization and Decomposition:**
Unlike the Radix-2 algorithm, which strictly divides the computational problem into halves, the Mixed Radix NTT algorithm implements a flexible decomposition approach which isn't limited to prime factorization.
For example, an NTT of size 256 can be decomposed into two stages of $16 \times \text{NTT}_{16}$, leveraging a composite factorization strategy rather than decomposing into eight stages of $\text{NTT}_{2}$. This exemplifies the use of composite factors (in this case, $256 = 16 \times 16$) to apply smaller NTT transforms, optimizing computational efficiency by adapting the decomposition strategy to the specific structure of $N$.
3. **Butterfly Operations with Multiple Radices:**
The Mixed Radix algorithm utilizes butterfly operations for various radix sizes. Each sub-transform involves specific butterfly operations characterized by multiplication with twiddle factors appropriate for the radix in question.
The generalized butterfly operation for a radix-$r$ element can be expressed as:
$$
X_{k,r} = \sum_{j=0}^{r-1} (A_{j,k} \cdot W^{jk}) \mod p
$$
where:
$X_{k,r}$ - the output of the radix-$r$ butterfly operation for the $k$-th set of inputs
$A_{j,k}$ - the $j$-th input element for the $k$-th operation
$W$ - the twiddle factor
$p$ - the prime modulus
4. **Recombination and Reordering:**
After applying the appropriate butterfly operations across all decomposition levels, the Mixed Radix algorithm recombines the results into a single output sequence. Due to the varied sizes of the sub-transforms, a more complex reordering process may be required compared to Radix-2. This involves digit-reversal permutations to ensure that the final output sequence is correctly ordered.
### Which algorithm should I choose?
Both work only on inputs whose size is a power of 2 (e.g., 256, 512, 1024).
Radix-2 is faster for small NTTs. A small NTT would be around $\log_2 N = 16$ with batch size 1. Radix-2 won't necessarily perform better for smaller `logn` with larger batches.
Mixed radix on the other hand works better for larger NTTs with larger input sizes.
Performance really depends on logn size, batch size, ordering, inverse, coset, coeff-field and which GPU you are using.
For this reason we implemented our [heuristic auto-selection](https://github.com/ingonyama-zk/icicle/blob/main/icicle/src/ntt/ntt.cu#L573) which should choose the most efficient algorithm in most cases.
We still recommend you benchmark for your specific use case if you think a different configuration would yield better results.

View File

@@ -0,0 +1,12 @@
# ICICLE Primitives
This section of the documentation is dedicated to the ICICLE primitives. We will cover the usage and internal details of our primitives, such as hashing algorithms, MSM and NTT.
## Supported primitives
- [MSM](./msm.md)
- [NTT](./ntt.md)
- [Keccak Hash](./keccak.md)
- [Poseidon Hash](./poseidon.md)

View File

@@ -0,0 +1,216 @@
# Poseidon
[Poseidon](https://eprint.iacr.org/2019/458.pdf) is a popular hash in the ZK ecosystem primarily because it's optimized to work over large prime fields, a common setting for ZK proofs, thereby minimizing the number of multiplicative operations required.
Poseidon has also been specifically designed to be efficient when implemented within ZK circuits; it uses far fewer constraints than hash functions like Keccak or SHA-256 in the context of ZK circuits.
Poseidon has been used in many popular ZK protocols such as Filecoin and [Plonk](https://drive.google.com/file/d/1bZZvKMQHaZGA4L9eZhupQLyGINkkFG_b/view?usp=drive_open).
Our Poseidon implementation follows the optimized [Filecoin version](https://spec.filecoin.io/algorithms/crypto/poseidon/).
Let's understand how Poseidon works.
## Initialization
Poseidon starts with the initialization of its internal state, which is composed of the input elements and some pre-generated constants. An initial round constant is added to each element of the internal state. Adding the round constants ensures the state is properly mixed from the beginning.
This is done to prevent collisions and to prevent certain cryptographic attacks by ensuring that the internal state is sufficiently mixed and unpredictable.
![Poseidon initialization of internal state added with pre-generated round constants](https://github.com/ingonyama-zk/icicle/assets/122266060/52257f5d-6097-47c4-8f17-7b6449b9d162)
## Applying full and partial rounds
To generate a secure hash output, the algorithm goes through a series of "full rounds" and "partial rounds" as well as transformations between these sets of rounds in the following order:
```First full rounds -> apply S-box and Round constants -> partial rounds -> Last full rounds -> Apply S-box```
### Full rounds
![Full round iterations consisting of S box operations, adding round constants, and a Full MDS matrix multiplication](https://github.com/ingonyama-zk/icicle/assets/122266060/e4ce0e98-b90b-4261-b83e-3cd8cce069cb)
**Uniform Application of S-box:** In full rounds, the S-box (a non-linear transformation) is applied uniformly to every element of the hash function's internal state. This ensures a high degree of mixing and diffusion, contributing to the hash function's security. The S-box raises each element of the state to a certain power `α` over the finite field defined by the prime `p`; `α` can differ depending on the implementation and user configuration.
**Linear Transformation:** After applying the S-box, a linear transformation is performed on the state. This involves multiplying the state by an MDS (Maximum Distance Separable) matrix, which further diffuses the transformations applied by the S-box across the entire state.
**Addition of Round Constants:** Each element of the state is then modified by adding a unique round constant. These constants are different for each round and are precomputed as part of the hash function's initialization. The addition of round constants ensures that even minor changes to the input produce significant differences in the output.
### Partial Rounds
![Partial round iterations consisting of selective S box operation, adding a round constant and performing an MDS multiplication with a sparse matrix](https://github.com/ingonyama-zk/icicle/assets/122266060/e8c198b4-7aa4-4b4d-9ec4-604e39e07692)
**Selective Application of S-Box:** Partial rounds apply the S-box transformation to only one element of the internal state per round, rather than to all elements. This selective application significantly reduces the computational complexity of the hash function without compromising its security. The choice of which element to apply the S-box to can follow a specific pattern or be fixed, depending on the design of the hash function.
**Linear Transformation and Round Constants:** A linear transformation is performed and round constants are added. The linear transformation in partial rounds can be designed to be less computationally intensive (this is done by using a sparse matrix) than in full rounds, further optimizing the function's efficiency.
Users of Poseidon can often choose how many partial and full rounds to apply; more full rounds increase security but degrade performance. The choice and balance are highly dependent on the use case.
## Using Poseidon
ICICLE Poseidon is implemented for GPU and parallelization is performed for each element of the state rather than for each state.
What that means is we calculate multiple hash-sums over multiple pre-images in parallel, rather than going block by block over the input vector.
So for Poseidon of arity 2 and an input of size 1024 * 2, we would expect 1024 elements of output: each block is of size 2, resulting in 1024 Poseidon hashes being performed.
### Supported Bindings
[`Go`](https://github.com/ingonyama-zk/icicle/blob/main/wrappers/golang/curves/bn254/poseidon/poseidon.go)
[`Rust`](https://github.com/ingonyama-zk/icicle/tree/main/wrappers/rust/icicle-core/src/poseidon)
### Constants
Poseidon is extremely customizable and using different constants will produce different hashes, security levels and performance results.
We support pre-calculated and optimized constants for each of the [supported curves](../core#supported-curves-and-operations). The constants can be found [here](https://github.com/ingonyama-zk/icicle/tree/main/icicle/include/poseidon/constants) and are labeled clearly per curve `<curve_name>_poseidon.h`.
If you wish to generate your own constants you can use our Python script, which can be found [here](https://github.com/ingonyama-zk/icicle/tree/main/icicle/include/poseidon/constants/generate_parameters.py).
Prerequisites:
- Install Python 3
- `pip install poseidon-hash`
- `pip install galois==0.3.7`
- `pip install numpy`
You will then need to modify the following values before running the script.
```python
# Modify these
arity = 11 # we support arity 2, 4, 8 and 11.
p = 0x73EDA753299D7D483339D80809A1D80553BDA402FFFE5BFEFFFFFFFF00000001 # bls12-381
# p = 0x12ab655e9a2ca55660b44d1e5c37b00159aa76fed00000010a11800000000001 # bls12-377
# p = 0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001 # bn254
# p = 0x1ae3a4617c510eac63b05c06ca1493b1a22d9f300f5138f1ef3622fba094800170b5d44300000008508c00000000001 # bw6-761
prime_bit_len = 255
field_bytes = 32
...
# primitive_element = None
primitive_element = 7 # bls12-381
# primitive_element = 22 # bls12-377
# primitive_element = 5 # bn254
# primitive_element = 15 # bw6-761
```
### Rust API
This is the most basic way to use the Poseidon API.
```rust
let test_size = 1 << 10;
let arity = 2u32;
let ctx = get_default_device_context();
let poseidon = Poseidon::load(arity, &ctx).unwrap();
let config = HashConfig::default();
let inputs = vec![F::one(); test_size * arity as usize];
let outputs = vec![F::zero(); test_size];
let mut input_slice = HostOrDeviceSlice::on_host(inputs);
let mut output_slice = HostOrDeviceSlice::on_host(outputs);
poseidon.hash_many::<F>(
&mut input_slice,
&mut output_slice,
test_size as u32,
arity as u32,
1, // Output length
&config,
)
.unwrap();
```
The `HashConfig` can be modified; by default, for example, the inputs and outputs are set to be on the `Host`.
```rust
impl<'a> Default for HashConfig<'a> {
fn default() -> Self {
let ctx = get_default_device_context();
Self {
ctx,
are_inputs_on_device: false,
are_outputs_on_device: false,
is_async: false,
}
}
}
```
In the example above `Poseidon::load(arity, &ctx).unwrap();` is used which will load the correct constants based on arity and curve. It's possible to [generate](#constants) your own constants and load them.
```rust
let ctx = get_default_device_context();
let custom_poseidon = Poseidon::new(
arity, // The arity of poseidon hash. The width will be equal to arity + 1
alpha, // The S-box power
full_rounds_half,
partial_rounds,
round_constants,
mds_matrix,
non_sparse_matrix,
sparse_matrices,
domain_tag,
ctx,
)
.unwrap();
```
## The Tree Builder
The tree builder allows you to build Merkle trees using Poseidon.
You can define both the tree's `height` and its `arity`. The tree `height` determines the number of layers in the tree, including the root and the leaf layer. The `arity` determines how many children each internal node can have.
```rust
use icicle_bn254::tree::Bn254TreeBuilder;
use icicle_bn254::poseidon::Poseidon;
let mut config = TreeBuilderConfig::default();
let arity = 2;
config.arity = arity as u32;
let input_block_len = arity;
let leaves = vec![F::one(); (1 << height) * arity];
let mut digests = vec![F::zero(); merkle_tree_digests_len((height + 1) as u32, arity as u32, 1)];
let leaves_slice = HostSlice::from_slice(&leaves);
let digests_slice = HostSlice::from_mut_slice(&mut digests);
let ctx = device_context::DeviceContext::default();
let hash = Poseidon::load(2, &ctx).unwrap();
let mut config = TreeBuilderConfig::default();
config.keep_rows = 5;
Bn254TreeBuilder::build_merkle_tree(
leaves_slice,
digests_slice,
height,
input_block_len,
&hash,
&hash,
&config,
)
.unwrap();
```
Similar to Poseidon, you can also configure the Tree Builder via `TreeBuilderConfig::default()`:
- `keep_rows`: The number of rows which will be written to output, 0 will write all rows.
- `are_inputs_on_device`: Have the inputs been loaded to device memory?
- `is_async`: Should the TreeBuilder run asynchronously? `False` will block the current CPU thread. `True` will require you to call `cudaStreamSynchronize` or `cudaDeviceSynchronize` to retrieve the result.
### Benchmarks
We ran the Poseidon tree builder on:
**CPU**: 12th Gen Intel(R) Core(TM) i9-12900K
**GPU**: RTX 3090 Ti
**Tree height**: 30 (2^29 elements)
The benchmarks include copying data from and to the device.
| Rows to keep parameter | Run time, ICICLE | Supranational PC2 |
| ----------- | ----------- | ----------- |
| 10 | 9.4 seconds | 13.6 seconds |
| 20 | 9.5 seconds | 13.6 seconds |
| 29 | 13.7 seconds | 13.6 seconds |

View File

@@ -0,0 +1,88 @@
# Poseidon2
[Poseidon2](https://eprint.iacr.org/2023/323) is a recently released optimized version of Poseidon1. The two versions differ in two crucial points. First, Poseidon is a sponge hash function, while Poseidon2 can be either a sponge or a compression function depending on the use case. Secondly, Poseidon2 is instantiated by new and more efficient linear layers with respect to Poseidon. These changes decrease the number of multiplications in the linear layer by up to 90% and the number of constraints in Plonk circuits by up to 70%. This makes Poseidon2 currently the fastest arithmetization-oriented hash function without lookups.
## Using Poseidon2
ICICLE Poseidon2 is implemented for GPU and parallelization is performed for each state.
We calculate multiple hash-sums over multiple pre-images in parallel, rather than going block by block over the input vector.
For example, for Poseidon2 of width 16, input rate 8, 8 output elements and an input of size 1024 * 8, we would expect 1024 * 8 elements of output: each input block is of size 8, resulting in 1024 Poseidon2 hashes being performed.
### Supported Bindings
[`Rust`](https://github.com/ingonyama-zk/icicle/tree/main/wrappers/rust/icicle-core/src/poseidon2)
### Constants
Poseidon2 is also extremely customizable and using different constants will produce different hashes, security levels and performance results.
We support pre-calculated constants for each of the [supported curves](../core#supported-curves-and-operations). The constants can be found [here](https://github.com/ingonyama-zk/icicle/tree/main/icicle/include/poseidon2/constants) and are labeled clearly per curve `<curve_name>_poseidon2.h`.
You can also use your own set of constants as shown [here](https://github.com/ingonyama-zk/icicle/blob/main/wrappers/rust/icicle-fields/icicle-babybear/src/poseidon2/mod.rs#L290)
### Rust API
This is the most basic way to use the Poseidon2 API.
```rust
let test_size = 1 << 10;
let width = 16;
let rate = 8;
let ctx = get_default_device_context();
let poseidon = Poseidon2::load(width, rate, MdsType::Default, DiffusionStrategy::Default, &ctx).unwrap();
let config = HashConfig::default();
let inputs = vec![F::one(); test_size * rate as usize];
let outputs = vec![F::zero(); test_size];
let mut input_slice = HostOrDeviceSlice::on_host(inputs);
let mut output_slice = HostOrDeviceSlice::on_host(outputs);
poseidon.hash_many::<F>(
&mut input_slice,
&mut output_slice,
test_size as u32,
rate as u32,
8, // Output length
&config,
)
.unwrap();
```
In the example above `Poseidon2::load(width, rate, MdsType::Default, DiffusionStrategy::Default, &ctx).unwrap();` is used to load the correct constants based on width and curve. Here, the default MDS matrices and diffusion are used. If you want to get a Plonky3 compliant version, set them to `MdsType::Plonky` and `DiffusionStrategy::Montgomery` respectively.
## The Tree Builder
Similar to Poseidon1, you can use Poseidon2 in a tree builder.
```rust
use icicle_bn254::tree::Bn254TreeBuilder;
use icicle_bn254::poseidon2::Poseidon2;
let mut config = TreeBuilderConfig::default();
let arity = 2;
config.arity = arity as u32;
let input_block_len = arity;
let leaves = vec![F::one(); (1 << height) * arity];
let mut digests = vec![F::zero(); merkle_tree_digests_len((height + 1) as u32, arity as u32, 1)];
let leaves_slice = HostSlice::from_slice(&leaves);
let digests_slice = HostSlice::from_mut_slice(&mut digests);
let ctx = device_context::DeviceContext::default();
let hash = Poseidon2::load(arity, arity, MdsType::Default, DiffusionStrategy::Default, &ctx).unwrap();
let mut config = TreeBuilderConfig::default();
config.keep_rows = 5;
Bn254TreeBuilder::build_merkle_tree(
leaves_slice,
digests_slice,
height,
input_block_len,
&hash,
&hash,
&config,
)
.unwrap();
```

View File

@@ -0,0 +1,87 @@
# Rust bindings
Rust bindings allow you to use ICICLE as a Rust library.
`icicle-core` defines all interfaces, macros and common methods.
`icicle-cuda-runtime` defines DeviceContext which can be used to manage a specific GPU as well as wrapping common CUDA methods.
`icicle-curves` implements all interfaces and macros from icicle-core for each curve. For example icicle-bn254 implements curve bn254. Each curve has its own build script which will build the CUDA libraries for that curve as part of the rust-toolchain build.
## Using ICICLE Rust bindings in your project
Simply add the following to your `Cargo.toml`.
```toml
# GPU Icicle integration
icicle-cuda-runtime = { git = "https://github.com/ingonyama-zk/icicle.git" }
icicle-core = { git = "https://github.com/ingonyama-zk/icicle.git" }
icicle-bn254 = { git = "https://github.com/ingonyama-zk/icicle.git" }
```
`icicle-bn254` is the curve you wish to use, while `icicle-core` and `icicle-cuda-runtime` contain ICICLE utilities and CUDA wrappers.
If you wish to point to a specific ICICLE branch add `branch = "<name_of_branch>"` or `tag = "<name_of_tag>"` to the ICICLE dependency. For a specific commit add `rev = "<commit_id>"`.
When you build your project ICICLE will be built as part of the build command.
## How do the rust bindings work?
The Rust bindings are just Rust wrappers for the ICICLE Core static libraries. We integrate the compilation of the static libraries into Rust's toolchain to make usage seamless and easy. This is achieved by [extending Rust's build command](https://github.com/ingonyama-zk/icicle/blob/main/wrappers/rust/icicle-curves/icicle-bn254/build.rs).
```rust
use cmake::Config;
use std::env::var;
fn main() {
println!("cargo:rerun-if-env-changed=CXXFLAGS");
println!("cargo:rerun-if-changed=../../../../icicle");
let cargo_dir = var("CARGO_MANIFEST_DIR").unwrap();
let profile = var("PROFILE").unwrap();
let out_dir = Config::new("../../../../icicle")
.define("BUILD_TESTS", "OFF") //TODO: feature
.define("CURVE", "bn254")
.define("CMAKE_BUILD_TYPE", "Release")
.build_target("icicle")
.build();
println!("cargo:rustc-link-search={}/build", out_dir.display());
println!("cargo:rustc-link-lib=ingo_bn254");
println!("cargo:rustc-link-lib=stdc++");
// println!("cargo:rustc-link-search=native=/usr/local/cuda/lib64");
println!("cargo:rustc-link-lib=cudart");
}
```
## Supported curves, fields and operations
### Supported curves and operations
| Operation\Curve | bn254 | bls12_377 | bls12_381 | bw6-761 | grumpkin |
| --- | :---: | :---: | :---: | :---: | :---: |
| MSM | ✅ | ✅ | ✅ | ✅ | ✅ |
| G2 | ✅ | ✅ | ✅ | ✅ | ❌ |
| NTT | ✅ | ✅ | ✅ | ✅ | ❌ |
| ECNTT | ✅ | ✅ | ✅ | ✅ | ❌ |
| VecOps | ✅ | ✅ | ✅ | ✅ | ✅ |
| Polynomials | ✅ | ✅ | ✅ | ✅ | ❌ |
| Poseidon | ✅ | ✅ | ✅ | ✅ | ✅ |
| Merkle Tree | ✅ | ✅ | ✅ | ✅ | ✅ |
### Supported fields and operations
| Operation\Field | babybear | stark252 |
| --- | :---: | :---: |
| VecOps | ✅ | ✅ |
| Polynomials | ✅ | ✅ |
| NTT | ✅ | ✅ |
| Extension Field | ✅ | ❌ |
### Supported hashes
| Hash | Sizes |
| --- | :---: |
| Keccak | 256, 512 |

View File

@@ -0,0 +1,31 @@
# ECNTT
## ECNTT Method
The `ecntt` function computes the Elliptic Curve Number Theoretic Transform (EC-NTT) or its inverse on a batch of points of a curve.
```rust
pub fn ecntt<C: Curve>(
input: &(impl HostOrDeviceSlice<Projective<C>> + ?Sized),
dir: NTTDir,
cfg: &NTTConfig<C::ScalarField>,
output: &mut (impl HostOrDeviceSlice<Projective<C>> + ?Sized),
) -> IcicleResult<()>
where
C::ScalarField: FieldImpl,
<C::ScalarField as FieldImpl>::Config: ECNTT<C>,
{
// ... function implementation ...
}
```
## Parameters
- **`input`**: The input data as a slice of `Projective<C>`. This represents points on a specific elliptic curve `C`.
- **`dir`**: The direction of the NTT. It can be `NTTDir::kForward` for forward NTT or `NTTDir::kInverse` for inverse NTT.
- **`cfg`**: The NTT configuration object of type `NTTConfig<C::ScalarField>`. This object specifies parameters for the NTT computation, such as the batch size and algorithm to use.
- **`output`**: The output buffer to write the results into. This should be a slice of `Projective<C>` with the same size as the input.
## Return Value
- **`IcicleResult<()>`**: This function returns an `IcicleResult` which is a wrapper type that indicates success or failure of the NTT computation. On success, it contains `Ok(())`.

View File

@@ -0,0 +1,96 @@
# Keccak
## Keccak Example
```rust
use icicle_cuda_runtime::memory::{DeviceVec, HostSlice};
use icicle_hash::keccak::{keccak256, HashConfig};
use rand::{self, Rng};
fn main() {
let mut rng = rand::thread_rng();
let initial_data: Vec<u8> = (0..120).map(|_| rng.gen::<u8>()).collect();
println!("initial data: {}", hex::encode(&initial_data));
let input = HostSlice::<u8>::from_slice(initial_data.as_slice());
let mut output = DeviceVec::<u8>::cuda_malloc(32).unwrap();
let mut config = HashConfig::default();
keccak256(input, initial_data.len() as i32, 1, &mut output[..], &mut config).expect("Failed to execute keccak256 hashing");
let mut output_host = vec![0_u8; 32];
output.copy_to_host(HostSlice::from_mut_slice(&mut output_host[..])).unwrap();
println!("keccak256 result: {}", hex::encode(&output_host));
}
```
## Keccak Methods
```rust
pub fn keccak256(
input: &(impl HostOrDeviceSlice<u8> + ?Sized),
input_block_size: i32,
number_of_blocks: i32,
output: &mut (impl HostOrDeviceSlice<u8> + ?Sized),
config: &mut HashConfig,
) -> IcicleResult<()>
pub fn keccak512(
input: &(impl HostOrDeviceSlice<u8> + ?Sized),
input_block_size: i32,
number_of_blocks: i32,
output: &mut (impl HostOrDeviceSlice<u8> + ?Sized),
config: &mut HashConfig,
) -> IcicleResult<()>
```
### Parameters
- **`input`**: A slice containing the input data for the Keccak256 hash function. It can reside in either host memory or device memory.
- **`input_block_size`**: An integer specifying the size of the input data for a single hash.
- **`number_of_blocks`**: An integer specifying the number of results in the hash batch.
- **`output`**: A slice where the resulting hash will be stored. This slice can be in host or device memory.
- **`config`**: A pointer to a `HashConfig` object, which contains various configuration options for the Keccak256 operation.
### Return Value
- **`IcicleResult`**: Returns a CUDA error code indicating the success or failure of the Keccak256/Keccak512 operation.
## HashConfig
The `HashConfig` structure holds configuration parameters for the Keccak256/Keccak512 operation, allowing customization of its behavior to optimize performance based on the specifics of the operation or the underlying hardware.
```rust
pub struct HashConfig<'a> {
pub ctx: DeviceContext<'a>,
pub are_inputs_on_device: bool,
pub are_outputs_on_device: bool,
pub is_async: bool,
}
```
### Fields
- **`ctx`**: Device context containing details like device id and stream.
- **`are_inputs_on_device`**: Indicates if input data is located on the device.
- **`are_outputs_on_device`**: Indicates if output hash is stored on the device.
- **`is_async`**: If true, runs the Keccak256/Keccak512 operation asynchronously.
### Usage
Example initialization with default settings:
```rust
let default_config = HashConfig::default();
```
Customizing the configuration:
```rust
let custom_config = HashConfig {
ctx: custom_device_context,
are_inputs_on_device: true,
are_outputs_on_device: true,
is_async: false,
};
```

View File

@@ -0,0 +1,45 @@
# MSM Pre computation
To understand the theory behind MSM pre computation technique refer to Niall Emmart's [talk](https://youtu.be/KAWlySN7Hm8?feature=shared&t=1734).
## `precompute_points`
Precomputes bases for the multi-scalar multiplication (MSM) by extending each base point with its multiples, facilitating more efficient MSM calculations.
```rust
pub fn precompute_points<C: Curve + MSM<C>>(
points: &(impl HostOrDeviceSlice<Affine<C>> + ?Sized),
msm_size: i32,
cfg: &MSMConfig,
output_bases: &mut DeviceSlice<Affine<C>>,
) -> IcicleResult<()>
```
### Parameters
- **`points`**: The original set of affine points (\(P_1, P_2, ..., P_n\)) to be used in the MSM. For batch MSM operations, this should include all unique points concatenated together.
- **`msm_size`**: The size of a single MSM, used to determine optimal parameters.
- **`cfg`**: The MSM configuration parameters.
- **`output_bases`**: The output buffer for the extended bases. Its size must be `points.len() * precompute_factor`. This buffer should be allocated on the device for GPU computations.
#### Returns
`Ok(())` if the operation is successful, or an `IcicleResult` error otherwise.
#### Description
This function extends each provided base point $P$ with its multiples $2^lP, 2^{2l}P, \ldots, 2^{(\text{precompute\_factor} - 1) \cdot l}P$, where $l$ is a level of precomputation determined by the `precompute_factor`. The extended set of points facilitates faster MSM computations by allowing the MSM algorithm to leverage precomputed multiples of base points, reducing the number of point additions required during the computation.
The precomputation process is crucial for optimizing MSM operations, especially when dealing with large sets of points and scalars. By precomputing and storing multiples of the base points, the MSM function can more efficiently compute the scalar-point multiplications.
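As a toy illustration of what the extended bases contain, the sketch below computes the multiples for each base point over an additive group of integers mod a prime standing in for EC points; the flat layout is illustrative and not necessarily ICICLE's internal layout.
```rust
// For each base point P, store P, 2^l P, 2^{2l} P, ...,
// precompute_factor entries in total, in a toy additive group.
fn precompute_toy(points: &[u64], precompute_factor: u32, l: u32, modulus: u64) -> Vec<u64> {
    let mut out = Vec::with_capacity(points.len() * precompute_factor as usize);
    for &p in points {
        let mut cur = p;
        for i in 0..precompute_factor {
            if i > 0 {
                for _ in 0..l {
                    cur = (cur + cur) % modulus; // multiply by 2, l times
                }
            }
            out.push(cur);
        }
    }
    out
}

fn main() {
    // One point "5", factor 3, l = 2: stores 5, 4*5 = 20, 16*5 = 80.
    assert_eq!(precompute_toy(&[5], 3, 2, 1_000_003), vec![5, 20, 80]);
}
```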
#### Example Usage
```rust
let cfg = MSMConfig::default();
let precompute_factor = 4; // Number of points to precompute for each base
let expected_size = points.len() * precompute_factor; // output must hold points.len() * precompute_factor bases
let mut extended_bases = HostOrDeviceSlice::cuda_malloc(expected_size).expect("Failed to allocate memory for extended bases");
// Precompute the bases using the specified factor
precompute_points(&points, msm_size, &cfg, &mut extended_bases)
.expect("Failed to precompute bases");
```

View File

@@ -0,0 +1,170 @@
# MSM
## Example
```rust
use icicle_bn254::curve::{CurveCfg, G1Projective, ScalarCfg};
use icicle_core::{curve::Curve, msm, traits::GenerateRandom};
use icicle_cuda_runtime::{memory::HostOrDeviceSlice, stream::CudaStream};
fn main() {
let size: usize = 1 << 10; // Define the number of points and scalars
// Generate random points and scalars
println!("Generating random G1 points and scalars for BN254...");
let points = CurveCfg::generate_random_affine_points(size);
let scalars = ScalarCfg::generate_random(size);
// Wrap points and scalars in HostOrDeviceSlice for MSM
let points_host = HostOrDeviceSlice::Host(points);
let scalars_host = HostOrDeviceSlice::Host(scalars);
// Allocate memory on the CUDA device for MSM results
let mut msm_results: HostOrDeviceSlice<'_, G1Projective> = HostOrDeviceSlice::cuda_malloc(1).expect("Failed to allocate CUDA memory for MSM results");
// Create a CUDA stream for asynchronous execution
let stream = CudaStream::create().expect("Failed to create CUDA stream");
let mut cfg = msm::MSMConfig::default();
cfg.ctx.stream = &stream;
cfg.is_async = true; // Enable asynchronous execution
// Execute MSM on the device
println!("Executing MSM on device...");
msm::msm(&scalars_host, &points_host, &cfg, &mut msm_results).expect("Failed to execute MSM");
// Synchronize CUDA stream to ensure MSM execution is complete
stream.synchronize().expect("Failed to synchronize CUDA stream");
// Optionally, move results to host for further processing or printing
println!("MSM execution complete.");
}
```
## MSM API Overview
```rust
pub fn msm<C: Curve>(
scalars: &HostOrDeviceSlice<C::ScalarField>,
points: &HostOrDeviceSlice<Affine<C>>,
cfg: &MSMConfig,
results: &mut HostOrDeviceSlice<Projective<C>>,
) -> IcicleResult<()>
```
### Parameters
- **`scalars`**: A buffer containing the scalar values to be multiplied with corresponding points.
- **`points`**: A buffer containing the points to be multiplied by the scalars.
- **`cfg`**: MSM configuration specifying additional parameters for the operation.
- **`results`**: A buffer where the results of the MSM operations will be stored.
### MSM Config
```rust
pub struct MSMConfig<'a> {
pub ctx: DeviceContext<'a>,
points_size: i32,
pub precompute_factor: i32,
pub c: i32,
pub bitsize: i32,
pub large_bucket_factor: i32,
batch_size: i32,
are_scalars_on_device: bool,
pub are_scalars_montgomery_form: bool,
are_points_on_device: bool,
pub are_points_montgomery_form: bool,
are_results_on_device: bool,
pub is_big_triangle: bool,
pub is_async: bool,
}
```
- **`ctx: DeviceContext`**: Specifies the device context, device id and the CUDA stream for asynchronous execution.
- **`points_size: i32`**: Number of points in the MSM. Defaults to 0, meaning it equals the MSM size.
- **`precompute_factor: i32`**: Determines the number of extra points to pre-compute for each point, affecting memory footprint and performance.
- **`c: i32`**: The "window bitsize," a parameter controlling the computational complexity and memory footprint of the MSM operation.
- **`bitsize: i32`**: The number of bits of the largest scalar, typically equal to the bit size of the scalar field.
- **`large_bucket_factor: i32`**: Adjusts the algorithm's sensitivity to frequently occurring buckets, useful for non-uniform scalar distributions.
- **`batch_size: i32`**: The number of MSMs to compute in a single batch, for leveraging parallelism.
- **`are_scalars_montgomery_form`**: Set to `true` if scalars are in Montgomery form.
- **`are_points_montgomery_form`**: Set to `true` if points are in Montgomery form.
- **`are_scalars_on_device: bool`**, **`are_points_on_device: bool`**, **`are_results_on_device: bool`**: Indicate whether the corresponding buffers are in device memory.
- **`is_big_triangle`**: If `true`, MSM runs with large triangle accumulation; if `false`, bucket accumulation is used. Default value: `false`.
- **`is_async: bool`**: Whether to perform the MSM operation asynchronously.
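A short sketch of customizing these fields before the call (hedged: the values are illustrative rather than tuned recommendations, and `scalars_host`, `points_host`, and `msm_results` are the buffers from the example above):
```rust
let mut cfg = msm::MSMConfig::default();
cfg.c = 16;                             // window bitsize; the optimum depends on the MSM size
cfg.large_bucket_factor = 10;           // raise sensitivity to frequently occurring buckets
cfg.are_scalars_montgomery_form = true; // inputs are provided in Montgomery form
msm::msm(&scalars_host, &points_host, &cfg, &mut msm_results).expect("Failed to execute MSM");
```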
### Usage
The `msm` function is designed to compute the sum of multiple scalar-point multiplications efficiently. It supports both single MSM operations and batched operations for increased performance. The configuration allows for detailed control over the execution environment and performance characteristics of the MSM operation.
When performing MSM operations, it's crucial to match the size of the `scalars` and `points` arrays correctly and ensure that the `results` buffer is appropriately sized to hold the output. The `MSMConfig` should be set up to reflect the specifics of the operation, including whether the operation should be asynchronous and any device-specific settings.
## How do I toggle between the supported algorithms?
When creating your MSM Config you may state which algorithm you wish to use. `is_big_triangle=true` will activate Large triangle reduction and `is_big_triangle=false` will activate iterative reduction.
```rust
...
let mut cfg_bls12377 = msm::get_default_msm_config::<BLS12377CurveCfg>();
// is_big_triangle will determine which algorithm to use
cfg_bls12377.is_big_triangle = true;
msm::msm(&scalars, &points, &cfg_bls12377, &mut msm_results).unwrap();
...
```
You may reference the rust code [here](https://github.com/ingonyama-zk/icicle/blob/77a7613aa21961030e4e12bf1c9a78a2dadb2518/wrappers/rust/icicle-core/src/msm/mod.rs#L54).
## How do I toggle between MSM modes?
Toggling between MSM modes occurs automatically based on the number of results you are expecting from the `msm::msm` function. If you are expecting an array of `msm_results`, ICICLE will automatically split `scalars` and `points` into equal parts and run them as multiple MSMs in parallel.
```rust
...
let mut msm_result: HostOrDeviceSlice<'_, G1Projective> = HostOrDeviceSlice::cuda_malloc(1).unwrap();
msm::msm(&scalars, &points, &cfg, &mut msm_result).unwrap();
...
```
In the example above, we allocate a single expected result, which the MSM method interprets as `batch_size=1`, running a single MSM.
In the next example, we are expecting 10 results, which sets `batch_size=10` and runs 10 MSMs in batch mode.
```rust
...
let mut msm_results: HostOrDeviceSlice<'_, G1Projective> = HostOrDeviceSlice::cuda_malloc(10).unwrap();
msm::msm(&scalars, &points, &cfg, &mut msm_results).unwrap();
...
```
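For completeness, a self-contained sketch of a batched run (hedged: per the splitting rule above, `scalars` and `points` are given length `msm_size * batch_size` so each of the 10 MSMs gets an equal chunk; types are the BN254 ones from the first example):
```rust
let batch_size = 10;
let msm_size = 1 << 10;
let points = HostOrDeviceSlice::Host(CurveCfg::generate_random_affine_points(msm_size * batch_size));
let scalars = HostOrDeviceSlice::Host(ScalarCfg::generate_random(msm_size * batch_size));
// 10 result slots => batch_size=10 is inferred automatically
let mut msm_results: HostOrDeviceSlice<'_, G1Projective> =
    HostOrDeviceSlice::cuda_malloc(batch_size).unwrap();
let cfg = msm::get_default_msm_config::<CurveCfg>();
msm::msm(&scalars, &points, &cfg, &mut msm_results).unwrap();
```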
Here is a [reference](https://github.com/ingonyama-zk/icicle/blob/77a7613aa21961030e4e12bf1c9a78a2dadb2518/wrappers/rust/icicle-core/src/msm/mod.rs#L108) to the code which automatically sets the batch size. For more MSM examples have a look [here](https://github.com/ingonyama-zk/icicle/blob/77a7613aa21961030e4e12bf1c9a78a2dadb2518/examples/rust/msm/src/main.rs#L1).
## Parameters for optimal performance
Please refer to the [primitive description](../primitives/msm#choosing-optimal-parameters)
## Support for G2 group
MSM also supports the G2 group.
Using MSM in G2 requires a G2 config, and your points should also be G2 points.
```rust
...
let scalars = HostOrDeviceSlice::Host(upper_scalars[..size].to_vec());
let g2_points = HostOrDeviceSlice::Host(g2_upper_points[..size].to_vec());
let mut g2_msm_results: HostOrDeviceSlice<'_, G2Projective> = HostOrDeviceSlice::cuda_malloc(1).unwrap();
let mut g2_cfg = msm::get_default_msm_config::<G2CurveCfg>();
msm::msm(&scalars, &g2_points, &g2_cfg, &mut g2_msm_results).unwrap();
...
```
Here you can [find an example](https://github.com/ingonyama-zk/icicle/blob/5a96f9937d0a7176d88c766bd3ef2062b0c26c37/examples/rust/msm/src/main.rs#L114) of MSM on G2 Points.


@@ -0,0 +1,202 @@
# Multi GPU APIs
To learn more about the theory of Multi GPU programming refer to [this part](../multi-gpu.md) of the documentation.
Here we will cover the core multi GPU APIs and [an example](#a-multi-gpu-example).
## A Multi GPU example
In this example we will show how to
1. Fetch the number of devices installed on a machine
2. Launch a thread for every GPU and set an active device per thread
3. Execute an MSM on each GPU
```rust
...
let device_count = get_device_count().unwrap();

(0..device_count)
    .into_par_iter()
    .for_each(move |device_id| {
        set_device(device_id).unwrap();

        // you can allocate points and scalars_d here

        let mut cfg = MSMConfig::default_for_device(device_id);
        cfg.ctx.stream = &stream;
        cfg.is_async = true;
        cfg.are_scalars_montgomery_form = true;
        msm(&scalars_d, &HostOrDeviceSlice::on_host(points), &cfg, &mut msm_results).unwrap();

        // collect and process results
    })
...
```
We use `get_device_count` to fetch the number of connected devices; device IDs will be `0, 1, 2, ..., device_count - 1`.
[`into_par_iter`](https://docs.rs/rayon/latest/rayon/iter/trait.IntoParallelIterator.html#tymethod.into_par_iter) is a parallel iterator; you should expect it to launch a thread for every iteration.
We then call `set_device(device_id).unwrap();` to set this thread's context to the selected `device_id`.
Any data you now allocate from the context of this thread will be linked to the `device_id`. We create our `MSMConfig` with the selected device ID, `let mut cfg = MSMConfig::default_for_device(device_id);`; behind the scenes this creates a `DeviceContext` configured for that specific GPU.
We finally call our `msm` method.
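For reference, a fuller self-contained sketch of this pattern (hedged: it assumes the `rayon` crate for `into_par_iter`, and generates random per-device inputs purely for illustration):
```rust
use icicle_bn254::curve::{CurveCfg, G1Projective, ScalarCfg};
use icicle_core::{curve::Curve, msm, traits::GenerateRandom};
use icicle_cuda_runtime::{
    device::{get_device_count, set_device},
    memory::HostOrDeviceSlice,
    stream::CudaStream,
};
use rayon::prelude::*;

fn main() {
    let size = 1 << 10;
    let device_count = get_device_count().unwrap();
    (0..device_count)
        .into_par_iter()
        .for_each(|device_id| {
            // bind this thread to one GPU
            set_device(device_id).unwrap();

            // per-device inputs and outputs, allocated from this thread's context
            let points = HostOrDeviceSlice::Host(CurveCfg::generate_random_affine_points(size));
            let scalars = HostOrDeviceSlice::Host(ScalarCfg::generate_random(size));
            let mut msm_results: HostOrDeviceSlice<'_, G1Projective> =
                HostOrDeviceSlice::cuda_malloc(1).unwrap();

            let stream = CudaStream::create().unwrap();
            let mut cfg = msm::MSMConfig::default_for_device(device_id);
            cfg.ctx.stream = &stream;
            cfg.is_async = true;

            msm::msm(&scalars, &points, &cfg, &mut msm_results).unwrap();
            stream.synchronize().unwrap();
            // collect and process results here
        });
}
```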
## Device management API
To streamline device management, the `icicle-cuda-runtime` package offers methods for dealing with devices.
#### [`set_device`](https://github.com/ingonyama-zk/icicle/blob/e6035698b5e54632f2c44e600391352ccc11cad4/wrappers/rust/icicle-cuda-runtime/src/device.rs#L6)
Sets the current CUDA device by its ID; calling `set_device` binds the calling thread to that CUDA device.
**Parameters:**
- **`device_id: usize`**: The ID of the device to set as the current device. Device IDs start from 0.
**Returns:**
- **`CudaResult<()>`**: An empty result indicating success if the device is set successfully. In case of failure, returns a `CudaError`.
**Errors:**
- Returns a `CudaError` if the specified device ID is invalid or if a CUDA-related error occurs during the operation.
**Example:**
```rust
let device_id = 0; // Device ID to set
match set_device(device_id) {
    Ok(()) => println!("Device set successfully."),
    Err(e) => eprintln!("Failed to set device: {:?}", e),
}
```
#### [`get_device_count`](https://github.com/ingonyama-zk/icicle/blob/e6035698b5e54632f2c44e600391352ccc11cad4/wrappers/rust/icicle-cuda-runtime/src/device.rs#L10)
Retrieves the number of CUDA devices available on the machine.
**Returns:**
- **`CudaResult<usize>`**: The number of available CUDA devices. On success, contains the count of CUDA devices. On failure, returns a `CudaError`.
**Errors:**
- Returns a `CudaError` if a CUDA-related error occurs during the retrieval of the device count.
**Example:**
```rust
match get_device_count() {
    Ok(count) => println!("Number of devices available: {}", count),
    Err(e) => eprintln!("Failed to get device count: {:?}", e),
}
```
#### [`get_device`](https://github.com/ingonyama-zk/icicle/blob/e6035698b5e54632f2c44e600391352ccc11cad4/wrappers/rust/icicle-cuda-runtime/src/device.rs#L15)
Retrieves the ID of the current CUDA device.
**Returns:**
- **`CudaResult<usize>`**: The ID of the current CUDA device. On success, contains the device ID. On failure, returns a `CudaError`.
**Errors:**
- Returns a `CudaError` if a CUDA-related error occurs during the retrieval of the current device ID.
**Example:**
```rust
match get_device() {
    Ok(device_id) => println!("Current device ID: {}", device_id),
    Err(e) => eprintln!("Failed to get current device: {:?}", e),
}
```
## Device context API
The `DeviceContext` is embedded into `NTTConfig`, `MSMConfig` and `PoseidonConfig`, meaning you can simply pass a `device_id` to your existing config and the same computation will be triggered on a different device.
#### [`DeviceContext`](https://github.com/ingonyama-zk/icicle/blob/e6035698b5e54632f2c44e600391352ccc11cad4/wrappers/rust/icicle-cuda-runtime/src/device_context.rs#L11)
Represents the configuration of a CUDA device, encapsulating the device's stream, ID, and memory pool. The default device is always `0`.
```rust
pub struct DeviceContext<'a> {
    pub stream: &'a CudaStream,
    pub device_id: usize,
    pub mempool: CudaMemPool,
}
```
##### Fields
- **`stream: &'a CudaStream`**
A reference to a `CudaStream`. This stream is used for executing CUDA operations. By default, it points to the null stream, CUDA's default execution stream.
- **`device_id: usize`**
The index of the GPU currently in use. The default value is `0`, indicating the first GPU in the system.
If `CUDA_VISIBLE_DEVICES` was configured, for example as `CUDA_VISIBLE_DEVICES=2,3,7` on a system with 8 GPUs, then `device_id=0` corresponds to the GPU with ID 2. The mapping is therefore not always a direct reflection of the GPUs installed on a system.
- **`mempool: CudaMemPool`**
Represents the memory pool used for CUDA memory allocations. The default is set to a null pointer, which signifies the use of the default CUDA memory pool.
##### Implementation Notes
- The `DeviceContext` structure is cloneable and can be debugged, facilitating easier logging and duplication of contexts when needed.
#### [`DeviceContext::default_for_device(device_id: usize) -> DeviceContext<'static>`](https://github.com/ingonyama-zk/icicle/blob/e6035698b5e54632f2c44e600391352ccc11cad4/wrappers/rust/icicle-cuda-runtime/src/device_context.rs#L30)
Provides a default `DeviceContext` for a given device, ideal for straightforward setups.
#### Parameters
- **`device_id: usize`**: The ID of the device for which to create the context.
#### Returns
A `DeviceContext` instance configured with:
- The provided `device_id`.
- The default stream (`null_mut()`).
- The default memory pool (`null_mut()`).
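For example, a small sketch combining the defaults with an explicit stream (the assertion merely illustrates the effect):
```rust
use icicle_cuda_runtime::{device_context::DeviceContext, stream::CudaStream};

// build a context for GPU 1, then attach a custom stream to it
let stream = CudaStream::create().expect("Failed to create CUDA stream");
let mut ctx = DeviceContext::default_for_device(1);
ctx.stream = &stream;
assert_eq!(ctx.device_id, 1);
```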
#### [`check_device(device_id: i32)`](https://github.com/vhnatyk/icicle/blob/eef6876b037a6b0797464e7cdcf9c1ecfcf41808/wrappers/rust/icicle-cuda-runtime/src/device_context.rs#L42)
Validates that the specified `device_id` matches the ID of the currently active device, ensuring operations are targeted correctly.
#### Parameters
- **`device_id: i32`**: The device ID to verify against the currently active device.
#### Behavior
- **`Panics`** if the `device_id` does not match the active device's ID, preventing cross-device operation errors.
#### Example
```rust
let device_id: i32 = 0; // Example device ID
check_device(device_id);
// Ensures that the current context is correctly set for the specified device ID.
```


@@ -0,0 +1,200 @@
# NTT
## Example
```rust
// Assumed arkworks imports for deriving the root of unity; `from_ark` requires
// icicle's arkworks conversion support (the `ArkConvertible` trait).
use ark_bn254::Fr as Bn254Fr;
use ark_ff::FftField;
use icicle_bn254::curve::{ScalarCfg, ScalarField};
use icicle_core::{ntt::{self, NTT}, traits::{ArkConvertible, GenerateRandom}};
use icicle_cuda_runtime::{device_context::DeviceContext, memory::HostOrDeviceSlice, stream::CudaStream};

fn main() {
    let size = 1 << 12; // Define the size of your input, e.g., 2^12

    // Obtain a root of unity of the appropriate order
    let icicle_omega = <Bn254Fr as FftField>::get_root_of_unity(
        size.try_into()
            .unwrap(),
    )
    .unwrap();

    // Generate random inputs
    println!("Generating random inputs...");
    let scalars = HostOrDeviceSlice::Host(ScalarCfg::generate_random(size));

    // Allocate memory on the CUDA device for NTT results
    let mut ntt_results: HostOrDeviceSlice<'_, ScalarField> =
        HostOrDeviceSlice::cuda_malloc(size).expect("Failed to allocate CUDA memory");

    // Create a CUDA stream
    let stream = CudaStream::create().expect("Failed to create CUDA stream");
    let ctx = DeviceContext::default(); // Assuming default device context
    ScalarCfg::initialize_domain(ScalarField::from_ark(icicle_omega), &ctx, true).unwrap();

    // Configure NTT
    let mut cfg = ntt::NTTConfig::default();
    cfg.ctx.stream = &stream;
    cfg.is_async = true; // Set to true for asynchronous execution

    // Execute NTT on device
    println!("Executing NTT on device...");
    ntt::ntt(&scalars, ntt::NTTDir::kForward, &cfg, &mut ntt_results).expect("Failed to execute NTT");

    // Synchronize the CUDA stream to ensure completion
    stream.synchronize().expect("Failed to synchronize CUDA stream");

    // Optionally, move results to the host for further processing or verification
    println!("NTT execution complete.");
}
```
## NTT API overview
```rust
pub fn ntt<F>(
    input: &HostOrDeviceSlice<F>,
    dir: NTTDir,
    cfg: &NTTConfig<F>,
    output: &mut HostOrDeviceSlice<F>,
) -> IcicleResult<()>
```
`ntt::ntt` expects:
- **`input`** - buffer to read the inputs of the NTT from.
- **`dir`** - whether to compute forward or inverse NTT.
- **`cfg`** - config used to specify extra arguments of the NTT.
- **`output`** - buffer to write the NTT outputs into. Must be of the same size as input.
The `input` and `output` buffers can be on device or on host. Being on host means that they will be transferred to device during runtime.
### NTT Config
```rust
pub struct NTTConfig<'a, S> {
    pub ctx: DeviceContext<'a>,
    pub coset_gen: S,
    pub batch_size: i32,
    pub columns_batch: bool,
    pub ordering: Ordering,
    are_inputs_on_device: bool,
    are_outputs_on_device: bool,
    pub is_async: bool,
    pub ntt_algorithm: NttAlgorithm,
}
```
The `NTTConfig` struct is a configuration object used to specify parameters for an NTT instance.
#### Fields
- **`ctx: DeviceContext<'a>`**: Specifies the device context, including the device ID and the stream ID.
- **`coset_gen: S`**: Defines the coset generator used for coset (i)NTTs. By default, this is set to `S::one()`, indicating that no coset is being used.
- **`batch_size: i32`**: Determines the number of NTTs to compute in a single batch. The default value is 1, meaning that operations are performed on individual inputs without batching. Batch processing can significantly improve performance by leveraging parallelism in GPU computations.
- **`columns_batch`**: If `true`, the NTTs are computed over the columns of the input matrix rather than the rows. Defaults to `false`.
- **`ordering: Ordering`**: Controls the ordering of inputs and outputs for the NTT operation. This field can be used to specify decimation strategies (in time or in frequency) and the type of butterfly algorithm (Cooley-Tukey or Gentleman-Sande). The ordering is crucial for compatibility with various algorithmic approaches and can impact the efficiency of the NTT.
- **`are_inputs_on_device: bool`**: Indicates whether the input data has been preloaded in device memory. If `false`, inputs will be copied from host to device.
- **`are_outputs_on_device: bool`**: Indicates whether the output data should remain in device memory. If `false`, outputs will be copied back to host memory. If the inputs and outputs are the same pointer, the NTT will be computed in place.
- **`is_async: bool`**: Specifies whether the NTT operation should be performed asynchronously. When set to `true`, the NTT function will not block the CPU, allowing other operations to proceed concurrently. Asynchronous execution requires careful synchronization to ensure data integrity and correctness.
- **`ntt_algorithm: NttAlgorithm`**: Can be one of `Auto`, `Radix2`, `MixedRadix`.
`Auto` will select the `Radix2` or `MixedRadix` algorithm based on heuristics.
`Radix2` and `MixedRadix` will force the use of that algorithm regardless of the input size or other considerations. Use one of these options when you know for sure which algorithm performs best for your workload.
#### Usage
Example initialization with default settings:
```rust
let default_config = NTTConfig::default();
```
Customizing the configuration:
```rust
let custom_config = NTTConfig {
    ctx: custom_device_context,
    coset_gen: my_coset_generator,
    batch_size: 10,
    columns_batch: false,
    ordering: Ordering::kRN,
    are_inputs_on_device: true,
    are_outputs_on_device: true,
    is_async: false,
    ntt_algorithm: NttAlgorithm::MixedRadix,
};
```
### Modes
NTT supports two different modes: `Batch NTT` and `Single NTT`.
You may toggle between single and batch NTT by simply configuring `batch_size` to be larger than 1 in your `NTTConfig`.
```rust
let mut cfg = ntt::get_default_ntt_config::<ScalarField>();
cfg.batch_size = 10; // NTTs using this config will run in batch mode
```
`batch_size=1` keeps the NTT in single NTT mode.
Deciding whether to use `batch NTT` vs `single NTT` is highly dependent on your application and use case.
### Initializing the NTT Domain
Before performing NTT operations, it's necessary to initialize the NTT domain. It only needs to be called once per GPU since the twiddles are cached.
```rust
ScalarCfg::initialize_domain(ScalarField::from_ark(icicle_omega), &ctx, true).unwrap();
```
### `initialize_domain`
```rust
pub fn initialize_domain<F>(primitive_root: F, ctx: &DeviceContext, fast_twiddles: bool) -> IcicleResult<()>
where
    F: FieldImpl,
    <F as FieldImpl>::Config: NTT<F>;
```
#### Parameters
- **`primitive_root`**: The primitive root of unity, chosen based on the maximum NTT size required for the computations. It must be of an order that is a power of two. This root is used to generate the twiddle factors that are essential for the NTT operations.
- **`ctx`**: A reference to a `DeviceContext` specifying which device and stream the computation should be executed on.
- **`fast_twiddles`**: When `true`, precomputes additional twiddle factors, trading extra device memory for faster NTT execution.
#### Returns
- **`IcicleResult<()>`**: Will return an error if the operation fails.
### Releasing the domain
The `release_domain` function is responsible for releasing the resources associated with a specific domain in the CUDA device context.
```rust
pub fn release_domain<F>(ctx: &DeviceContext) -> IcicleResult<()>
where
    F: FieldImpl,
    <F as FieldImpl>::Config: NTT<F>
```
#### Parameters
- **`ctx`**: A reference to a `DeviceContext` specifying which device and stream the computation should be executed on.
#### Returns
The function returns an `IcicleResult<()>`, which represents the result of the operation. If the operation is successful, the function returns `Ok(())`, otherwise it returns an error.
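A minimal sketch of releasing the domain (hedged: this assumes `release_domain` is exported from `icicle_core::ntt` with the signature above, and uses the BN254 scalar field for concreteness):
```rust
use icicle_bn254::curve::ScalarField;
use icicle_core::ntt::release_domain;
use icicle_cuda_runtime::device_context::DeviceContext;

let ctx = DeviceContext::default();
// free the cached twiddles once no further NTTs are needed on this GPU
release_domain::<ScalarField>(&ctx).expect("Failed to release NTT domain");
```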


@@ -0,0 +1,287 @@
# Rust FFI Bindings for Univariate Polynomial
:::note
Please refer to the Polynomials overview page for an in-depth overview. This section is a brief description of the Rust FFI bindings.
:::
This documentation is designed to provide developers with a clear understanding of how to utilize the Rust bindings for polynomial operations efficiently and effectively, leveraging the robust capabilities of both Rust and C++ in their applications.
## Introduction
The Rust FFI bindings for the Univariate Polynomial serve as a "shallow wrapper" around the underlying C++ implementation. These bindings provide a straightforward Rust interface that directly calls functions from a C++ library, effectively bridging Rust and C++ operations. The Rust layer handles simple interface translations without delving into complex logic or data structures, which are managed on the C++ side. This design ensures efficient data handling, memory management, and execution of polynomial operations directly via C++.
Currently, these bindings are tailored specifically for polynomials where the coefficients, domain, and images are represented as scalar fields.
## Initialization Requirements
Before utilizing any functions from the polynomial API, it is mandatory to initialize the appropriate polynomial backend (e.g., CUDA). Additionally, the NTT (Number Theoretic Transform) domain must also be initialized, as the CUDA backend relies on this for certain operations. Failing to properly initialize these components can result in errors.
:::note
**Field-Specific Initialization Requirement**
The ICICLE library is structured such that each field or curve has its dedicated library implementation. As a result, initialization must be performed individually for each field or curve to ensure the correct setup and functionality of the library.
:::
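A minimal initialization sketch (hedged: it assumes the BabyBear field and that `get_root_of_unity` is exported from `icicle_core::ntt`, as in the ICICLE examples):
```rust
use icicle_babybear::field::ScalarField;
use icicle_babybear::polynomials::DensePolynomial;
use icicle_core::ntt::{get_root_of_unity, initialize_domain};
use icicle_cuda_runtime::device_context::DeviceContext;

fn init() {
    // 1. Initialize the CUDA polynomial backend for this field
    DensePolynomial::init_cuda_backend();

    // 2. Initialize the NTT domain the backend relies on,
    //    sized for the largest polynomial you plan to work with
    let max_ntt_size: u64 = 1 << 20;
    let ctx = DeviceContext::default();
    initialize_domain(get_root_of_unity::<ScalarField>(max_ntt_size), &ctx, false).unwrap();
}
```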
## Core Trait: `UnivariatePolynomial`
The `UnivariatePolynomial` trait encapsulates the essential functionalities required for managing univariate polynomials in the Rust ecosystem. This trait standardizes the operations that can be performed on polynomials, regardless of the underlying implementation details. It allows for a unified approach to polynomial manipulation, providing a suite of methods that are fundamental to polynomial arithmetic.
### Trait Definition
```rust
pub trait UnivariatePolynomial
where
    Self::Field: FieldImpl,
    Self::FieldConfig: FieldConfig,
{
    type Field: FieldImpl;
    type FieldConfig: FieldConfig;

    // Methods to create polynomials from coefficients or roots-of-unity evaluations.
    fn from_coeffs<S: HostOrDeviceSlice<Self::Field> + ?Sized>(coeffs: &S, size: usize) -> Self;
    fn from_rou_evals<S: HostOrDeviceSlice<Self::Field> + ?Sized>(evals: &S, size: usize) -> Self;

    // Method to divide this polynomial by another, returning quotient and remainder.
    fn divide(&self, denominator: &Self) -> (Self, Self) where Self: Sized;

    // Method to divide this polynomial by the vanishing polynomial 'X^N-1'.
    fn div_by_vanishing(&self, degree: u64) -> Self;

    // Methods to add or subtract a monomial in-place.
    fn add_monomial_inplace(&mut self, monomial_coeff: &Self::Field, monomial: u64);
    fn sub_monomial_inplace(&mut self, monomial_coeff: &Self::Field, monomial: u64);

    // Method to slice the polynomial, creating a sub-polynomial.
    fn slice(&self, offset: u64, stride: u64, size: u64) -> Self;

    // Methods to return new polynomials containing only the even or odd terms.
    fn even(&self) -> Self;
    fn odd(&self) -> Self;

    // Method to evaluate the polynomial at a given domain point.
    fn eval(&self, x: &Self::Field) -> Self::Field;

    // Method to evaluate the polynomial over a domain and store the results.
    fn eval_on_domain<D: HostOrDeviceSlice<Self::Field> + ?Sized, E: HostOrDeviceSlice<Self::Field> + ?Sized>(
        &self,
        domain: &D,
        evals: &mut E,
    );

    // Method to evaluate the polynomial over the roots-of-unity domain for power-of-two sized domain.
    fn eval_on_rou_domain<E: HostOrDeviceSlice<Self::Field> + ?Sized>(&self, domain_log_size: u64, evals: &mut E);

    // Method to retrieve a coefficient at a specific index.
    fn get_coeff(&self, idx: u64) -> Self::Field;

    // Method to copy coefficients into a provided slice.
    fn copy_coeffs<S: HostOrDeviceSlice<Self::Field> + ?Sized>(&self, start_idx: u64, coeffs: &mut S);

    // Method to get the degree of the polynomial.
    fn degree(&self) -> i64;
}
```
## `DensePolynomial` Struct
The DensePolynomial struct represents a dense univariate polynomial in Rust, leveraging a handle to manage its underlying memory within the CUDA device context. This struct acts as a high-level abstraction over complex C++ memory management practices, facilitating the integration of high-performance polynomial operations through Rust's Foreign Function Interface (FFI) bindings.
```rust
pub struct DensePolynomial {
    handle: PolynomialHandle,
}
```
### Traits implementation and methods
#### `Drop`
Ensures proper resource management by releasing the CUDA memory when a DensePolynomial instance goes out of scope. This prevents memory leaks and ensures that resources are cleaned up correctly, adhering to Rust's RAII (Resource Acquisition Is Initialization) principles.
#### `Clone`
Provides a way to create a new instance of a DensePolynomial with its own unique handle, thus duplicating the polynomial data in the CUDA context. Cloning is essential since the DensePolynomial manages external resources, which cannot be safely shared across instances without explicit duplication.
#### Operator Overloading: `Add`, `Sub`, `Mul`, `Rem`, `Div`
These traits are implemented for references to DensePolynomial (i.e., &DensePolynomial), enabling natural mathematical operations such as addition (+), subtraction (-), multiplication (*), division (/), and remainder (%). This syntactic convenience allows users to compose complex polynomial expressions in a way that is both readable and expressive.
#### Key Methods
In addition to the traits, the following methods are implemented:
```rust
impl DensePolynomial {
    pub fn init_cuda_backend() -> bool {...}

    // Returns a mutable slice of the polynomial coefficients on the device
    pub fn coeffs_mut_slice(&mut self) -> &mut DeviceSlice<F> {...}
}
```
## Flexible Memory Handling With `HostOrDeviceSlice`
The DensePolynomial API is designed to accommodate a wide range of computational environments by supporting both host and device memory through the `HostOrDeviceSlice` trait. This approach ensures that polynomial operations can be seamlessly executed regardless of where the data resides, making the API highly adaptable and efficient for various hardware configurations.
### Overview of `HostOrDeviceSlice`
The HostOrDeviceSlice is a Rust trait that abstracts over slices of memory that can either be on the host (CPU) or the device (GPU), as managed by CUDA. This abstraction is crucial for high-performance computing scenarios where data might need to be moved between different memory spaces depending on the operations being performed and the specific hardware capabilities available.
### Usage in API Functions
Functions within the DensePolynomial API that deal with polynomial coefficients or evaluations use the HostOrDeviceSlice trait to accept inputs. This design allows the functions to be agnostic of the actual memory location of the data, whether it's in standard system RAM accessible by the CPU or in GPU memory accessible by CUDA cores.
```rust
// Assume `coeffs` could either be in host memory or CUDA device memory
let coeffs = DeviceVec::<F>::cuda_malloc(coeffs_len).unwrap(); // device-resident coefficients
let p_from_coeffs = PolynomialBabyBear::from_coeffs(&coeffs[..], coeffs_len);

// Similarly for evaluations from roots of unity
let evals = HostSlice::from_slice(&host_memory_evals); // host-resident evaluations
let p_from_evals = PolynomialBabyBear::from_rou_evals(evals, host_memory_evals.len());

// The same applies to any API that accepts an implementation of HostOrDeviceSlice
```
## Usage
This section outlines practical examples demonstrating how to utilize the `DensePolynomial` Rust API. The API is flexible, supporting multiple scalar fields. Below are examples showing how to use polynomials defined over different fields and perform a variety of operations.
### Initialization and Basic Operations
First, choose the appropriate field implementation for your polynomial operations, initializing the CUDA backend if necessary
```rust
use icicle_babybear::polynomials::DensePolynomial as PolynomialBabyBear;
// Initialize the CUDA backend for polynomial operations
PolynomialBabyBear::init_cuda_backend();
let f = PolynomialBabyBear::from_coeffs(...);
// now use f by calling the implemented traits
// For operations over another field, such as BN254
use icicle_bn254::polynomials::DensePolynomial as PolynomialBn254;
// Use PolynomialBn254 similarly
```
### Creation
Polynomials can be created from coefficients or evaluations:
```rust
let coeffs = ...;
let p_from_coeffs = PolynomialBabyBear::from_coeffs(HostSlice::from_slice(&coeffs), size);
let evals = ...;
let p_from_evals = PolynomialBabyBear::from_rou_evals(HostSlice::from_slice(&evals), size);
```
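As a concrete version of the elided values above (hedged: random coefficients are purely for illustration):
```rust
use icicle_babybear::field::{ScalarCfg, ScalarField};
use icicle_core::traits::GenerateRandom;
use icicle_cuda_runtime::memory::HostSlice;

let size = 8;
let coeffs: Vec<ScalarField> = ScalarCfg::generate_random(size);
let p_from_coeffs = PolynomialBabyBear::from_coeffs(HostSlice::from_slice(&coeffs), size);
```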
### Arithmetic Operations
Utilize overloaded operators for intuitive mathematical expressions:
```rust
let add = &f + &g; // Addition
let sub = &f - &g; // Subtraction
let mul = &f * &g; // Multiplication
let mul_scalar = &f * &scalar; // Scalar multiplication
```
### Division and Remainder
Compute quotient and remainder or perform division by a vanishing polynomial:
```rust
let (q, r) = f.divide(&g); // Compute both quotient and remainder
let q = &f / &g; // Quotient
let r = &f % &g; // Remainder
let h = f.div_by_vanishing(N); // Division by V(x) = X^N - 1
```
### Monomial Operations
Add or subtract monomials in-place for efficient polynomial manipulation:
```rust
f.add_monomial_inplace(&three, 1 /*monomial*/); // Adds 3*x to f
f.sub_monomial_inplace(&one, 0 /*monomial*/); // Subtracts 1 from f
```
### Slicing
Extract specific components:
```rust
let even = f.even(); // Polynomial of even-indexed terms
let odd = f.odd(); // Polynomial of odd-indexed terms
let arbitrary_slice = f.slice(offset, stride, size);
```
### Evaluate
Evaluate the polynomial:
```rust
let x = rand(); // Random field element
let f_x = f.eval(&x); // Evaluate f at x
// Evaluate on a predefined domain
let domain = [one, two, three];
let mut host_evals = vec![ScalarField::zero(); domain.len()];
f.eval_on_domain(HostSlice::from_slice(&domain), HostSlice::from_mut_slice(&mut host_evals));
// Evaluate on roots-of-unity-domain
let domain_log_size = 4;
let mut device_evals = DeviceVec::<ScalarField>::cuda_malloc(1 << domain_log_size).unwrap();
f.eval_on_rou_domain(domain_log_size, &mut device_evals[..]);
```
### Read coefficients
Read or copy polynomial coefficients for further processing:
```rust
let x_squared_coeff = f.get_coeff(2); // Coefficient of x^2
// Copy coefficients to a device-specific memory space
let mut device_mem = DeviceVec::<Field>::cuda_malloc(coeffs.len()).unwrap();
f.copy_coeffs(0, &mut device_mem[..]);
```
### Polynomial Degree
Determine the highest power of the variable with a non-zero coefficient:
```rust
let deg = f.degree(); // Degree of the polynomial
```
### Memory Management: Views (rust slices)
Rust enforces correct usage of views at compile time, eliminating the need for runtime checks:
```rust
let mut f = Poly::from_coeffs(HostSlice::from_slice(&coeffs), size);
// Obtain a mutable slice of coefficients as a DeviceSlice
let coeffs_slice_dev = f.coeffs_mut_slice();
// Operations on f are restricted here due to mutable borrow of coeffs_slice_dev
// Compute evaluations or perform other operations directly using the slice
// example: evaluate f on a coset of roots of unity, computing from GPU memory to host memory
let mut config: NTTConfig<'_, F> = NTTConfig::default();
config.coset_gen = /*some coset gen*/;
let mut coset_evals = vec![F::zero(); coeffs_slice_dev.len()];
ntt(
coeffs_slice_dev,
NTTDir::kForward,
&config,
HostSlice::from_mut_slice(&mut coset_evals),
)
.unwrap();
// now f can be borrowed once again
```


@@ -0,0 +1,207 @@
# Vector Operations API
Our vector operations API, which is part of the `icicle-core` package, includes fundamental methods for addition, subtraction, and multiplication of vectors, with support for both host and device memory.
## Examples
### Addition of Scalars
```rust
use icicle_bn254::curve::{ScalarCfg, ScalarField};
use icicle_core::traits::{FieldImpl, GenerateRandom};
use icicle_core::vec_ops::{add_scalars, VecOpsConfig};
use icicle_cuda_runtime::memory::HostOrDeviceSlice;

let test_size = 1 << 18;

let a: HostOrDeviceSlice<'_, ScalarField> = HostOrDeviceSlice::on_host(ScalarCfg::generate_random(test_size));
let b: HostOrDeviceSlice<'_, ScalarField> = HostOrDeviceSlice::on_host(ScalarCfg::generate_random(test_size));
let mut result: HostOrDeviceSlice<'_, ScalarField> = HostOrDeviceSlice::on_host(vec![ScalarField::zero(); test_size]);

let cfg = VecOpsConfig::default();
add_scalars(&a, &b, &mut result, &cfg).unwrap();
```
### Subtraction of Scalars
```rust
use icicle_bn254::curve::{ScalarCfg, ScalarField};
use icicle_core::traits::{FieldImpl, GenerateRandom};
use icicle_core::vec_ops::{sub_scalars, VecOpsConfig};
use icicle_cuda_runtime::memory::HostOrDeviceSlice;

let test_size = 1 << 18;

let a: HostOrDeviceSlice<'_, ScalarField> = HostOrDeviceSlice::on_host(ScalarCfg::generate_random(test_size));
let b: HostOrDeviceSlice<'_, ScalarField> = HostOrDeviceSlice::on_host(ScalarCfg::generate_random(test_size));
let mut result: HostOrDeviceSlice<'_, ScalarField> = HostOrDeviceSlice::on_host(vec![ScalarField::zero(); test_size]);

let cfg = VecOpsConfig::default();
sub_scalars(&a, &b, &mut result, &cfg).unwrap();
```
### Multiplication of Scalars
```rust
use icicle_bn254::curve::{ScalarCfg, ScalarField};
use icicle_core::traits::{FieldImpl, GenerateRandom};
use icicle_core::vec_ops::{mul_scalars, VecOpsConfig};
use icicle_cuda_runtime::memory::HostOrDeviceSlice;

let test_size = 1 << 18;

let a: HostOrDeviceSlice<'_, ScalarField> = HostOrDeviceSlice::on_host(ScalarCfg::generate_random(test_size));
let ones: HostOrDeviceSlice<'_, ScalarField> = HostOrDeviceSlice::on_host(vec![ScalarField::one(); test_size]);
let mut result: HostOrDeviceSlice<'_, ScalarField> = HostOrDeviceSlice::on_host(vec![ScalarField::zero(); test_size]);

let cfg = VecOpsConfig::default();
mul_scalars(&a, &ones, &mut result, &cfg).unwrap();
```
## Vector Operations Configuration
The `VecOpsConfig` struct encapsulates the settings for vector operations, including device context and operation modes.
### `VecOpsConfig`
Defines configuration parameters for vector operations.
```rust
pub struct VecOpsConfig<'a> {
    pub ctx: DeviceContext<'a>,
    is_a_on_device: bool,
    is_b_on_device: bool,
    is_result_on_device: bool,
    pub is_async: bool,
}
```
#### Fields
- **`ctx: DeviceContext<'a>`**: Specifies the device context for the operation, including the device ID and memory pool.
- **`is_a_on_device`**: Indicates if the first operand vector resides in device memory.
- **`is_b_on_device`**: Indicates if the second operand vector resides in device memory.
- **`is_result_on_device`**: Specifies if the result vector should be stored in device memory.
- **`is_async`**: Enables asynchronous operation. If `true`, operations are non-blocking; otherwise, they block the current thread.
### Default Configuration
`VecOpsConfig` can be initialized with default settings tailored for a specific device:
```rust
let cfg = VecOpsConfig::default();
```
These are the default settings.
```rust
impl<'a> Default for VecOpsConfig<'a> {
    fn default() -> Self {
        Self::default_for_device(DEFAULT_DEVICE_ID)
    }
}

impl<'a> VecOpsConfig<'a> {
    pub fn default_for_device(device_id: usize) -> Self {
        VecOpsConfig {
            ctx: DeviceContext::default_for_device(device_id),
            is_a_on_device: false,
            is_b_on_device: false,
            is_result_on_device: false,
            is_async: false,
        }
    }
}
```
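Public fields can then be adjusted on the returned value; a small sketch:
```rust
let mut cfg = VecOpsConfig::default_for_device(0);
cfg.is_async = true; // don't block the host thread; synchronize the stream afterwards
```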
## Vector Operations
Vector operations are implemented through the `VecOps` trait, providing methods for addition, subtraction, and multiplication of vectors.
### `VecOps` Trait
```rust
pub trait VecOps<F> {
    fn add(
        a: &HostOrDeviceSlice<F>,
        b: &HostOrDeviceSlice<F>,
        result: &mut HostOrDeviceSlice<F>,
        cfg: &VecOpsConfig,
    ) -> IcicleResult<()>;

    fn sub(
        a: &HostOrDeviceSlice<F>,
        b: &HostOrDeviceSlice<F>,
        result: &mut HostOrDeviceSlice<F>,
        cfg: &VecOpsConfig,
    ) -> IcicleResult<()>;

    fn mul(
        a: &HostOrDeviceSlice<F>,
        b: &HostOrDeviceSlice<F>,
        result: &mut HostOrDeviceSlice<F>,
        cfg: &VecOpsConfig,
    ) -> IcicleResult<()>;
}
```
#### Methods
All operations are element-wise, and the results are placed into the `result` parameter. These operations are not in-place.
- **`add`**: Computes the element-wise sum of two vectors.
- **`sub`**: Computes the element-wise difference between two vectors.
- **`mul`**: Performs element-wise multiplication of two vectors.
## MatrixTranspose API Documentation
This section describes the functionality of the `transpose_matrix` function used for matrix transposition.
The function takes a matrix represented as a 1D slice and transposes it, storing the result in another 1D slice.
### Function
```rust
pub fn transpose_matrix<F>(
    input: &HostOrDeviceSlice<F>,
    row_size: u32,
    column_size: u32,
    output: &mut HostOrDeviceSlice<F>,
    ctx: &DeviceContext,
    on_device: bool,
    is_async: bool,
) -> IcicleResult<()>
where
    F: FieldImpl,
    <F as FieldImpl>::Config: VecOps<F>
```
### Parameters
- **`input`**: A slice representing the input matrix. The slice can be stored on either the host or the device.
- **`row_size`**: The number of rows in the input matrix.
- **`column_size`**: The number of columns in the input matrix.
- **`output`**: A mutable slice to store the transposed matrix. The slice can be stored on either the host or the device.
- **`ctx`**: A reference to the `DeviceContext`, which provides information about the device where the operation will be performed.
- **`on_device`**: A boolean flag indicating whether the inputs and outputs are on the device.
- **`is_async`**: A boolean flag indicating whether the operation should be performed asynchronously.
### Return Value
`Ok(())` if the operation is successful, or an `IcicleResult` error otherwise.
### Example
```rust
// a minimal runnable sketch using BN254 scalars; any type implementing FieldImpl works
use icicle_bn254::curve::{ScalarCfg, ScalarField};
use icicle_core::{traits::{FieldImpl, GenerateRandom}, vec_ops::transpose_matrix};
use icicle_cuda_runtime::{device_context::DeviceContext, memory::HostOrDeviceSlice};

let row_size = 5;
let column_size = 4;
let input: HostOrDeviceSlice<'_, ScalarField> =
    HostOrDeviceSlice::on_host(ScalarCfg::generate_random(row_size * column_size));
let mut output: HostOrDeviceSlice<'_, ScalarField> =
    HostOrDeviceSlice::on_host(vec![ScalarField::zero(); row_size * column_size]);
let ctx = DeviceContext::default();
transpose_matrix(&input, row_size as u32, column_size as u32, &mut output, &ctx, false, false)
    .expect("Failed to transpose matrix");
```
The input and output slices can reside on either the host or the device, and the operation can run synchronously or asynchronously.
The function is generic over any type `F` that implements the `FieldImpl` trait; `<F as FieldImpl>::Config` must also implement the `VecOps<F>` trait, which provides the `transpose` method used to perform the actual transposition.
The function returns an `IcicleResult<()>`, indicating whether the operation was successful.

docs/docs/introduction.md Normal file

@@ -0,0 +1,47 @@
---
slug: /
displayed_sidebar: GettingStartedSidebar
title: ''
---
# Welcome to Ingonyama's Developer Documentation
Ingonyama is a next-generation semiconductor company, focusing on Zero-Knowledge Proof hardware acceleration. We build accelerators for advanced cryptography, unlocking real-time applications. Our focus is on democratizing access to compute-intensive cryptography and making it accessible for developers to build on top of.
Currently our flagship products are:
- **ICICLE**:
[ICICLE](https://github.com/ingonyama-zk/icicle) is a fully featured GPU accelerated cryptography library for building ZK provers. ICICLE allows you to accelerate your existing ZK protocols in a matter of hours or implement your protocol from scratch on GPU.
---
## Our current take on hardware acceleration
We believe GPUs are as important for ZK as for AI.
- GPUs are a perfect match for ZK compute - around 97% of ZK protocol runtime is parallel by nature.
- GPUs are simple for developers to use and scale compared to other hardware platforms.
- GPUs are extremely competitive in terms of power / performance and price (3x cheaper compared to FPGAs).
- GPUs are popular and readily available.
For a more in-depth understanding on this topic we suggest you read [our article on the subject](https://www.ingonyama.com/blog/revisiting-paradigm-hardware-acceleration-for-zero-knowledge-proofs).
Despite our current focus on GPUs we are still hard at work developing a ZPU (ZK Processing Unit), with the goal of offering a programmable hardware platform for ZK. To read more about ZPUs we suggest you read this [article](https://medium.com/@ingonyama/zpu-the-zero-knowledge-processing-unit-f886a48e00e0).
## ICICLE
[ICICLE](https://github.com/ingonyama-zk/icicle) is a cryptography library for ZK using GPUs.
ICICLE implements blazing fast cryptographic primitives such as EC operations, MSM, NTT, Poseidon hash and more on GPU.
ICICLE is designed to be easy to use; developers don't have to touch a single line of CUDA code. Our Rust and Golang bindings allow your team to transition from CPU to GPU with minimal changes.
Learn more about ICICLE and GPUs [here][ICICLE-OVERVIEW].
## Get in Touch
If you have any questions, ideas, or are thinking of building something in this space, join the discussion on [Discord]. You can explore our code on [github](https://github.com/ingonyama-zk) or read some of [our research papers](https://github.com/ingonyama-zk/papers).
Follow us on [Twitter](https://x.com/Ingo_zk) and [YouTube](https://www.youtube.com/@ingo_ZK) and sign up for our [mailing list](https://wkf.ms/3LKCbdj) to get our latest announcements.
[ICICLE-OVERVIEW]: ./icicle/overview.md
[Discord]: https://discord.gg/6vYrE7waPj

docs/docusaurus.config.js Normal file

@@ -0,0 +1,180 @@
// @ts-check
const lightCodeTheme = require('prism-react-renderer/themes/github');
const darkCodeTheme = require('prism-react-renderer/themes/dracula');
const math = require('remark-math');
const katex = require('rehype-katex');
/** @type {import('@docusaurus/types').Config} */
const config = {
title: 'Ingonyama Developer Documentation',
tagline: 'Ingonyama is a next-generation semiconductor company, focusing on Zero-Knowledge Proof hardware acceleration. We build accelerators for advanced cryptography, unlocking real-time applications.',
url: 'https://dev.ingonyama.com/',
baseUrl: '/',
onBrokenLinks: 'throw',
onBrokenMarkdownLinks: 'warn',
favicon: 'img/logo.png',
organizationName: 'ingonyama-zk',
projectName: 'developer-docs',
trailingSlash: false,
deploymentBranch: "main",
presets: [
[
'classic',
/** @type {import('@docusaurus/preset-classic').Options} */
({
docs: {
showLastUpdateAuthor: true,
showLastUpdateTime: true,
routeBasePath: '/',
remarkPlugins: [math, require('mdx-mermaid')],
rehypePlugins: [katex],
sidebarPath: require.resolve('./sidebars.js'),
editUrl: 'https://github.com/ingonyama-zk/icicle/tree/main',
},
blog: {
remarkPlugins: [math, require('mdx-mermaid')],
rehypePlugins: [katex],
showReadingTime: true,
editUrl: 'https://github.com/ingonyama-zk/icicle/tree/main',
},
pages: {},
theme: {
customCss: require.resolve('./src/css/custom.css'),
},
}),
],
],
stylesheets: [
{
href: 'https://cdn.jsdelivr.net/npm/katex@0.13.24/dist/katex.min.css',
type: 'text/css',
integrity:
'sha384-odtC+0UGzzFL/6PNoE8rX/SPcQDXBJ+uRepguP4QkPCm2LBxH3FA3y+fKSiJ+AmM',
crossorigin: 'anonymous',
},
],
scripts: [
{
src: 'https://plausible.io/js/script.js',
'data-domain':'ingonyama.com',
defer: true,
},
],
themeConfig:
/** @type {import('@docusaurus/preset-classic').ThemeConfig} */
({
metadata: [
{name: 'twitter:card', content: 'summary_large_image'},
{name: 'twitter:site', content: '@Ingo_zk'},
{name: 'twitter:title', content: 'Ingonyama Developer Documentation'},
{name: 'twitter:description', content: 'Ingonyama is a next-generation semiconductor company focusing on Zero-Knowledge Proof hardware acceleration...'},
{name: 'twitter:image', content: 'https://dev.ingonyama.com/img/logo.png'},
// title
{name: 'og:title', content: 'Ingonyama Developer Documentation'},
{name: 'og:description', content: 'Ingonyama is a next-generation semiconductor company focusing on Zero-Knowledge Proof hardware acceleration...'},
{name: 'og:image', content: 'https://dev.ingonyama.com/img/logo.png'},
],
hideableSidebar: true,
colorMode: {
defaultMode: 'dark',
respectPrefersColorScheme: false,
},
algolia: {
// The application ID provided by Algolia
appId: 'PZY4KJBBBK',
// Public API key: it is safe to commit it
apiKey: '2cc940a6e0ef5c117f4f44e7f4e6e20b',
indexName: 'ingonyama',
// Optional: see doc section below
contextualSearch: true,
// Optional: Specify domains where the navigation should occur through window.location instead on history.push. Useful when our Algolia config crawls multiple documentation sites and we want to navigate with window.location.href to them.
externalUrlRegex: 'external\\.com|domain\\.com',
// Optional: Replace parts of the item URLs from Algolia. Useful when using the same search index for multiple deployments using a different baseUrl. You can use regexp or string in the `from` param. For example: localhost:3000 vs myCompany.com/docs
replaceSearchResultPathname: {
from: '/docs/', // or as RegExp: /\/docs\//
to: '/',
},
// Optional: Algolia search parameters
searchParameters: {},
// Optional: path for search page that enabled by default (`false` to disable it)
searchPagePath: 'search',
},
navbar: {
title: 'Ingonyama Developer Documentation',
logo: {
alt: 'Ingonyama Logo',
src: 'img/logo.png',
},
items: [
{
position: 'left',
label: 'Docs',
to: '/',
},
{
href: 'https://github.com/ingonyama-zk',
position: 'right',
label: 'GitHub',
},
{
href: 'https://www.ingonyama.com/ingopedia/glossary',
position: 'right',
label: 'Ingopedia',
},
{
type: 'dropdown',
position: 'right',
label: 'Community',
items: [
{
label: 'Discord',
href: 'https://discord.gg/6vYrE7waPj',
},
{
label: 'Twitter',
href: 'https://x.com/Ingo_zk',
},
{
label: 'YouTube',
href: 'https://www.youtube.com/@ingo_ZK'
},
{
label: 'Mailing List',
href: 'https://wkf.ms/3LKCbdj',
}
]
},
],
},
footer: {
copyright: `Copyright © ${new Date().getFullYear()} Ingonyama, Inc. Built with Docusaurus.`,
},
prism: {
theme: lightCodeTheme,
darkTheme: darkCodeTheme,
additionalLanguages: ['rust', 'go'],
},
image: 'img/logo.png',
announcementBar: {
id: 'announcement', // Any value that will identify this message.
content:
'<strong>🎉 Read our paper on the Polynomials API in ICICLE v2 by clicking <a target="_blank" rel="noopener noreferrer" href="https://eprint.iacr.org/2024/973">here</a>! 🎉</strong>',
backgroundColor: '#ADD8E6', // Light blue background color.
textColor: '#000000', // Black text color.
isCloseable: true, // Defaults to `true`.
},
}),
};
module.exports = config;

docs/package-lock.json generated Normal file

File diff suppressed because it is too large

docs/package.json Normal file

@@ -0,0 +1,48 @@
{
"name": "docusaurus",
"version": "0.0.0",
"private": true,
"description": "Ingonyama - developer docs",
"scripts": {
"docusaurus": "docusaurus",
"start": "docusaurus start",
"build": "docusaurus build",
"swizzle": "docusaurus swizzle",
"deploy": "docusaurus deploy",
"clear": "docusaurus clear",
"serve": "docusaurus serve",
"write-translations": "docusaurus write-translations",
"write-heading-ids": "docusaurus write-heading-ids",
"dev": "docusaurus start",
"format": "prettier --write '**/*.md'"
},
"dependencies": {
"@docusaurus/core": "2.0.0-beta.18",
"@docusaurus/preset-classic": "2.0.0-beta.18",
"@mdx-js/react": "^1.6.22",
"clsx": "^1.1.1",
"hast-util-is-element": "1.1.0",
"mdx-mermaid": "^1.2.2",
"mermaid": "^9.1.2",
"prism-react-renderer": "^1.3.1",
"react": "^17.0.2",
"react-dom": "^17.0.2",
"rehype-katex": "5",
"remark-math": "3"
},
"browserslist": {
"production": [
">0.5%",
"not dead",
"not op_mini all"
],
"development": [
"last 1 chrome version",
"last 1 firefox version",
"last 1 safari version"
]
},
"devDependencies": {
"prettier": "^3.2.4"
}
}

docs/sandbox.config.json Normal file

@@ -0,0 +1,10 @@
{
"infiniteLoopProtection": true,
"hardReloadOnChange": true,
"view": "browser",
"template": "docusaurus",
"node": "14",
"container": {
"node": "14"
}
}

docs/sidebars.js Normal file

@@ -0,0 +1,233 @@
module.exports = {
GettingStartedSidebar: [
{
type: "doc",
label: "Introduction",
id: "introduction",
},
{
type: "category",
label: "ICICLE",
link: {
type: `doc`,
id: 'icicle/overview',
},
collapsed: false,
items: [
{
type: "doc",
label: "Getting started",
id: "icicle/introduction"
},
{
type: "doc",
label: "ICICLE Core",
id: "icicle/core",
},
{
type: "category",
label: "Primitives",
link: {
type: `doc`,
id: 'icicle/primitives/overview',
},
collapsed: true,
items: [
{
type: "doc",
label: "MSM",
id: "icicle/primitives/msm",
},
{
type: "doc",
label: "NTT",
id: "icicle/primitives/ntt",
},
{
type: "doc",
label: "Keccak Hash",
id: "icicle/primitives/keccak",
},
{
type: "doc",
label: "Poseidon Hash",
id: "icicle/primitives/poseidon",
},
{
type: "doc",
label: "Poseidon2 Hash",
id: "icicle/primitives/poseidon2",
},
],
},
{
type: "doc",
label: "Polynomials",
id: "icicle/polynomials/overview",
},
{
type: "doc",
label: "Multi GPU Support",
id: "icicle/multi-gpu",
},
{
type: "category",
label: "Golang bindings",
link: {
type: `doc`,
id: "icicle/golang-bindings",
},
collapsed: true,
items: [
{
type: "category",
label: "MSM",
link: {
type: `doc`,
id: "icicle/golang-bindings/msm",
},
collapsed: true,
items: [
{
type: "doc",
label: "MSM pre computation",
id: "icicle/golang-bindings/msm-pre-computation",
}
]
},
{
type: "doc",
label: "NTT",
id: "icicle/golang-bindings/ntt",
},
{
type: "doc",
label: "EC-NTT",
id: "icicle/golang-bindings/ecntt",
},
{
type: "doc",
label: "Vector operations",
id: "icicle/golang-bindings/vec-ops",
},
{
type: "doc",
label: "Keccak Hash",
id: "icicle/golang-bindings/keccak",
},
{
type: "doc",
label: "Multi GPU Support",
id: "icicle/golang-bindings/multi-gpu",
},
]
},
{
type: "category",
label: "Rust bindings",
link: {
type: `doc`,
id: "icicle/rust-bindings",
},
collapsed: true,
items: [
{
type: "category",
label: "MSM",
link: {
type: `doc`,
id: "icicle/rust-bindings/msm",
},
collapsed: true,
items: [
{
type: "doc",
label: "MSM pre computation",
id: "icicle/rust-bindings/msm-pre-computation",
}
]
},
{
type: "doc",
label: "NTT",
id: "icicle/rust-bindings/ntt",
},
{
type: "doc",
label: "EC-NTT",
id: "icicle/rust-bindings/ecntt",
},
{
type: "doc",
label: "Vector operations",
id: "icicle/rust-bindings/vec-ops",
},
{
type: "doc",
label: "Keccak Hash",
id: "icicle/rust-bindings/keccak",
},
{
type: "doc",
label: "Multi GPU Support",
id: "icicle/rust-bindings/multi-gpu",
},
{
type: "doc",
label: "Polynomials",
id: "icicle/rust-bindings/polynomials",
},
],
},
{
type: "doc",
label: "Google Colab Instructions",
id: "icicle/colab-instructions",
},
{
type: "doc",
label: "ICICLE Provers",
id: "icicle/integrations"
},
]
},
{
type: "doc",
label: "Ingonyama Grant program",
id: "grants",
},
{
type: "doc",
label: "Contributor guide",
id: "contributor-guide",
},
{
type: "category",
label: "Additional Resources",
collapsed: false,
collapsible: false,
items: [
{
type: "link",
label: "YouTube",
href: "https://www.youtube.com/@ingo_ZK"
},
{
type: "link",
label: "Ingonyama Blog",
href: "https://www.ingonyama.com/blog"
},
{
type: "link",
label: "Ingopedia",
href: "https://www.ingonyama.com/ingopedia"
},
{
href: 'https://github.com/ingonyama-zk',
type: "link",
label: 'GitHub',
}
]
}
],
};

docs/src/css/custom.css Normal file

@@ -0,0 +1,59 @@
/**
* Any CSS included here will be global. The classic template
* bundles Infima by default. Infima is a CSS framework designed to
* work well for content-centric websites.
*/
/* You can override the default Infima variables here. */
:root {
--ifm-color-primary: #FFCB00;
--ifm-color-primary-dark: #FFCB00;
--ifm-color-primary-darker: #FFCB00;
--ifm-color-primary-darkest: #FFCB00;
--ifm-color-primary-light: #FFCB00;
--ifm-color-primary-lighter: #FFCB00;
--ifm-color-primary-lightest: #FFCB00;
--ifm-code-font-size: 95%;
}
/* For readability concerns, you should choose a lighter palette in dark mode. */
[data-theme='dark'] {
--ifm-color-primary: #FFCB00;
--ifm-color-primary-dark: #FFCB00;
--ifm-color-primary-darker:#FFCB00;
--ifm-color-primary-darkest: #FFCB00;
--ifm-color-primary-light:#FFCB00;
--ifm-color-primary-lighter: #FFCB00;
--ifm-color-primary-lightest: #FFCB00;
}
.docusaurus-highlight-code-line {
background-color: rgba(0, 0, 0, 0.1);
display: block;
margin: 0 calc(-1 * var(--ifm-pre-padding));
padding: 0 var(--ifm-pre-padding);
}
[data-theme='dark'] .docusaurus-highlight-code-line {
background-color: rgba(0, 0, 0, 0.3);
}
/* Mermaid elements must be changed to be visible in dark mode */
[data-theme='dark'] .mermaid .messageLine0, .messageLine1 {
filter: invert(51%) sepia(84%) saturate(405%) hue-rotate(21deg) brightness(94%) contrast(91%) !important;
}
/* NOTE Must be a separate specification from the above or it won't toggle off */
[data-theme='dark'] .mermaid .flowchart-link {
filter: invert(51%) sepia(84%) saturate(405%) hue-rotate(21deg) brightness(94%) contrast(91%) !important;
}
[data-theme='dark'] .mermaid .cluster-label {
filter: invert(51%) sepia(84%) saturate(405%) hue-rotate(21deg) brightness(94%) contrast(91%) !important;
}
[data-theme='dark'] .mermaid .messageText {
stroke:none !important; fill:white !important;
}
/* Our additions */
.anchor {
scroll-margin-top: 50pt;
}

docs/static/.nojekyll vendored Normal file

BIN docs/static/img/apilevels.png vendored Normal file (binary, 170 KiB, not shown)
BIN (three further images, 103 KiB / 76 KiB / 204 KiB, filenames not recoverable)
BIN docs/static/img/colab_change_runtime.png vendored Normal file (binary, 26 KiB, not shown)
BIN docs/static/img/logo.png vendored Normal file (binary, 116 KiB, not shown)
BIN docs/static/img/t4_gpu.png vendored Normal file (binary, 34 KiB, not shown)


@@ -1,6 +1,6 @@
# ZKContainer
We recommend using [ZKContainer](https://ingonyama.com/blog/Immanuel-ZKDC), where we have already preinstalled all the required dependencies, to run Icicle examples.
We recommend using [ZKContainer](https://www.ingonyama.com/blog/product-announcement-zk-containers), where we have already preinstalled all the required dependencies, to run Icicle examples.
To use our containers you will need [Docker](https://www.docker.com/) and [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/index.html).
In each example directory, ZKContainer files are located in a subdirectory `.devcontainer`.


@@ -1,85 +0,0 @@
# Icicle example: build a Merkle tree using Poseidon hash
## Best-Practices
We recommend running our examples in [ZK-containers](../../ZK-containers.md) to save your time and mental energy.
## Key-Takeaway
`Icicle` provides CUDA C++ template classes to accelerate Zero Knowledge (ZK) applications, for example, a popular [Poseidon hash function](https://www.poseidon-hash.info/).
Use class `Poseidon` to instantiate and use the hash function
### Instantiate hash function
```c++
Poseidon<BLS12_381::scalar_t> poseidon(arity, stream);
```
**Parameters:**
- **data class:** Here the hash operates on `BLS12_381::scalar_t`, a scalar field of the curve `BLS12-381`.
You can think of the field's elements as 32-byte integers modulo `p`, where `p` is a prime number specific to this field.
- **arity:** The number of elements in a hashed block.
- **stream:** CUDA streams allow multiple hashes and higher throughput.
### Hash multiple blocks in parallel
```c++
poseidon.hash_blocks(inBlocks, nBlocks, outHashes, hashType, stream);
```
**Parameters:**
- **nBlocks:** number of blocks we hash in parallel.
- **inBlocks:** input array of size `arity*nBlocks`. The blocks are arranged sequentially in the array.
- **outHashes:** output array of size `nBlocks`.
- **HashType:** In this example we use `Poseidon<BLS12_381::scalar_t>::HashType::MerkleTree`.
## What's in the example
1. Define the size of the example: the height of the full binary Merkle tree.
2. Hash blocks in parallel. The tree width determines the number of blocks to hash.
3. Build a Merkle tree from the hashes.
4. Use the tree to generate a membership proof for one of the computed hashes.
5. Validate the hash membership.
6. Tamper with the hash.
7. Invalidate the membership of the tampered hash.
## Details
### Merkle tree structure
Our Merkle tree is a **full binary tree** stored in a 1D array.
The tree nodes are stored following a level-first traversal of the binary tree.
For a given level, we use an offset to number elements from left to right. The node numbers in the figure below correspond to their locations in the array.
```
        Tree            Level
          0               0
         / \
        1   2             1
       / \ / \
      3  4 5  6           2

1D array representation: {0, 1, 2, 3, 4, 5, 6}
```
### Membership proof structure
We use two arrays:
- the position (left/right) of each node along the path toward the root
- the hash of the sibling node (the second node with the same parent)


@@ -0,0 +1,23 @@
cmake_minimum_required(VERSION 3.18)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CUDA_STANDARD 17)
set(CMAKE_CUDA_STANDARD_REQUIRED TRUE)
set(CMAKE_CXX_STANDARD_REQUIRED TRUE)
if (${CMAKE_VERSION} VERSION_LESS "3.24.0")
set(CMAKE_CUDA_ARCHITECTURES ${CUDA_ARCH})
else()
set(CMAKE_CUDA_ARCHITECTURES native) # on 3.24+, on earlier it is ignored, and the target is not passed
endif ()
project(example LANGUAGES CUDA CXX)
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr")
set(CMAKE_CUDA_FLAGS_RELEASE "")
set(CMAKE_CUDA_FLAGS_DEBUG "${CMAKE_CUDA_FLAGS_DEBUG} -g -G -O0")
add_executable(
example
example.cu
)
target_include_directories(example PRIVATE "../../../icicle/include")
target_link_libraries(example ${CMAKE_SOURCE_DIR}/build/icicle/lib/libingo_field_bn254.a)
set_target_properties(example PROPERTIES CUDA_SEPARABLE_COMPILATION ON)


@@ -0,0 +1,33 @@
# ICICLE best practices: Concurrent Data Transfer and NTT Computation
The [Number Theoretic Transform (NTT)](https://dev.ingonyama.com/icicle/primitives/ntt) is an integral component of many cryptographic algorithms, such as polynomial multiplication in Zero Knowledge Proofs. The performance bottleneck of NTT on GPUs is the data transfer between the host (CPU) and the device (GPU). In a typical NVIDIA GPU this transfer dominates the total NTT execution time.
## Key-Takeaway
When you need to run several NTTs, overlap data download, upload, and computation to improve data-bus (PCIe) and GPU utilization and to reduce total execution time.
Typically, you concurrently:
1. Download the output of the previous NTT back to the host
2. Upload the input for the next NTT to the device
3. Run the current NTT
> [!NOTE]
> This approach requires two on-device memory vectors, halving the maximum NTT size.
## Best-Practices
1. Use three separate CUDA streams for the Download, Upload, and Compute operations, as sketched below.
2. Use pinned (page-locked) host memory to speed up data-bus transfers; `cudaHostAlloc` allocates pinned memory.
3. Use in-place NTT to save device memory.
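As a concrete fragment (inside `main`, mirroring the full example below; `scalar_t` comes from the curve API, and the element count is an illustrative assumption):
```c++
// One CUDA stream per role: compute, host-to-device upload, device-to-host download.
cudaStream_t stream_compute, stream_h2d, stream_d2h;
cudaStreamCreate(&stream_compute);
cudaStreamCreate(&stream_h2d);
cudaStreamCreate(&stream_d2h);

// Pinned host memory makes cudaMemcpyAsync transfers truly asynchronous.
const size_t n = size_t(1) << 22; // illustrative element count
scalar_t* h_buf;
cudaHostAlloc((void**)&h_buf, sizeof(scalar_t) * n, cudaHostAllocDefault);
```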
## Running the example
To change the default curve BN254, edit `compile.sh` and `CMakeLists.txt`:
```sh
./compile.sh
./run.sh
```
To compare with the ICICLE baseline (i.e., non-concurrent) NTT, you can run [this example](../ntt/README.md).

View File

@@ -0,0 +1,16 @@
#!/bin/bash
# Exit immediately on error
set -e
mkdir -p build/example
mkdir -p build/icicle
# Configure and build Icicle
cmake -S ../../../icicle/ -B build/icicle -DCMAKE_BUILD_TYPE=Release -DCURVE=bn254 -DG2=OFF -DMSM=OFF
cmake --build build/icicle
# Configure and build the example application
cmake -S . -B build/example
cmake --build build/example

View File

@@ -0,0 +1,149 @@
#include <stdio.h>
#include <iostream>
#include <string>
#include <chrono>
#include "curves/params/bn254.cuh"
#include "api/bn254.h"
using namespace bn254;
using namespace ntt;
const std::string curve = "BN254";
typedef scalar_t S;
typedef scalar_t E;
const unsigned max_log_ntt_size = 27;
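// Fill the whole batch with consecutive nonzero field elements (deterministic test data).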
void initialize_input(const unsigned ntt_size, const unsigned nof_ntts, E* elements)
{
for (unsigned i = 0; i < ntt_size * nof_ntts; i++) {
elements[i] = E::from(i + 1);
}
}
using FpMilliseconds = std::chrono::duration<float, std::chrono::milliseconds::period>;
#define START_TIMER(timer) auto timer##_start = std::chrono::high_resolution_clock::now();
#define END_TIMER(timer, msg) \
printf("%s: %.0f ms\n", msg, FpMilliseconds(std::chrono::high_resolution_clock::now() - timer##_start).count());
int main(int argc, char** argv)
{
cudaDeviceReset();
cudaDeviceProp deviceProperties;
int deviceId = 0;
cudaGetDeviceProperties(&deviceProperties, deviceId);
std::string gpu_full_name = deviceProperties.name;
std::cout << gpu_full_name << std::endl;
std::cout << "Curve: " << curve << std::endl;
S basic_root = S::omega(max_log_ntt_size);
// change these parameters to match the desired NTT size and batch size
const unsigned log_ntt_size = 22;
const unsigned nof_ntts = 16;
std::cout << "log NTT size: " << log_ntt_size << std::endl;
const unsigned ntt_size = 1 << log_ntt_size;
std::cout << "Batch size: " << nof_ntts << std::endl;
// Create separate CUDA streams for overlapping data transfers and kernel execution.
cudaStream_t stream_compute, stream_h2d, stream_d2h;
cudaStreamCreate(&stream_compute);
cudaStreamCreate(&stream_h2d);
cudaStreamCreate(&stream_d2h);
// Create device context for NTT computation
auto ctx_compute = device_context::DeviceContext{
stream_compute, // stream
0, // device_id
0, // mempool
};
// Initialize NTT domain and configuration
bn254_initialize_domain(&basic_root, ctx_compute, /* fast twiddles */ true);
NTTConfig<S> config_compute = default_ntt_config<S>(ctx_compute);
config_compute.ntt_algorithm = NttAlgorithm::MixedRadix;
config_compute.batch_size = nof_ntts;
config_compute.are_inputs_on_device = true;
config_compute.are_outputs_on_device = true;
config_compute.is_async = true;
std::cout << "Concurrent Download, Upload, and Compute In-place NTT" << std::endl;
int nof_blocks = 32;
std::cout << "Number of blocks: " << nof_blocks << std::endl;
int block_size = ntt_size * nof_ntts / nof_blocks;
// on-host pinned data
E* h_inp[2];
E* h_out[2];
for (int i = 0; i < 2; i++) {
cudaHostAlloc((void**)&h_inp[i], sizeof(E) * ntt_size * nof_ntts, cudaHostAllocDefault);
cudaHostAlloc((void**)&h_out[i], sizeof(E) * ntt_size * nof_ntts, cudaHostAllocDefault);
}
// on-device in-place data
// we need two on-device vectors to overlap data transfers with NTT kernel execution
E* d_vec[2];
for (int i = 0; i < 2; i++) {
cudaMalloc((void**)&d_vec[i], sizeof(E) * ntt_size * nof_ntts);
}
// initialize input data
initialize_input(ntt_size, nof_ntts, h_inp[0]);
initialize_input(ntt_size, nof_ntts, h_inp[1]);
cudaEvent_t compute_start, compute_stop;
cudaEventCreate(&compute_start);
cudaEventCreate(&compute_stop);
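// Double buffering: each run computes the NTT on one device vector while the other is transferred.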
for (int run = 0; run < 10; run++) {
int vec_compute = run % 2;
int vec_transfer = (run + 1) % 2;
std::cout << "Run: " << run << std::endl;
std::cout << "Compute Vector: " << vec_compute << std::endl;
std::cout << "Transfer Vector: " << vec_transfer << std::endl;
START_TIMER(inplace);
cudaEventRecord(compute_start, stream_compute);
bn254_ntt_cuda(d_vec[vec_compute], ntt_size, NTTDir::kForward, config_compute, d_vec[vec_compute]);
cudaEventRecord(compute_stop, stream_compute);
// delay the host-to-device upload one block behind the device-to-host download: this preserves write-after-read ordering on the in-place buffer
for (int i = 0; i <= nof_blocks; i++) {
if (i < nof_blocks) {
cudaMemcpyAsync(
&h_out[vec_transfer][i * block_size], &d_vec[vec_transfer][i * block_size], sizeof(E) * block_size,
cudaMemcpyDeviceToHost, stream_d2h);
}
if (i > 0) {
cudaMemcpyAsync(
&d_vec[vec_transfer][(i - 1) * block_size], &h_inp[vec_transfer][(i - 1) * block_size],
sizeof(E) * block_size, cudaMemcpyHostToDevice, stream_h2d);
}
// synchronize upload and download at the end of the block to ensure data integrity
cudaStreamSynchronize(stream_d2h);
cudaStreamSynchronize(stream_h2d);
}
// synchronize compute stream with the end of the computation
cudaEventSynchronize(compute_stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, compute_start, compute_stop);
END_TIMER(inplace, "Concurrent In-Place NTT");
std::cout << "NTT time: " << milliseconds << " ms" << std::endl;
}
// Clean-up
for (int i = 0; i < 2; i++) {
cudaFree(d_vec[i]);
cudaFreeHost(h_inp[i]);
cudaFreeHost(h_out[i]);
}
cudaEventDestroy(compute_start);
cudaEventDestroy(compute_stop);
cudaStreamDestroy(stream_compute);
cudaStreamDestroy(stream_d2h);
cudaStreamDestroy(stream_h2d);
return 0;
}

View File

@@ -0,0 +1,2 @@
#!/bin/bash
./build/example/example

View File

@@ -8,19 +8,16 @@ if (${CMAKE_VERSION} VERSION_LESS "3.24.0")
else()
set(CMAKE_CUDA_ARCHITECTURES native) # 'native' requires CMake 3.24+; on earlier versions it is ignored and no target architecture is passed
endif ()
project(icicle LANGUAGES CUDA CXX)
project(example LANGUAGES CUDA CXX)
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr")
set(CMAKE_CUDA_FLAGS_RELEASE "")
set(CMAKE_CUDA_FLAGS_DEBUG "${CMAKE_CUDA_FLAGS_DEBUG} -g -G -O0")
# change the path to your Icicle location
include_directories("../../.." "/icicle" "/opt/icicle")
add_executable(
example
example.cu
)
find_library(NVML_LIBRARY nvidia-ml PATHS /usr/local/cuda-12.0/targets/x86_64-linux/lib/stubs/ )
target_link_libraries(example ${NVML_LIBRARY})
target_include_directories(example PRIVATE "../../../icicle/include")
target_link_libraries(example ${CMAKE_SOURCE_DIR}/build/icicle/lib/libingo_curve_bn254.a)
set_target_properties(example PROPERTIES CUDA_SEPARABLE_COMPILATION ON)

View File

@@ -1,9 +1,5 @@
# Icicle example: Multi-Scalar Multiplication (MSM)
## Best-Practices
We recommend running our examples in [ZK-containers](../../ZK-containers.md) to save your time and mental energy.
## Key-Takeaway
`Icicle` provides CUDA C++ template function `MSM` to accelerate [Multi-Scalar Multiplication](https://github.com/ingonyama-zk/ingopedia/blob/master/src/msm.md).
@@ -45,8 +41,8 @@ The configuration is passed to the kernel as a structure of type `msm::MSMConfig
## What's in the example
1. Define the parameters of MSM
2. Generate random inputs on-host
3. Configure and execute MSM using on-host data
4. Copy inputs on-device
5. Configure and execute MSM using on-device data
6. Repeat the above steps for G2 points

View File

@@ -3,7 +3,13 @@
# Exit immediately on error
set -e
rm -rf build
mkdir -p build
cmake -S . -B build
cmake --build build
mkdir -p build/example
mkdir -p build/icicle
# Configure and build Icicle
cmake -S ../../../icicle/ -B build/icicle -DCMAKE_BUILD_TYPE=Release -DCURVE=bn254 -DG2=ON
cmake --build build/icicle
# Configure and build the example application
cmake -S . -B build/example
cmake --build build/example

View File

@@ -2,10 +2,8 @@
#include <iostream>
#include <iomanip>
// include MSM template
#define CURVE_ID 1
#include "icicle/appUtils/msm/msm.cu"
using namespace curve_config;
#include "api/bn254.h"
using namespace bn254;
int main(int argc, char* argv[])
{
@@ -16,41 +14,40 @@ int main(int argc, char* argv[])
unsigned msm_size = 1048576;
std::cout << "MSM size: " << msm_size << std::endl;
int N = batch_size * msm_size;
std::cout << "Part I: use G1 points" << std::endl;
std::cout << "Generating random inputs on-host" << std::endl;
scalar_t* scalars = new scalar_t[N];
affine_t* points = new affine_t[N];
projective_t result;
scalar_t::RandHostMany(scalars, N);
projective_t::RandHostManyAffine(points, N);
scalar_t::rand_host_many(scalars, N);
projective_t::rand_host_many_affine(points, N);
std::cout << "Using default MSM configuration with on-host inputs" << std::endl;
auto config = msm::DefaultMSMConfig();
device_context::DeviceContext ctx = device_context::get_default_device_context();
msm::MSMConfig config = {
ctx, // ctx
0, // points_size
1, // precompute_factor
0, // c
0, // bitsize
10, // large_bucket_factor
1, // batch_size
false, // are_scalars_on_device
false, // are_scalars_montgomery_form
false, // are_points_on_device
false, // are_points_montgomery_form
false, // are_results_on_device
false, // is_big_triangle
false, // is_async
};
config.batch_size = batch_size;
std::cout << "Running MSM kernel" << std::endl;
// Create two events to time the MSM kernel
std::cout << "Running MSM kernel with on-host inputs" << std::endl;
cudaStream_t stream = config.ctx.stream;
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Record the start event on the stream
cudaEventRecord(start, stream);
// Execute the MSM kernel
msm::MSM<scalar_t, affine_t, projective_t>(scalars, points, msm_size, config, &result);
// Record the stop event on the stream
cudaEventRecord(stop, stream);
// Wait for the stop event to complete
cudaEventSynchronize(stop);
// Calculate the elapsed time between the start and stop events
cudaEventElapsedTime(&time, start, stop);
// Destroy the events
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Print the elapsed time
std::cout << "Kernel runtime: " << std::fixed << std::setprecision(3) << time * 1e-3 << " sec." << std::endl;
// Print the result
bn254_msm_cuda(scalars, points, msm_size, config, &result);
std::cout << projective_t::to_affine(result) << std::endl;
std::cout << "Copying inputs on-device" << std::endl;
@@ -69,24 +66,9 @@ int main(int argc, char* argv[])
config.are_points_on_device = true;
std::cout << "Running MSM kernel with on-device inputs" << std::endl;
// Create two events to time the MSM kernel
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Record the start event on the stream
cudaEventRecord(start, stream);
// Execute the MSM kernel
msm::MSM<scalar_t, affine_t, projective_t>(scalars_d, points_d, msm_size, config, result_d);
// Record the stop event on the stream
cudaEventRecord(stop, stream);
// Wait for the stop event to complete
cudaEventSynchronize(stop);
// Calculate the elapsed time between the start and stop events
cudaEventElapsedTime(&time, start, stop);
// Destroy the events
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Print the elapsed time
std::cout << "Kernel runtime: " << std::fixed << std::setprecision(3) << time * 1e-3 << " sec." << std::endl;
bn254_msm_cuda(scalars_d, points_d, msm_size, config, result_d);
// Copy the result back to the host
cudaMemcpy(&result, result_d, sizeof(projective_t), cudaMemcpyDeviceToHost);
// Print the result
@@ -95,6 +77,48 @@ int main(int argc, char* argv[])
cudaFree(scalars_d);
cudaFree(points_d);
cudaFree(result_d);
// Free the host memory, keep scalars for G2 example
delete[] points;
std::cout << "Part II: use G2 points" << std::endl;
std::cout << "Generating random inputs on-host" << std::endl;
// use the same scalars
g2_affine_t* g2_points = new g2_affine_t[N];
g2_projective_t::rand_host_many_affine(g2_points, N);
std::cout << "Reconfiguring MSM to use on-host inputs" << std::endl;
config.are_results_on_device = false;
config.are_scalars_on_device = false;
config.are_points_on_device = false;
g2_projective_t g2_result;
bn254_g2_msm_cuda(scalars, g2_points, msm_size, config, &g2_result);
std::cout << g2_projective_t::to_affine(g2_result) << std::endl;
std::cout << "Copying inputs on-device" << std::endl;
g2_affine_t* g2_points_d;
g2_projective_t* g2_result_d;
cudaMalloc(&scalars_d, sizeof(scalar_t) * N);
cudaMalloc(&g2_points_d, sizeof(g2_affine_t) * N);
cudaMalloc(&g2_result_d, sizeof(g2_projective_t));
cudaMemcpy(scalars_d, scalars, sizeof(scalar_t) * N, cudaMemcpyHostToDevice);
cudaMemcpy(g2_points_d, g2_points, sizeof(g2_affine_t) * N, cudaMemcpyHostToDevice);
std::cout << "Reconfiguring MSM to use on-device inputs" << std::endl;
config.are_results_on_device = true;
config.are_scalars_on_device = true;
config.are_points_on_device = true;
std::cout << "Running MSM kernel with on-device inputs" << std::endl;
bn254_g2_msm_cuda(scalars_d, g2_points_d, msm_size, config, g2_result_d);
cudaMemcpy(&g2_result, g2_result_d, sizeof(g2_projective_t), cudaMemcpyDeviceToHost);
std::cout << g2_projective_t::to_affine(g2_result) << std::endl;
cudaFree(scalars_d);
cudaFree(g2_points_d);
cudaFree(g2_result_d);
delete[] g2_points;
delete[] scalars;
cudaStreamDestroy(stream);
return 0;
}

View File

@@ -1,2 +1,2 @@
#!/bin/bash
./build/example
./build/example/example

View File

@@ -0,0 +1,27 @@
cmake_minimum_required(VERSION 3.18)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CUDA_STANDARD 17)
set(CMAKE_CUDA_STANDARD_REQUIRED TRUE)
set(CMAKE_CXX_STANDARD_REQUIRED TRUE)
if (${CMAKE_VERSION} VERSION_LESS "3.24.0")
set(CMAKE_CUDA_ARCHITECTURES ${CUDA_ARCH})
else()
set(CMAKE_CUDA_ARCHITECTURES native) # 'native' requires CMake 3.24+; on earlier versions it is ignored and no target architecture is passed
endif ()
project(icicle LANGUAGES CUDA CXX)
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr")
set(CMAKE_CUDA_FLAGS_RELEASE "")
set(CMAKE_CUDA_FLAGS_DEBUG "${CMAKE_CUDA_FLAGS_DEBUG} -g -G -O0")
# change the path to your Icicle location
add_executable(
example
example.cu
)
target_include_directories(example PRIVATE "../../../icicle/include")
target_link_libraries(example ${CMAKE_SOURCE_DIR}/build/icicle/lib/libingo_curve_bn254.a)
target_link_libraries(example ${CMAKE_SOURCE_DIR}/build/icicle/lib/libingo_field_bn254.a)
find_library(NVML_LIBRARY nvidia-ml PATHS /usr/local/cuda/targets/x86_64-linux/lib/stubs/ )
target_link_libraries(example ${NVML_LIBRARY})
set_target_properties(example PROPERTIES CUDA_SEPARABLE_COMPILATION ON)

View File

@@ -0,0 +1,52 @@
# Icicle example: using multiple GPUs to hash a large dataset
## Best-Practices
This example builds on the [single GPU Poseidon example](../poseidon/README.md), so we recommend running it first.
## Key-Takeaway
Use a `device_context::DeviceContext` variable to select the GPU to use.
Use C++ threads to compute `Icicle` primitives on different GPUs in parallel.
## Concise Usage Explanation
1. Include C++ threads
```c++
#include <thread>
```
2. Define a __thread function__. Importantly, the device context `ctx` holds the GPU id.
```c++
void threadPoseidon(device_context::DeviceContext ctx, ...) {...}
```
3. Initialize device contexts for different GPUs
```c++
device_context::DeviceContext ctx0 = device_context::get_default_device_context();
ctx0.device_id=0;
device_context::DeviceContext ctx1 = device_context::get_default_device_context();
ctx1.device_id=1;
```
4. Finally, spawn the threads and wait for their completion (a combined sketch follows after this list)
```c++
std::thread thread0(threadPoseidon, ctx0, ...);
std::thread thread1(threadPoseidon, ctx1, ...);
thread0.join();
thread1.join();
```
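Putting the four steps together, here is a hedged end-to-end sketch; the `threadPoseidon` body and its data arguments are placeholders, while the real example also passes input layers, output buffers, and a `Poseidon` instance:
```c++
#include <thread>
#include <cuda_runtime.h>
#include "api/bn254.h" // curve API providing device_context, as in the example

// Placeholder thread body: the real example hashes one data partition here.
void threadPoseidon(device_context::DeviceContext ctx /* , data... */)
{
  cudaSetDevice(ctx.device_id); // bind this thread to its GPU
  // ... run Poseidon hashing on this device ...
}

int main()
{
  device_context::DeviceContext ctx0 = device_context::get_default_device_context();
  ctx0.device_id = 0;
  device_context::DeviceContext ctx1 = device_context::get_default_device_context();
  ctx1.device_id = 1;
  std::thread thread0(threadPoseidon, ctx0);
  std::thread thread1(threadPoseidon, ctx1);
  thread0.join();
  thread1.join();
  return 0;
}
```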
## What's in the example
This is a **toy** example executing the first step of Filecoin's Pre-Commit 2 phase: compute $2^{30}$ Poseidon hashes, one for each column of an $11 \times 2^{30}$ matrix.
1. Define the size of the example: the full $2^{30}$-column dataset won't fit on a typical machine, so we partition the problem into `nof_partitions` partitions
2. Hash two partitions in parallel on two GPUs
3. Hash two partitions in series on one GPU
4. Compare execution times

View File

@@ -0,0 +1,15 @@
#!/bin/bash
# Exit immediately on error
set -e
mkdir -p build/example
mkdir -p build/icicle
# Configure and build Icicle
cmake -S ../../../icicle/ -B build/icicle -DCMAKE_BUILD_TYPE=Release -DCURVE=bn254
cmake --build build/icicle
# Configure and build the example application
cmake -S . -B build/example
cmake --build build/example

View File

@@ -0,0 +1,152 @@
#include <iostream>
#include <thread>
#include <chrono>
#include <cstdlib> // EXIT_FAILURE
#include <nvml.h>
#include "api/bn254.h"
#include "gpu-utils/error_handler.cuh"
#include "poseidon/poseidon.cuh"
#include "hash/hash.cuh"
using namespace poseidon;
using namespace bn254;
void checkCudaError(cudaError_t error)
{
if (error != cudaSuccess) {
std::cerr << "CUDA error: " << cudaGetErrorString(error) << std::endl;
// Handle the error, e.g., exit the program or throw an exception.
}
}
// number of matrix columns; also the Poseidon arity used below
const int size_col = 11;
void threadPoseidon(
device_context::DeviceContext ctx,
unsigned size_partition,
scalar_t* layers,
scalar_t* column_hashes,
Poseidon<scalar_t> * poseidon)
{
cudaError_t err_result = CHK_STICKY(cudaSetDevice(ctx.device_id));
if (err_result != cudaSuccess) {
std::cerr << "CUDA error: " << cudaGetErrorString(err_result) << std::endl;
return;
}
HashConfig column_config = default_hash_config(ctx);
cudaError_t err = poseidon->hash_many(layers, column_hashes, (size_t) size_partition, size_col, 1, column_config);
checkCudaError(err);
}
using FpMilliseconds = std::chrono::duration<float, std::chrono::milliseconds::period>;
#define START_TIMER(timer) auto timer##_start = std::chrono::high_resolution_clock::now();
#define END_TIMER(timer, msg) \
printf("%s: %.0f ms\n", msg, FpMilliseconds(std::chrono::high_resolution_clock::now() - timer##_start).count());
#define CHECK_ALLOC(ptr) \
if ((ptr) == nullptr) { \
std::cerr << "Memory allocation for '" #ptr "' failed." << std::endl; \
exit(EXIT_FAILURE); \
}
int main()
{
const unsigned size_row = (1 << 30);
const unsigned nof_partitions = 64;
const unsigned size_partition = size_row / nof_partitions;
// layers is allocated only for one partition, need to reuse for different partitions
const uint32_t size_layers = size_col * size_partition;
nvmlInit();
unsigned int deviceCount;
nvmlDeviceGetCount(&deviceCount);
std::cout << "Available GPUs: " << deviceCount << std::endl;
for (unsigned int i = 0; i < deviceCount; ++i) {
nvmlDevice_t device;
nvmlMemory_t memory;
char name[NVML_DEVICE_NAME_BUFFER_SIZE];
nvmlDeviceGetHandleByIndex(i, &device);
nvmlDeviceGetName(device, name, NVML_DEVICE_NAME_BUFFER_SIZE);
nvmlDeviceGetMemoryInfo(device, &memory);
std::cout << "Device ID: " << i << ", Type: " << name << ", Memory Total/Free (MiB) " << memory.total / 1024 / 1024
<< "/" << memory.free / 1024 / 1024 << std::endl;
}
const unsigned memory_partition = sizeof(scalar_t) * (size_col + 1) * size_partition / 1024 / 1024;
std::cout << "Required Memory (MiB) " << memory_partition << std::endl;
//===============================================================================
// Key: multiple devices are supported by device context
//===============================================================================
device_context::DeviceContext ctx0 = device_context::get_default_device_context();
ctx0.device_id = 0;
device_context::DeviceContext ctx1 = device_context::get_default_device_context();
ctx1.device_id = 1;
std::cout << "Allocate and initialize the memory for layers and hashes" << std::endl;
scalar_t* layers0 = static_cast<scalar_t*>(malloc(size_layers * sizeof(scalar_t)));
CHECK_ALLOC(layers0);
scalar_t s = scalar_t::zero();
for (unsigned i = 0; i < size_col * size_partition; i++) {
layers0[i] = s;
s = s + scalar_t::one();
}
scalar_t* layers1 = static_cast<scalar_t*>(malloc(size_layers * sizeof(scalar_t)));
CHECK_ALLOC(layers1);
s = scalar_t::zero() + scalar_t::one();
for (unsigned i = 0; i < size_col * size_partition; i++) {
layers1[i] = s;
s = s + scalar_t::one();
}
scalar_t* column_hash0 = static_cast<scalar_t*>(malloc(size_partition * sizeof(scalar_t)));
CHECK_ALLOC(column_hash0);
scalar_t* column_hash1 = static_cast<scalar_t*>(malloc(size_partition * sizeof(scalar_t)));
CHECK_ALLOC(column_hash1);
Poseidon<scalar_t> column_poseidon0(size_col, ctx0);
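// switch to GPU 1 before constructing the second Poseidon instance for ctx1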
cudaError_t err_result = CHK_STICKY(cudaSetDevice(ctx1.device_id));
if (err_result != cudaSuccess) {
std::cerr << "CUDA error: " << cudaGetErrorString(err_result) << std::endl;
return EXIT_FAILURE;
}
Poseidon<scalar_t> column_poseidon1(size_col, ctx1);
std::cout << "Parallel execution of Poseidon threads" << std::endl;
START_TIMER(parallel);
std::thread thread0(threadPoseidon, ctx0, size_partition, layers0, column_hash0, &column_poseidon0);
std::thread thread1(threadPoseidon, ctx1, size_partition, layers1, column_hash1, &column_poseidon1);
// Wait for the threads to finish
thread0.join();
thread1.join();
END_TIMER(parallel, "2 GPUs");
std::cout << "Output Data from Thread 0: ";
std::cout << column_hash0[0] << std::endl;
std::cout << "Output Data from Thread 1: ";
std::cout << column_hash1[0] << std::endl;
std::cout << "Sequential execution of Poseidon threads" << std::endl;
START_TIMER(sequential);
std::thread thread2(threadPoseidon, ctx0, size_partition, layers0, column_hash0, &column_poseidon0);
thread2.join();
std::thread thread3(threadPoseidon, ctx0, size_partition, layers1, column_hash1, &column_poseidon0);
thread3.join();
END_TIMER(sequential, "1 GPU");
std::cout << "Output Data from Thread 2: ";
std::cout << column_hash0[0] << std::endl;
std::cout << "Output Data from Thread 3: ";
std::cout << column_hash1[0] << std::endl;
nvmlShutdown();
return 0;
}

View File

@@ -0,0 +1,2 @@
#!/bin/bash
./build/example/example

View File

@@ -8,16 +8,17 @@ if (${CMAKE_VERSION} VERSION_LESS "3.24.0")
else()
set(CMAKE_CUDA_ARCHITECTURES native) # 'native' requires CMake 3.24+; on earlier versions it is ignored and no target architecture is passed
endif ()
project(icicle LANGUAGES CUDA CXX)
project(example LANGUAGES CUDA CXX)
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr")
set(CMAKE_CUDA_FLAGS_RELEASE "")
set(CMAKE_CUDA_FLAGS_DEBUG "${CMAKE_CUDA_FLAGS_DEBUG} -g -G -O0")
# change the path to your Icicle location
add_executable(
example
example.cu
)
target_include_directories(example PRIVATE "../../../icicle/include")
target_link_libraries(example ${CMAKE_SOURCE_DIR}/build/icicle/lib/libingo_field_bn254.a)
find_library(NVML_LIBRARY nvidia-ml PATHS /usr/local/cuda/targets/x86_64-linux/lib/stubs/ )
target_link_libraries(example ${NVML_LIBRARY})
set_target_properties(example PROPERTIES CUDA_SEPARABLE_COMPILATION ON)

View File

@@ -1,9 +1,5 @@
# Icicle example: Multiplication
## Best-Practices
We recommend running our examples in [ZK-containers](../../ZK-containers.md) to save your time and mental energy.
## Key-Takeaway
`Icicle` accelerates the multiplication operation `*` using the [Karatsuba algorithm](https://en.wikipedia.org/wiki/Karatsuba_algorithm).
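As background, the standard Karatsuba identity (not quoted from the Icicle docs): splitting each operand into high and low halves with limb base $B$,

$$(aB + b)(cB + d) = ac\,B^2 + \big[(a+b)(c+d) - ac - bd\big]B + bd,$$

so only three distinct half-size products $ac$, $bd$, and $(a+b)(c+d)$ are needed instead of four.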
@@ -14,10 +10,10 @@ Define a `CURVE_ID` and include curve configuration header:
```c++
#define CURVE_ID 1
#include "icicle/curves/curve_config.cuh"
#include "curves/curve_config.cuh"
```
The values of `CURVE_ID` for different curves are in the abobe header. Multiplication is accelerated both for field scalars and point fields.
The values of `CURVE_ID` for different curves are in the above header. Multiplication is accelerated both for field scalars and point fields.
```c++
using namespace curve_config;
@@ -25,6 +21,12 @@ scalar_t a;
point_field_t b;
```
## Running the example
- `cd` to your example directory
- compile with `./compile.sh`
- run with `./run.sh`
## What's in the example
1. Define the parameters for the example such as vector size
@@ -32,3 +34,4 @@ point_field_t b;
3. Copy them on-device
4. Execute element-wise vector multiplication on-device
5. Copy results on-host

View File

@@ -3,7 +3,13 @@
# Exit immediately on error
set -e
rm -rf build
mkdir -p build
cmake -S . -B build
cmake --build build
mkdir -p build/example
mkdir -p build/icicle
# Configure and build Icicle
cmake -S ../../../icicle/ -B build/icicle -DMSM=OFF -DCMAKE_BUILD_TYPE=Release -DCURVE=bn254
cmake --build build/icicle
# Configure and build the example application
cmake -S . -B build/example
cmake --build build/example

View File

@@ -1,42 +1,34 @@
#include <iostream>
#include <iomanip>
#include <chrono>
#include <cuda_runtime.h>
#include <nvml.h>
#define CURVE_ID 1
#include "/icicle/icicle/curves/curve_config.cuh"
using namespace curve_config;
#include "api/bn254.h"
#include "vec_ops/vec_ops.cuh"
using namespace vec_ops;
using namespace bn254;
typedef scalar_t T;
// typedef point_field_t T;
const std::string curve = "BN254";
#define MAX_THREADS_PER_BLOCK 256
template <typename T>
__global__ void vectorMult(T* vec_a, T* vec_b, T* vec_r, size_t n_elments)
int vector_mult(T* vec_b, T* vec_a, T* vec_result, size_t n_elments, device_context::DeviceContext ctx)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < n_elments) { vec_r[tid] = vec_a[tid] * vec_b[tid]; }
}
template <typename T>
int vector_mult(T* vec_b, T* vec_a, T* vec_result, size_t n_elments)
{
// Set the grid and block dimensions
int num_blocks = (int)ceil((float)n_elments / MAX_THREADS_PER_BLOCK);
int threads_per_block = MAX_THREADS_PER_BLOCK;
// Call the kernel to perform element-wise modular multiplication
vectorMult<T><<<num_blocks, threads_per_block>>>(vec_a, vec_b, vec_result, n_elments);
vec_ops::VecOpsConfig config = vec_ops::DefaultVecOpsConfig();
config.is_a_on_device = true;
config.is_b_on_device = true;
config.is_result_on_device = true;
cudaError_t err = bn254_mul_cuda(vec_a, vec_b, n_elments, config, vec_result);
if (err != cudaSuccess) {
std::cerr << "Failed to multiply vectors - " << cudaGetErrorString(err) << std::endl;
return 0;
}
return 0;
}
int main(int argc, char** argv)
{
const unsigned vector_size = 1 << 20;
const unsigned repetitions = 1 << 20;
const unsigned vector_size = 1 << 15;
const unsigned repetitions = 1 << 15;
cudaError_t err;
nvmlInit();
@@ -49,7 +41,6 @@ int main(int argc, char** argv)
} else {
std::cerr << "Failed to get GPU model name." << std::endl;
}
unsigned power_limit;
nvmlDeviceGetPowerManagementLimit(device, &power_limit);
@@ -71,33 +62,27 @@ int main(int argc, char** argv)
T* host_in1 = (T*)malloc(vector_size * sizeof(T));
T* host_in2 = (T*)malloc(vector_size * sizeof(T));
std::cout << "Initializing vectors with random data" << std::endl;
for (int i = 0; i < vector_size; i++) {
if ((i > 0) && i % (1 << 20) == 0) std::cout << "Elements: " << i << std::endl;
host_in1[i] = T::rand_host();
host_in2[i] = T::rand_host();
}
T::rand_host_many(host_in1, vector_size);
T::rand_host_many(host_in2, vector_size);
// device data
device_context::DeviceContext ctx = device_context::get_default_device_context();
T* device_in1;
T* device_in2;
T* device_out;
err = cudaMalloc((void**)&device_in1, vector_size * sizeof(T));
if (err != cudaSuccess) {
std::cerr << "Failed to allocate device memory - " << cudaGetErrorString(err) << std::endl;
return 0;
}
err = cudaMalloc((void**)&device_in2, vector_size * sizeof(T));
if (err != cudaSuccess) {
std::cerr << "Failed to allocate device memory - " << cudaGetErrorString(err) << std::endl;
return 0;
}
err = cudaMalloc((void**)&device_out, vector_size * sizeof(T));
if (err != cudaSuccess) {
std::cerr << "Failed to allocate device memory - " << cudaGetErrorString(err) << std::endl;
return 0;
@@ -105,14 +90,12 @@ int main(int argc, char** argv)
// copy from host to device
err = cudaMemcpy(device_in1, host_in1, vector_size * sizeof(T), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cerr << "Failed to copy data from host to device - " << cudaGetErrorString(err) << std::endl;
return 0;
}
err = cudaMemcpy(device_in2, host_in2, vector_size * sizeof(T), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cerr << "Failed to copy data from host to device - " << cudaGetErrorString(err) << std::endl;
return 0;
@@ -121,12 +104,7 @@ int main(int argc, char** argv)
std::cout << "Starting warm-up" << std::endl;
// Warm-up loop
for (int i = 0; i < repetitions; i++) {
vector_mult(device_in1, device_in2, device_out, vector_size);
// err = lde::Mul(device_in1, device_in2, vector_size, is_on_device, is_montgomery, ctx, device_out);
// if (err != cudaSuccess) {
// std::cerr << "Failed to call lde::Mul" << cudaGetErrorString(err) << std::endl;
// return 0;
// }
vector_mult(device_in1, device_in2, device_out, vector_size, ctx);
}
std::cout << "Starting benchmarking" << std::endl;
@@ -142,17 +120,10 @@ int main(int argc, char** argv)
std::cerr << "Failed to get GPU temperature." << std::endl;
}
auto start_time = std::chrono::high_resolution_clock::now();
// Benchmark loop
for (int i = 0; i < repetitions; i++) {
vector_mult(device_in1, device_in2, device_out, vector_size);
// err = lde::Mul(device_in1, device_in2, vector_size, is_on_device, is_montgomery, ctx, device_out);
// if (err != cudaSuccess) {
// std::cerr << "Failed to call lde::Mul" << cudaGetErrorString(err) << std::endl;
// return 0;
// }
vector_mult(device_in1, device_in2, device_out, vector_size, ctx);
}
auto end_time = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::microseconds>(end_time - start_time);
std::cout << "Elapsed time: " << duration.count() << " microseconds" << std::endl;
@@ -179,14 +150,13 @@ int main(int argc, char** argv)
// validate multiplication here...
// clean up and exit
free(host_in1);
free(host_in2);
free(host_out);
cudaFree(device_in1);
cudaFree(device_in2);
cudaFree(device_out);
nvmlShutdown();
return 0;
}

Some files were not shown because too many files have changed in this diff.