mirror of
https://github.com/pseXperiments/icicle.git
synced 2026-01-06 22:24:06 -05:00
GoLang bindings for v1.x (#386)
This commit is contained in:
4
.github/changed-files.yml
vendored
4
.github/changed-files.yml
vendored
@@ -1,5 +1,7 @@
|
||||
golang:
|
||||
- goicicle/**/*.go'
|
||||
- wrappers/golang/**/*.go'
|
||||
- wrappers/golang/**/*.h'
|
||||
- wrappers/golang/**/*.tmpl'
|
||||
- go.mod
|
||||
rust:
|
||||
- wrappers/rust
|
||||
|
||||
28
.github/workflows/main-build.yml
vendored
28
.github/workflows/main-build.yml
vendored
@@ -80,18 +80,22 @@ jobs:
|
||||
# Building from the root workspace will build all members of the workspace by default
|
||||
run: cargo build --release --verbose
|
||||
|
||||
# TODO: Re-enable once Golang bindings for v1+ is finished
|
||||
# build-golang-linux:
|
||||
# name: Build Golang on Linux
|
||||
# runs-on: [self-hosted, Linux, X64, icicle]
|
||||
# needs: check-changed-files
|
||||
# steps:
|
||||
# - name: Checkout Repo
|
||||
# uses: actions/checkout@v3
|
||||
# - name: Build CUDA libs
|
||||
# if: needs.check-changed-files.outputs.golang == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
|
||||
# run: make all
|
||||
# working-directory: ./goicicle
|
||||
build-golang-linux:
|
||||
name: Build Golang on Linux
|
||||
runs-on: [self-hosted, Linux, X64, icicle]
|
||||
needs: check-changed-files
|
||||
strategy:
|
||||
matrix:
|
||||
curve: [bn254, bls12_381, bls12_377, bw6_761]
|
||||
steps:
|
||||
- name: Checkout Repo
|
||||
uses: actions/checkout@v3
|
||||
- name: Build CUDA libs
|
||||
if: needs.check-changed-files.outputs.golang == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
|
||||
working-directory: ./wrappers/golang
|
||||
run: |
|
||||
export CPATH=$CPATH:/usr/local/cuda/include
|
||||
./build.sh ${{ matrix.curve }} ON
|
||||
|
||||
# TODO: Add once Golang make file supports building for Windows
|
||||
# build-golang-windows:
|
||||
|
||||
39
.github/workflows/main-test.yml
vendored
39
.github/workflows/main-test.yml
vendored
@@ -75,20 +75,25 @@ jobs:
|
||||
if: needs.check-changed-files.outputs.cpp_cuda == 'true'
|
||||
run: ctest
|
||||
|
||||
# TODO: Re-enable once Golang bindings for v1+ is finished
|
||||
# test-golang-linux:
|
||||
# name: Test Golang on Linux
|
||||
# runs-on: [self-hosted, Linux, X64, icicle]
|
||||
# needs: check-changed-files
|
||||
# steps:
|
||||
# - name: Checkout Repo
|
||||
# uses: actions/checkout@v3
|
||||
# - name: Build CUDA libs
|
||||
# working-directory: ./goicicle
|
||||
# if: needs.check-changed-files.outputs.golang == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
|
||||
# run: make libbn254.so
|
||||
# - name: Run Golang Tests
|
||||
# if: needs.check-changed-files.outputs.golang == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
|
||||
# run: |
|
||||
# export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/goicicle
|
||||
# go test ./goicicle/curves/bn254 -count=1
|
||||
test-golang-linux:
|
||||
name: Test Golang on Linux
|
||||
runs-on: [self-hosted, Linux, X64, icicle]
|
||||
needs: check-changed-files
|
||||
# strategy:
|
||||
# matrix:
|
||||
# curve: [bn254, bls12_381, bls12_377, bw6_761]
|
||||
steps:
|
||||
- name: Checkout Repo
|
||||
uses: actions/checkout@v3
|
||||
- name: Build CUDA libs
|
||||
working-directory: ./wrappers/golang
|
||||
if: needs.check-changed-files.outputs.golang == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
|
||||
# builds all curves with g2 ON
|
||||
run: |
|
||||
export CPATH=$CPATH:/usr/local/cuda/include
|
||||
./build.sh all ON
|
||||
- name: Run Golang Tests
|
||||
if: needs.check-changed-files.outputs.golang == 'true' || needs.check-changed-files.outputs.cpp_cuda == 'true'
|
||||
run: |
|
||||
export CPATH=$CPATH:/usr/local/cuda/include
|
||||
go test --tags=g2 ./... -count=1 -timeout 60m
|
||||
|
||||
@@ -143,10 +143,10 @@ See [LICENSE-MIT][LMIT] for details.
|
||||
[GRANT_PROGRAM]: https://medium.com/@ingonyama/icicle-for-researchers-grants-challenges-9be1f040998e
|
||||
[ICICLE-CORE]: ./icicle/
|
||||
[ICICLE-RUST]: ./wrappers/rust/
|
||||
[ICICLE-GO]: ./goicicle/
|
||||
[ICICLE-GO]: ./wrappers/golang/
|
||||
[ICICLE-CORE-README]: ./icicle/README.md
|
||||
[ICICLE-RUST-README]: ./wrappers/rust/README.md
|
||||
[ICICLE-GO-README]: ./goicicle/README.md
|
||||
[ICICLE-GO-README]: ./wrappers/golang/README.md
|
||||
[documentation]: https://dev.ingonyama.com/icicle/overview
|
||||
[examples]: ./examples/
|
||||
|
||||
|
||||
18
go.mod
18
go.mod
@@ -3,15 +3,19 @@ module github.com/ingonyama-zk/icicle
|
||||
go 1.20
|
||||
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/kr/pretty v0.1.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
github.com/consensys/gnark-crypto v0.12.1
|
||||
github.com/stretchr/testify v1.8.2
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/consensys/bavard v0.1.13
|
||||
github.com/stretchr/testify v1.8.3
|
||||
github.com/bits-and-blooms/bitset v1.7.0 // indirect
|
||||
github.com/consensys/bavard v0.1.13 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/mmcloughlin/addchain v0.4.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/rogpeppe/go-internal v1.12.0 // indirect
|
||||
golang.org/x/sys v0.9.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
rsc.io/tmplfunc v0.0.3 // indirect
|
||||
)
|
||||
|
||||
36
go.sum
36
go.sum
@@ -1,19 +1,37 @@
|
||||
github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo=
|
||||
github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
|
||||
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
|
||||
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
|
||||
github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
|
||||
github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
|
||||
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
|
||||
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
|
||||
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
|
||||
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
|
||||
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
CUDA_ROOT_DIR = /usr/local/cuda
|
||||
NVCC = $(CUDA_ROOT_DIR)/bin/nvcc
|
||||
CFLAGS = -Xcompiler -fPIC -std=c++17
|
||||
LDFLAGS = -shared
|
||||
FEATURES = -DG2_DEFINED
|
||||
|
||||
TARGET_BN254 = libbn254.so
|
||||
TARGET_BW6761 = libbw6761.so
|
||||
TARGET_BLS12_381 = libbls12_381.so
|
||||
TARGET_BLS12_377 = libbls12_377.so
|
||||
|
||||
VPATH = ../icicle/curves/bn254:../icicle/curves/bls12_377:../icicle/curves/bls12_381:../icicle/curves/bw6_761
|
||||
|
||||
SRCS_BN254 = lde.cu msm.cu projective.cu ve_mod_mult.cu
|
||||
SRCS_BW6761 = lde.cu msm.cu projective.cu ve_mod_mult.cu
|
||||
SRCS_BLS12_381 = lde.cu msm.cu projective.cu ve_mod_mult.cu poseidon.cu
|
||||
SRCS_BLS12_377 = lde.cu msm.cu projective.cu ve_mod_mult.cu
|
||||
|
||||
all: $(TARGET_BN254) $(TARGET_BLS12_381) $(TARGET_BLS12_377) $(TARGET_BW6761)
|
||||
|
||||
$(TARGET_BN254):
|
||||
$(NVCC) $(FEATURES) $(CFLAGS) $(LDFLAGS) $(addprefix ../icicle/curves/bn254/, $(SRCS_BN254)) -o $@
|
||||
|
||||
$(TARGET_BW6761):
|
||||
$(NVCC) $(FEATURES) $(CFLAGS) $(LDFLAGS) $(addprefix ../icicle/curves/bw6_761/, $(SRCS_BW6761)) -o $@
|
||||
|
||||
$(TARGET_BLS12_381):
|
||||
$(NVCC) $(FEATURES) $(CFLAGS) $(LDFLAGS) $(addprefix ../icicle/curves/bls12_381/, $(SRCS_BLS12_381)) -o $@
|
||||
|
||||
$(TARGET_BLS12_377):
|
||||
$(NVCC) $(FEATURES) $(CFLAGS) $(LDFLAGS) $(addprefix ../icicle/curves/bls12_377/, $(SRCS_BLS12_377)) -o $@
|
||||
|
||||
clean:
|
||||
rm -f $(TARGET_BN254) $(TARGET_BLS12_381) $(TARGET_BLS12_377) $(TARGET_BW6761)
|
||||
@@ -1,82 +0,0 @@
|
||||
# Golang Bindings
|
||||
|
||||
To build the shared library:
|
||||
|
||||
To build shared libraries for all supported curves.
|
||||
|
||||
```
|
||||
make all
|
||||
```
|
||||
|
||||
If you wish to build for a specific curve, for example bn254.
|
||||
|
||||
```
|
||||
make libbn254.so
|
||||
```
|
||||
|
||||
The current supported options are `libbn254.so`, `libbls12_381.so`, `libbls12_377.so` and `libbw6_671.so`. The resulting `.so` files are the compiled shared libraries for each curve.
|
||||
|
||||
Finally to allow your system to find the shared libraries
|
||||
|
||||
```
|
||||
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH/<path_to_shared_libs>
|
||||
```
|
||||
|
||||
## Running golang tests
|
||||
|
||||
To run the tests for curve bn254.
|
||||
|
||||
```
|
||||
go test ./goicicle/curves/bn254 -count=1
|
||||
```
|
||||
|
||||
## Cleaning up
|
||||
|
||||
If you want to remove the compiled files
|
||||
|
||||
```
|
||||
make clean
|
||||
```
|
||||
|
||||
This will remove all shared libraries generated from the `make` file.
|
||||
|
||||
# How do Golang bindings work?
|
||||
|
||||
The shared libraries produced from the CUDA code compilation are used to bind Golang to ICICLE's CUDA code.
|
||||
|
||||
1. These shared libraries (`libbn254.so`, `libbls12_381.so`, `libbls12_377.so`, `libbw6_671.so`) can be imported in your Go project to leverage the GPU accelerated functionalities provided by ICICLE.
|
||||
|
||||
2. In your Go project, you can use `cgo` to link these shared libraries. Here's a basic example on how you can use `cgo` to link these libraries:
|
||||
|
||||
```go
|
||||
/*
|
||||
#cgo LDFLAGS: -L/path/to/shared/libs -lbn254 -lbls12_381 -lbls12_377 -lbw6_671
|
||||
#include "icicle.h" // make sure you use the correct header file(s)
|
||||
*/
|
||||
import "C"
|
||||
|
||||
func main() {
|
||||
// Now you can call the C functions from the ICICLE libraries.
|
||||
// Note that C function calls are prefixed with 'C.' in Go code.
|
||||
}
|
||||
```
|
||||
|
||||
Replace `/path/to/shared/libs` with the actual path where the shared libraries are located on your system.
|
||||
|
||||
# Common issues
|
||||
|
||||
### Cannot find shared library
|
||||
|
||||
In some cases you may encounter the following error, despite exporting the correct `LD_LIBRARY_PATH`.
|
||||
|
||||
```
|
||||
/usr/local/go/pkg/tool/linux_amd64/link: running gcc failed: exit status 1
|
||||
/usr/bin/ld: cannot find -lbn254: No such file or directory
|
||||
/usr/bin/ld: cannot find -lbn254: No such file or directory
|
||||
/usr/bin/ld: cannot find -lbn254: No such file or directory
|
||||
/usr/bin/ld: cannot find -lbn254: No such file or directory
|
||||
/usr/bin/ld: cannot find -lbn254: No such file or directory
|
||||
collect2: error: ld returned 1 exit status
|
||||
```
|
||||
|
||||
This is normally fixed by exporting the path to the shared library location in the following way: `export CGO_LDFLAGS="-L/<path_to_shared_lib>/"`
|
||||
@@ -1,328 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bls12377
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
// #cgo CFLAGS: -I./include/
|
||||
// #cgo CFLAGS: -I/usr/local/cuda/include
|
||||
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbls12_377
|
||||
// #include "projective.h"
|
||||
// #include "ve_mod_mult.h"
|
||||
import "C"
|
||||
|
||||
const SCALAR_SIZE = 8
|
||||
const BASE_SIZE = 12
|
||||
|
||||
type G1ScalarField struct {
|
||||
S [SCALAR_SIZE]uint32
|
||||
}
|
||||
|
||||
type G1BaseField struct {
|
||||
S [BASE_SIZE]uint32
|
||||
}
|
||||
|
||||
/*
|
||||
* BaseField Constructors
|
||||
*/
|
||||
|
||||
func (f *G1BaseField) SetZero() *G1BaseField {
|
||||
var S [BASE_SIZE]uint32
|
||||
f.S = S
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *G1BaseField) SetOne() *G1BaseField {
|
||||
var S [BASE_SIZE]uint32
|
||||
|
||||
S[0] = 1
|
||||
|
||||
f.S = S
|
||||
return f
|
||||
}
|
||||
|
||||
func (p *G1ProjectivePoint) FromAffine(affine *G1PointAffine) *G1ProjectivePoint {
|
||||
out := (*C.BLS12_377_projective_t)(unsafe.Pointer(p))
|
||||
in := (*C.BLS12_377_affine_t)(unsafe.Pointer(affine))
|
||||
|
||||
C.projective_from_affine_bls12_377(out, in)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (f *G1BaseField) FromLimbs(limbs [BASE_SIZE]uint32) *G1BaseField {
|
||||
copy(f.S[:], limbs[:])
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
/*
|
||||
* BaseField methods
|
||||
*/
|
||||
|
||||
func (f *G1BaseField) Limbs() [BASE_SIZE]uint32 {
|
||||
return f.S
|
||||
}
|
||||
|
||||
func (f *G1BaseField) ToBytesLe() []byte {
|
||||
bytes := make([]byte, len(f.S)*4)
|
||||
for i, v := range f.S {
|
||||
binary.LittleEndian.PutUint32(bytes[i*4:], v)
|
||||
}
|
||||
|
||||
return bytes
|
||||
}
|
||||
|
||||
/*
|
||||
* ScalarField methods
|
||||
*/
|
||||
|
||||
func (p *G1ScalarField) Random() *G1ScalarField {
|
||||
outC := (*C.BLS12_377_scalar_t)(unsafe.Pointer(p))
|
||||
C.random_scalar_bls12_377(outC)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (f *G1ScalarField) SetZero() *G1ScalarField {
|
||||
var S [SCALAR_SIZE]uint32
|
||||
f.S = S
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *G1ScalarField) SetOne() *G1ScalarField {
|
||||
var S [SCALAR_SIZE]uint32
|
||||
S[0] = 1
|
||||
f.S = S
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func (a *G1ScalarField) Eq(b *G1ScalarField) bool {
|
||||
for i, v := range a.S {
|
||||
if b.S[i] != v {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
/*
|
||||
* ScalarField methods
|
||||
*/
|
||||
|
||||
func (f *G1ScalarField) Limbs() [SCALAR_SIZE]uint32 {
|
||||
return f.S
|
||||
}
|
||||
|
||||
func (f *G1ScalarField) ToBytesLe() []byte {
|
||||
bytes := make([]byte, len(f.S)*4)
|
||||
for i, v := range f.S {
|
||||
binary.LittleEndian.PutUint32(bytes[i*4:], v)
|
||||
}
|
||||
|
||||
return bytes
|
||||
}
|
||||
|
||||
/*
|
||||
* PointBLS12_377
|
||||
*/
|
||||
|
||||
type G1ProjectivePoint struct {
|
||||
X, Y, Z G1BaseField
|
||||
}
|
||||
|
||||
func (f *G1ProjectivePoint) SetZero() *G1ProjectivePoint {
|
||||
var yOne G1BaseField
|
||||
yOne.SetOne()
|
||||
|
||||
var xZero G1BaseField
|
||||
xZero.SetZero()
|
||||
|
||||
var zZero G1BaseField
|
||||
zZero.SetZero()
|
||||
|
||||
f.X = xZero
|
||||
f.Y = yOne
|
||||
f.Z = zZero
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func (p *G1ProjectivePoint) Eq(pCompare *G1ProjectivePoint) bool {
|
||||
// Cast *PointBLS12_377 to *C.BLS12_377_projective_t
|
||||
// The unsafe.Pointer cast is necessary because Go doesn't allow direct casts
|
||||
// between different pointer types.
|
||||
// It'S your responsibility to ensure that the types are compatible.
|
||||
pC := (*C.BLS12_377_projective_t)(unsafe.Pointer(p))
|
||||
pCompareC := (*C.BLS12_377_projective_t)(unsafe.Pointer(pCompare))
|
||||
|
||||
// Call the C function
|
||||
// The C function doesn't keep any references to the data,
|
||||
// so it'S fine if the Go garbage collector moves or deletes the data later.
|
||||
return bool(C.eq_bls12_377(pC, pCompareC))
|
||||
}
|
||||
|
||||
func (p *G1ProjectivePoint) IsOnCurve() bool {
|
||||
point := (*C.BLS12_377_projective_t)(unsafe.Pointer(p))
|
||||
res := C.projective_is_on_curve_bls12_377(point)
|
||||
|
||||
return bool(res)
|
||||
}
|
||||
|
||||
func (p *G1ProjectivePoint) Random() *G1ProjectivePoint {
|
||||
outC := (*C.BLS12_377_projective_t)(unsafe.Pointer(p))
|
||||
C.random_projective_bls12_377(outC)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *G1ProjectivePoint) StripZ() *G1PointAffine {
|
||||
return &G1PointAffine{
|
||||
X: p.X,
|
||||
Y: p.Y,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *G1ProjectivePoint) FromLimbs(x, y, z *[]uint32) *G1ProjectivePoint {
|
||||
var _x G1BaseField
|
||||
var _y G1BaseField
|
||||
var _z G1BaseField
|
||||
|
||||
_x.FromLimbs(GetFixedLimbs(x))
|
||||
_y.FromLimbs(GetFixedLimbs(y))
|
||||
_z.FromLimbs(GetFixedLimbs(z))
|
||||
|
||||
p.X = _x
|
||||
p.Y = _y
|
||||
p.Z = _z
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
/*
|
||||
* PointAffineNoInfinityBLS12_377
|
||||
*/
|
||||
|
||||
type G1PointAffine struct {
|
||||
X, Y G1BaseField
|
||||
}
|
||||
|
||||
func (p *G1PointAffine) FromProjective(projective *G1ProjectivePoint) *G1PointAffine {
|
||||
in := (*C.BLS12_377_projective_t)(unsafe.Pointer(projective))
|
||||
out := (*C.BLS12_377_affine_t)(unsafe.Pointer(p))
|
||||
|
||||
C.projective_to_affine_bls12_377(out, in)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *G1PointAffine) ToProjective() *G1ProjectivePoint {
|
||||
var Z G1BaseField
|
||||
Z.SetOne()
|
||||
|
||||
return &G1ProjectivePoint{
|
||||
X: p.X,
|
||||
Y: p.Y,
|
||||
Z: Z,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *G1PointAffine) FromLimbs(X, Y *[]uint32) *G1PointAffine {
|
||||
var _x G1BaseField
|
||||
var _y G1BaseField
|
||||
|
||||
_x.FromLimbs(GetFixedLimbs(X))
|
||||
_y.FromLimbs(GetFixedLimbs(Y))
|
||||
|
||||
p.X = _x
|
||||
p.Y = _y
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
/*
|
||||
* Multiplication
|
||||
*/
|
||||
|
||||
func MultiplyVec(a []G1ProjectivePoint, b []G1ScalarField, deviceID int) {
|
||||
if len(a) != len(b) {
|
||||
panic("a and b have different lengths")
|
||||
}
|
||||
|
||||
pointsC := (*C.BLS12_377_projective_t)(unsafe.Pointer(&a[0]))
|
||||
scalarsC := (*C.BLS12_377_scalar_t)(unsafe.Pointer(&b[0]))
|
||||
deviceIdC := C.size_t(deviceID)
|
||||
nElementsC := C.size_t(len(a))
|
||||
|
||||
C.vec_mod_mult_point_bls12_377(pointsC, scalarsC, nElementsC, deviceIdC)
|
||||
}
|
||||
|
||||
func MultiplyScalar(a []G1ScalarField, b []G1ScalarField, deviceID int) {
|
||||
if len(a) != len(b) {
|
||||
panic("a and b have different lengths")
|
||||
}
|
||||
|
||||
aC := (*C.BLS12_377_scalar_t)(unsafe.Pointer(&a[0]))
|
||||
bC := (*C.BLS12_377_scalar_t)(unsafe.Pointer(&b[0]))
|
||||
deviceIdC := C.size_t(deviceID)
|
||||
nElementsC := C.size_t(len(a))
|
||||
|
||||
C.vec_mod_mult_scalar_bls12_377(aC, bC, nElementsC, deviceIdC)
|
||||
}
|
||||
|
||||
// Multiply a matrix by a scalar:
|
||||
//
|
||||
// `a` - flattenned matrix;
|
||||
// `b` - vector to multiply `a` by;
|
||||
func MultiplyMatrix(a []G1ScalarField, b []G1ScalarField, deviceID int) {
|
||||
c := make([]G1ScalarField, len(b))
|
||||
for i := range c {
|
||||
var p G1ScalarField
|
||||
p.SetZero()
|
||||
|
||||
c[i] = p
|
||||
}
|
||||
|
||||
aC := (*C.BLS12_377_scalar_t)(unsafe.Pointer(&a[0]))
|
||||
bC := (*C.BLS12_377_scalar_t)(unsafe.Pointer(&b[0]))
|
||||
cC := (*C.BLS12_377_scalar_t)(unsafe.Pointer(&c[0]))
|
||||
deviceIdC := C.size_t(deviceID)
|
||||
nElementsC := C.size_t(len(a))
|
||||
|
||||
C.matrix_vec_mod_mult_bls12_377(aC, bC, cC, nElementsC, deviceIdC)
|
||||
}
|
||||
|
||||
/*
|
||||
* Utils
|
||||
*/
|
||||
|
||||
func GetFixedLimbs(slice *[]uint32) [BASE_SIZE]uint32 {
|
||||
if len(*slice) <= BASE_SIZE {
|
||||
limbs := [BASE_SIZE]uint32{}
|
||||
copy(limbs[:len(*slice)], *slice)
|
||||
return limbs
|
||||
}
|
||||
|
||||
panic("slice has too many elements")
|
||||
}
|
||||
@@ -1,198 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bls12377
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewFieldBLS12_377One(t *testing.T) {
|
||||
var oneField G1BaseField
|
||||
oneField.SetOne()
|
||||
|
||||
rawOneField := [8]uint32([8]uint32{0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0})
|
||||
|
||||
assert.Equal(t, oneField.S, rawOneField)
|
||||
}
|
||||
|
||||
func TestNewFieldBLS12_377Zero(t *testing.T) {
|
||||
var zeroField G1BaseField
|
||||
zeroField.SetZero()
|
||||
|
||||
rawZeroField := [8]uint32([8]uint32{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0})
|
||||
|
||||
assert.Equal(t, zeroField.S, rawZeroField)
|
||||
}
|
||||
|
||||
func TestFieldBLS12_377ToBytesLe(t *testing.T) {
|
||||
var p G1ProjectivePoint
|
||||
p.Random()
|
||||
|
||||
expected := make([]byte, len(p.X.S)*4) // each uint32 takes 4 bytes
|
||||
for i, v := range p.X.S {
|
||||
binary.LittleEndian.PutUint32(expected[i*4:], v)
|
||||
}
|
||||
|
||||
assert.Equal(t, p.X.ToBytesLe(), expected)
|
||||
assert.Equal(t, len(p.X.ToBytesLe()), 32)
|
||||
}
|
||||
|
||||
func TestNewPointBLS12_377Zero(t *testing.T) {
|
||||
var pointZero G1ProjectivePoint
|
||||
pointZero.SetZero()
|
||||
|
||||
var baseOne G1BaseField
|
||||
baseOne.SetOne()
|
||||
|
||||
var zeroSanity G1BaseField
|
||||
zeroSanity.SetZero()
|
||||
|
||||
assert.Equal(t, pointZero.X, zeroSanity)
|
||||
assert.Equal(t, pointZero.Y, baseOne)
|
||||
assert.Equal(t, pointZero.Z, zeroSanity)
|
||||
}
|
||||
|
||||
func TestFromProjectiveToAffine(t *testing.T) {
|
||||
var projective G1ProjectivePoint
|
||||
var affine G1PointAffine
|
||||
|
||||
projective.Random()
|
||||
|
||||
affine.FromProjective(&projective)
|
||||
var projective2 G1ProjectivePoint
|
||||
projective2.FromAffine(&affine)
|
||||
|
||||
assert.True(t, projective.IsOnCurve())
|
||||
assert.True(t, projective2.IsOnCurve())
|
||||
assert.True(t, projective.Eq(&projective2))
|
||||
}
|
||||
|
||||
func TestBLS12_377Eq(t *testing.T) {
|
||||
var p1 G1ProjectivePoint
|
||||
p1.Random()
|
||||
var p2 G1ProjectivePoint
|
||||
p2.Random()
|
||||
|
||||
assert.Equal(t, p1.Eq(&p1), true)
|
||||
assert.Equal(t, p1.Eq(&p2), false)
|
||||
}
|
||||
|
||||
func TestBLS12_377StripZ(t *testing.T) {
|
||||
var p1 G1ProjectivePoint
|
||||
p1.Random()
|
||||
|
||||
p2ZLess := p1.StripZ()
|
||||
|
||||
assert.IsType(t, G1PointAffine{}, *p2ZLess)
|
||||
assert.Equal(t, p1.X, p2ZLess.X)
|
||||
assert.Equal(t, p1.Y, p2ZLess.Y)
|
||||
}
|
||||
|
||||
func TestPointBLS12_377fromLimbs(t *testing.T) {
|
||||
var p G1ProjectivePoint
|
||||
p.Random()
|
||||
|
||||
x := p.X.Limbs()
|
||||
y := p.Y.Limbs()
|
||||
z := p.Z.Limbs()
|
||||
|
||||
xSlice := x[:]
|
||||
ySlice := y[:]
|
||||
zSlice := z[:]
|
||||
|
||||
var pFromLimbs G1ProjectivePoint
|
||||
pFromLimbs.FromLimbs(&xSlice, &ySlice, &zSlice)
|
||||
|
||||
assert.Equal(t, pFromLimbs, p)
|
||||
}
|
||||
|
||||
func TestNewPointAffineNoInfinityBLS12_377Zero(t *testing.T) {
|
||||
var zeroP G1PointAffine
|
||||
|
||||
var zeroSanity G1BaseField
|
||||
zeroSanity.SetZero()
|
||||
|
||||
assert.Equal(t, zeroP.X, zeroSanity)
|
||||
assert.Equal(t, zeroP.Y, zeroSanity)
|
||||
}
|
||||
|
||||
func TestPointAffineNoInfinityBLS12_377FromLimbs(t *testing.T) {
|
||||
// Initialize your test values
|
||||
x := [12]uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}
|
||||
y := [12]uint32{9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}
|
||||
xSlice := x[:]
|
||||
ySlice := y[:]
|
||||
|
||||
// Execute your function
|
||||
var result G1PointAffine
|
||||
result.FromLimbs(&xSlice, &ySlice)
|
||||
|
||||
var xBase G1BaseField
|
||||
var yBase G1BaseField
|
||||
xBase.FromLimbs(x)
|
||||
yBase.FromLimbs(y)
|
||||
|
||||
// Define your expected result
|
||||
expected := G1PointAffine{
|
||||
X: xBase,
|
||||
Y: yBase,
|
||||
}
|
||||
|
||||
// Test if result is as expected
|
||||
assert.Equal(t, expected, result)
|
||||
}
|
||||
|
||||
func TestGetFixedLimbs(t *testing.T) {
|
||||
t.Run("case of valid input of length less than 8", func(t *testing.T) {
|
||||
slice := []uint32{1, 2, 3, 4, 5, 6, 7}
|
||||
expected := [8]uint32{1, 2, 3, 4, 5, 6, 7, 0}
|
||||
|
||||
result := GetFixedLimbs(&slice)
|
||||
assert.Equal(t, result, expected)
|
||||
})
|
||||
|
||||
t.Run("case of valid input of length 8", func(t *testing.T) {
|
||||
slice := []uint32{1, 2, 3, 4, 5, 6, 7, 8}
|
||||
expected := [8]uint32{1, 2, 3, 4, 5, 6, 7, 8}
|
||||
|
||||
result := GetFixedLimbs(&slice)
|
||||
assert.Equal(t, result, expected)
|
||||
})
|
||||
|
||||
t.Run("case of empty input", func(t *testing.T) {
|
||||
slice := []uint32{}
|
||||
expected := [8]uint32{0, 0, 0, 0, 0, 0, 0, 0}
|
||||
|
||||
result := GetFixedLimbs(&slice)
|
||||
assert.Equal(t, result, expected)
|
||||
})
|
||||
|
||||
t.Run("case of input length greater than 8", func(t *testing.T) {
|
||||
slice := []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9}
|
||||
|
||||
defer func() {
|
||||
if r := recover(); r == nil {
|
||||
t.Errorf("the code did not panic")
|
||||
}
|
||||
}()
|
||||
|
||||
GetFixedLimbs(&slice)
|
||||
})
|
||||
}
|
||||
@@ -1,102 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bls12377
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// #cgo CFLAGS: -I./include/
|
||||
// #cgo CFLAGS: -I/usr/local/cuda/include
|
||||
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbls12_377
|
||||
// #include "projective.h"
|
||||
// #include "ve_mod_mult.h"
|
||||
import "C"
|
||||
|
||||
// G2 extension field
|
||||
|
||||
type G2Element [6]uint64
|
||||
|
||||
type ExtentionField struct {
|
||||
A0, A1 G2Element
|
||||
}
|
||||
|
||||
type G2PointAffine struct {
|
||||
X, Y ExtentionField
|
||||
}
|
||||
|
||||
type G2Point struct {
|
||||
X, Y, Z ExtentionField
|
||||
}
|
||||
|
||||
func (p *G2Point) Random() *G2Point {
|
||||
outC := (*C.BLS12_377_g2_projective_t)(unsafe.Pointer(p))
|
||||
C.random_g2_projective_bls12_377(outC)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *G2Point) FromAffine(affine *G2PointAffine) *G2Point {
|
||||
out := (*C.BLS12_377_g2_projective_t)(unsafe.Pointer(p))
|
||||
in := (*C.BLS12_377_g2_affine_t)(unsafe.Pointer(affine))
|
||||
|
||||
C.g2_projective_from_affine_bls12_377(out, in)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *G2Point) Eq(pCompare *G2Point) bool {
|
||||
// Cast *PointBLS12_377 to *C.BLS12_377_projective_t
|
||||
// The unsafe.Pointer cast is necessary because Go doesn't allow direct casts
|
||||
// between different pointer types.
|
||||
// It's your responsibility to ensure that the types are compatible.
|
||||
pC := (*C.BLS12_377_g2_projective_t)(unsafe.Pointer(p))
|
||||
pCompareC := (*C.BLS12_377_g2_projective_t)(unsafe.Pointer(pCompare))
|
||||
|
||||
// Call the C function
|
||||
// The C function doesn't keep any references to the data,
|
||||
// so it's fine if the Go garbage collector moves or deletes the data later.
|
||||
return bool(C.eq_g2_bls12_377(pC, pCompareC))
|
||||
}
|
||||
|
||||
func (f *G2Element) ToBytesLe() []byte {
|
||||
var bytes []byte
|
||||
for _, val := range f {
|
||||
buf := make([]byte, 8) // 8 bytes because uint64 is 64-bit
|
||||
binary.LittleEndian.PutUint64(buf, val)
|
||||
bytes = append(bytes, buf...)
|
||||
}
|
||||
return bytes
|
||||
}
|
||||
|
||||
func (p *G2PointAffine) FromProjective(projective *G2Point) *G2PointAffine {
|
||||
out := (*C.BLS12_377_g2_affine_t)(unsafe.Pointer(p))
|
||||
in := (*C.BLS12_377_g2_projective_t)(unsafe.Pointer(projective))
|
||||
|
||||
C.g2_projective_to_affine_bls12_377(out, in)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *G2Point) IsOnCurve() bool {
|
||||
// Directly copy memory from the C struct to the Go struct
|
||||
point := (*C.BLS12_377_g2_projective_t)(unsafe.Pointer(p))
|
||||
res := C.g2_projective_is_on_curve_bls12_377(point)
|
||||
|
||||
return bool(res)
|
||||
}
|
||||
@@ -1,79 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bls12377
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestG2Eqg2(t *testing.T) {
|
||||
var point G2Point
|
||||
|
||||
point.Random()
|
||||
|
||||
assert.True(t, point.Eq(&point))
|
||||
}
|
||||
|
||||
func TestG2FromProjectiveToAffine(t *testing.T) {
|
||||
var projective G2Point
|
||||
projective.Random()
|
||||
|
||||
var affine G2PointAffine
|
||||
affine.FromProjective(&projective)
|
||||
|
||||
var projective2 G2Point
|
||||
projective2.FromAffine(&affine)
|
||||
|
||||
assert.True(t, projective.IsOnCurve())
|
||||
assert.True(t, projective2.IsOnCurve())
|
||||
assert.True(t, projective.Eq(&projective2))
|
||||
}
|
||||
|
||||
func TestG2Eqg2NotEqual(t *testing.T) {
|
||||
var point G2Point
|
||||
point.Random()
|
||||
|
||||
var point2 G2Point
|
||||
point2.Random()
|
||||
|
||||
assert.False(t, point.Eq(&point2))
|
||||
}
|
||||
|
||||
func TestG2ToBytes(t *testing.T) {
|
||||
element := G2Element{0x6546098ea84b6298, 0x4a384533d1f68aca, 0xaa0666972d771336, 0x1569e4a34321993}
|
||||
bytes := element.ToBytesLe()
|
||||
|
||||
assert.Equal(t, bytes, []byte{0x98, 0x62, 0x4b, 0xa8, 0x8e, 0x9, 0x46, 0x65, 0xca, 0x8a, 0xf6, 0xd1, 0x33, 0x45, 0x38, 0x4a, 0x36, 0x13, 0x77, 0x2d, 0x97, 0x66, 0x6, 0xaa, 0x93, 0x19, 0x32, 0x34, 0x4a, 0x9e, 0x56, 0x1})
|
||||
}
|
||||
|
||||
func TestG2ShouldConvertToProjective(t *testing.T) {
|
||||
fmt.Print() // this prevents the test from hanging. TODO: figure out why
|
||||
var pointProjective G2Point
|
||||
pointProjective.Random()
|
||||
|
||||
var pointAffine G2PointAffine
|
||||
pointAffine.FromProjective(&pointProjective)
|
||||
|
||||
var proj G2Point
|
||||
proj.FromAffine(&pointAffine)
|
||||
|
||||
assert.True(t, proj.IsOnCurve())
|
||||
assert.True(t, pointProjective.Eq(&proj))
|
||||
}
|
||||
@@ -1,98 +0,0 @@
|
||||
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
#include <cuda.h>
|
||||
#include <cuda_runtime.h>
|
||||
#include <stdbool.h>
|
||||
// msm.h
|
||||
|
||||
#ifndef _BLS12_377_MSM_H
|
||||
#define _BLS12_377_MSM_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
// Incomplete declaration of BLS12_377 projective and affine structs
|
||||
typedef struct BLS12_377_projective_t BLS12_377_projective_t;
|
||||
typedef struct BLS12_377_g2_projective_t BLS12_377_g2_projective_t;
|
||||
typedef struct BLS12_377_affine_t BLS12_377_affine_t;
|
||||
typedef struct BLS12_377_g2_affine_t BLS12_377_g2_affine_t;
|
||||
typedef struct BLS12_377_scalar_t BLS12_377_scalar_t;
|
||||
typedef cudaStream_t CudaStream_t;
|
||||
|
||||
int msm_cuda_bls12_377(
|
||||
BLS12_377_projective_t* out, BLS12_377_affine_t* points, BLS12_377_scalar_t* scalars, size_t count, size_t device_id);
|
||||
|
||||
int msm_batch_cuda_bls12_377(
|
||||
BLS12_377_projective_t* out,
|
||||
BLS12_377_affine_t* points,
|
||||
BLS12_377_scalar_t* scalars,
|
||||
size_t batch_size,
|
||||
size_t msm_size,
|
||||
size_t device_id);
|
||||
|
||||
int commit_cuda_bls12_377(
|
||||
BLS12_377_projective_t* d_out,
|
||||
BLS12_377_scalar_t* d_scalars,
|
||||
BLS12_377_affine_t* d_points,
|
||||
size_t count,
|
||||
unsigned large_bucket_factor,
|
||||
size_t device_id);
|
||||
|
||||
int commit_batch_cuda_bls12_377(
|
||||
BLS12_377_projective_t* d_out,
|
||||
BLS12_377_scalar_t* d_scalars,
|
||||
BLS12_377_affine_t* d_points,
|
||||
size_t count,
|
||||
size_t batch_size,
|
||||
size_t device_id);
|
||||
|
||||
int msm_g2_cuda_bls12_377(
|
||||
BLS12_377_g2_projective_t* out,
|
||||
BLS12_377_g2_affine_t* points,
|
||||
BLS12_377_scalar_t* scalars,
|
||||
size_t count,
|
||||
size_t device_id);
|
||||
int msm_batch_g2_cuda_bls12_377(
|
||||
BLS12_377_g2_projective_t* out,
|
||||
BLS12_377_g2_affine_t* points,
|
||||
BLS12_377_scalar_t* scalars,
|
||||
size_t batch_size,
|
||||
size_t msm_size,
|
||||
size_t device_id);
|
||||
int commit_g2_cuda_bls12_377(
|
||||
BLS12_377_g2_projective_t* d_out,
|
||||
BLS12_377_scalar_t* d_scalars,
|
||||
BLS12_377_g2_affine_t* d_points,
|
||||
size_t count,
|
||||
unsigned large_bucket_factor,
|
||||
size_t device_id);
|
||||
int commit_batch_g2_cuda_bls12_377(
|
||||
BLS12_377_g2_projective_t* d_out,
|
||||
BLS12_377_scalar_t* d_scalars,
|
||||
BLS12_377_g2_affine_t* d_points,
|
||||
size_t count,
|
||||
size_t batch_size,
|
||||
size_t device_id,
|
||||
cudaStream_t stream);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* _BLS12_377_MSM_H */
|
||||
@@ -1,195 +0,0 @@
|
||||
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
#include <cuda.h>
|
||||
#include <stdbool.h>
|
||||
// ntt.h
|
||||
|
||||
#ifndef _BLS12_377_NTT_H
|
||||
#define _BLS12_377_NTT_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
// Incomplete declaration of BLS12_377 projective and affine structs
|
||||
typedef struct BLS12_377_projective_t BLS12_377_projective_t;
|
||||
typedef struct BLS12_377_affine_t BLS12_377_affine_t;
|
||||
typedef struct BLS12_377_scalar_t BLS12_377_scalar_t;
|
||||
|
||||
typedef struct BLS12_377_g2_projective_t BLS12_377_g2_projective_t;
|
||||
typedef struct BLS12_377_g2_affine_t BLS12_377_g2_affine_t;
|
||||
|
||||
int ntt_cuda_bls12_377(BLS12_377_scalar_t* arr, uint32_t n, bool inverse, size_t device_id);
|
||||
int ntt_batch_cuda_bls12_377(
|
||||
BLS12_377_scalar_t* arr, uint32_t arr_size, uint32_t batch_size, bool inverse, size_t device_id);
|
||||
|
||||
int ecntt_cuda_bls12_377(BLS12_377_projective_t* arr, uint32_t n, bool inverse, size_t device_id);
|
||||
int ecntt_batch_cuda_bls12_377(
|
||||
BLS12_377_projective_t* arr, uint32_t arr_size, uint32_t batch_size, bool inverse, size_t device_id);
|
||||
|
||||
BLS12_377_scalar_t*
|
||||
build_domain_cuda_bls12_377(uint32_t domain_size, uint32_t logn, bool inverse, size_t device_id, size_t stream);
|
||||
int interpolate_scalars_cuda_bls12_377(
|
||||
BLS12_377_scalar_t* d_out,
|
||||
BLS12_377_scalar_t* d_evaluations,
|
||||
BLS12_377_scalar_t* d_domain,
|
||||
unsigned n,
|
||||
unsigned device_id,
|
||||
size_t stream);
|
||||
int interpolate_scalars_batch_cuda_bls12_377(
|
||||
BLS12_377_scalar_t* d_out,
|
||||
BLS12_377_scalar_t* d_evaluations,
|
||||
BLS12_377_scalar_t* d_domain,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int interpolate_points_cuda_bls12_377(
|
||||
BLS12_377_projective_t* d_out,
|
||||
BLS12_377_projective_t* d_evaluations,
|
||||
BLS12_377_scalar_t* d_domain,
|
||||
unsigned n,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int interpolate_points_batch_cuda_bls12_377(
|
||||
BLS12_377_projective_t* d_out,
|
||||
BLS12_377_projective_t* d_evaluations,
|
||||
BLS12_377_scalar_t* d_domain,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int interpolate_scalars_on_coset_cuda_bls12_377(
|
||||
BLS12_377_scalar_t* d_out,
|
||||
BLS12_377_scalar_t* d_evaluations,
|
||||
BLS12_377_scalar_t* d_domain,
|
||||
unsigned n,
|
||||
BLS12_377_scalar_t* coset_powers,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int interpolate_scalars_batch_on_coset_cuda_bls12_377(
|
||||
BLS12_377_scalar_t* d_out,
|
||||
BLS12_377_scalar_t* d_evaluations,
|
||||
BLS12_377_scalar_t* d_domain,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
BLS12_377_scalar_t* coset_powers,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int evaluate_scalars_cuda_bls12_377(
|
||||
BLS12_377_scalar_t* d_out,
|
||||
BLS12_377_scalar_t* d_coefficients,
|
||||
BLS12_377_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
unsigned device_id,
|
||||
size_t stream);
|
||||
int evaluate_scalars_batch_cuda_bls12_377(
|
||||
BLS12_377_scalar_t* d_out,
|
||||
BLS12_377_scalar_t* d_coefficients,
|
||||
BLS12_377_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int evaluate_points_cuda_bls12_377(
|
||||
BLS12_377_projective_t* d_out,
|
||||
BLS12_377_projective_t* d_coefficients,
|
||||
BLS12_377_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int evaluate_points_batch_cuda_bls12_377(
|
||||
BLS12_377_projective_t* d_out,
|
||||
BLS12_377_projective_t* d_coefficients,
|
||||
BLS12_377_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int evaluate_scalars_on_coset_cuda_bls12_377(
|
||||
BLS12_377_scalar_t* d_out,
|
||||
BLS12_377_scalar_t* d_coefficients,
|
||||
BLS12_377_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
BLS12_377_scalar_t* coset_powers,
|
||||
unsigned device_id,
|
||||
size_t stream);
|
||||
int evaluate_scalars_on_coset_batch_cuda_bls12_377(
|
||||
BLS12_377_scalar_t* d_out,
|
||||
BLS12_377_scalar_t* d_coefficients,
|
||||
BLS12_377_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
BLS12_377_scalar_t* coset_powers,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int evaluate_points_on_coset_cuda_bls12_377(
|
||||
BLS12_377_projective_t* d_out,
|
||||
BLS12_377_projective_t* d_coefficients,
|
||||
BLS12_377_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
BLS12_377_scalar_t* coset_powers,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int evaluate_points_on_coset_batch_cuda_bls12_377(
|
||||
BLS12_377_projective_t* d_out,
|
||||
BLS12_377_projective_t* d_coefficients,
|
||||
BLS12_377_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
BLS12_377_scalar_t* coset_powers,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int reverse_order_scalars_cuda_bls12_377(BLS12_377_scalar_t* arr, int n, size_t device_id, size_t stream);
|
||||
int reverse_order_scalars_batch_cuda_bls12_377(
|
||||
BLS12_377_scalar_t* arr, int n, int batch_size, size_t device_id, size_t stream);
|
||||
int reverse_order_points_cuda_bls12_377(BLS12_377_projective_t* arr, int n, size_t device_id, size_t stream);
|
||||
int reverse_order_points_batch_cuda_bls12_377(
|
||||
BLS12_377_projective_t* arr, int n, int batch_size, size_t device_id, size_t stream);
|
||||
int add_scalars_cuda_bls12_377(
|
||||
BLS12_377_scalar_t* d_out, BLS12_377_scalar_t* d_in1, BLS12_377_scalar_t* d_in2, unsigned n, size_t stream);
|
||||
int sub_scalars_cuda_bls12_377(
|
||||
BLS12_377_scalar_t* d_out, BLS12_377_scalar_t* d_in1, BLS12_377_scalar_t* d_in2, unsigned n, size_t stream);
|
||||
int to_montgomery_scalars_cuda_bls12_377(BLS12_377_scalar_t* d_inout, unsigned n, size_t stream);
|
||||
int from_montgomery_scalars_cuda_bls12_377(BLS12_377_scalar_t* d_inout, unsigned n, size_t stream);
|
||||
|
||||
// points g1
|
||||
int to_montgomery_proj_points_cuda_bls12_377(BLS12_377_projective_t* d_inout, unsigned n, size_t stream);
|
||||
int from_montgomery_proj_points_cuda_bls12_377(BLS12_377_projective_t* d_inout, unsigned n, size_t stream);
|
||||
int to_montgomery_aff_points_cuda_bls12_377(BLS12_377_affine_t* d_inout, unsigned n, size_t stream);
|
||||
int from_montgomery_aff_points_cuda_bls12_377(BLS12_377_affine_t* d_inout, unsigned n, size_t stream);
|
||||
|
||||
// points g2
|
||||
int to_montgomery_proj_points_g2_cuda_bls12_377(BLS12_377_g2_projective_t* d_inout, unsigned n, size_t stream);
|
||||
int from_montgomery_proj_points_g2_cuda_bls12_377(BLS12_377_g2_projective_t* d_inout, unsigned n, size_t stream);
|
||||
int to_montgomery_aff_points_g2_cuda_bls12_377(BLS12_377_g2_affine_t* d_inout, unsigned n, size_t stream);
|
||||
int from_montgomery_aff_points_g2_cuda_bls12_377(BLS12_377_g2_affine_t* d_inout, unsigned n, size_t stream);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* _BLS12_377_NTT_H */
|
||||
@@ -1,50 +0,0 @@
|
||||
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
#include <cuda.h>
|
||||
#include <stdbool.h>
|
||||
// projective.h
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef struct BLS12_377_projective_t BLS12_377_projective_t;
|
||||
typedef struct BLS12_377_g2_projective_t BLS12_377_g2_projective_t;
|
||||
typedef struct BLS12_377_affine_t BLS12_377_affine_t;
|
||||
typedef struct BLS12_377_g2_affine_t BLS12_377_g2_affine_t;
|
||||
typedef struct BLS12_377_scalar_t BLS12_377_scalar_t;
|
||||
|
||||
bool projective_is_on_curve_bls12_377(BLS12_377_projective_t* point1);
|
||||
|
||||
int random_scalar_bls12_377(BLS12_377_scalar_t* out);
|
||||
int random_projective_bls12_377(BLS12_377_projective_t* out);
|
||||
BLS12_377_projective_t* projective_zero_bls12_377();
|
||||
int projective_to_affine_bls12_377(BLS12_377_affine_t* out, BLS12_377_projective_t* point1);
|
||||
int projective_from_affine_bls12_377(BLS12_377_projective_t* out, BLS12_377_affine_t* point1);
|
||||
|
||||
int random_g2_projective_bls12_377(BLS12_377_g2_projective_t* out);
|
||||
int g2_projective_to_affine_bls12_377(BLS12_377_g2_affine_t* out, BLS12_377_g2_projective_t* point1);
|
||||
int g2_projective_from_affine_bls12_377(BLS12_377_g2_projective_t* out, BLS12_377_g2_affine_t* point1);
|
||||
bool g2_projective_is_on_curve_bls12_377(BLS12_377_g2_projective_t* point1);
|
||||
|
||||
bool eq_bls12_377(BLS12_377_projective_t* point1, BLS12_377_projective_t* point2);
|
||||
bool eq_g2_bls12_377(BLS12_377_g2_projective_t* point1, BLS12_377_g2_projective_t* point2);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
@@ -1,49 +0,0 @@
|
||||
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
#include <cuda.h>
|
||||
#include <stdbool.h>
|
||||
// ve_mod_mult.h
|
||||
|
||||
#ifndef _BLS12_377_VEC_MULT_H
|
||||
#define _BLS12_377_VEC_MULT_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef struct BLS12_377_projective_t BLS12_377_projective_t;
|
||||
typedef struct BLS12_377_scalar_t BLS12_377_scalar_t;
|
||||
|
||||
int32_t vec_mod_mult_point_bls12_377(
|
||||
BLS12_377_projective_t* inout, BLS12_377_scalar_t* scalar_vec, size_t n_elments, size_t device_id);
|
||||
int32_t vec_mod_mult_scalar_bls12_377(
|
||||
BLS12_377_scalar_t* inout, BLS12_377_scalar_t* scalar_vec, size_t n_elments, size_t device_id);
|
||||
int32_t vec_mod_mult_device_scalar_bls12_377(
|
||||
BLS12_377_scalar_t* inout, BLS12_377_scalar_t* scalar_vec, size_t n_elements, size_t device_id);
|
||||
int32_t matrix_vec_mod_mult_bls12_377(
|
||||
BLS12_377_scalar_t* matrix_flattened,
|
||||
BLS12_377_scalar_t* input,
|
||||
BLS12_377_scalar_t* output,
|
||||
size_t n_elments,
|
||||
size_t device_id);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* _BLS12_377_VEC_MULT_H */
|
||||
@@ -1,209 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bls12377
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// #cgo CFLAGS: -I./include/
|
||||
// #cgo CFLAGS: -I/usr/local/cuda/include
|
||||
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbls12_377
|
||||
// #include "msm.h"
|
||||
import "C"
|
||||
|
||||
func Msm(out *G1ProjectivePoint, points []G1PointAffine, scalars []G1ScalarField, device_id int) (*G1ProjectivePoint, error) {
|
||||
if len(points) != len(scalars) {
|
||||
return nil, errors.New("error on: len(points) != len(scalars)")
|
||||
}
|
||||
|
||||
pointsC := (*C.BLS12_377_affine_t)(unsafe.Pointer(&points[0]))
|
||||
scalarsC := (*C.BLS12_377_scalar_t)(unsafe.Pointer(&scalars[0]))
|
||||
outC := (*C.BLS12_377_projective_t)(unsafe.Pointer(out))
|
||||
ret := C.msm_cuda_bls12_377(outC, pointsC, scalarsC, C.size_t(len(points)), C.size_t(device_id))
|
||||
|
||||
if ret != 0 {
|
||||
return nil, fmt.Errorf("msm_cuda_bls12_377 returned error code: %d", ret)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func MsmG2(out *G2Point, points []G2PointAffine, scalars []G1ScalarField, device_id int) (*G2Point, error) {
|
||||
if len(points) != len(scalars) {
|
||||
return nil, errors.New("error on: len(points) != len(scalars)")
|
||||
}
|
||||
|
||||
pointsC := (*C.BLS12_377_g2_affine_t)(unsafe.Pointer(&points[0]))
|
||||
scalarsC := (*C.BLS12_377_scalar_t)(unsafe.Pointer(&scalars[0]))
|
||||
outC := (*C.BLS12_377_g2_projective_t)(unsafe.Pointer(out))
|
||||
|
||||
ret := C.msm_g2_cuda_bls12_377(outC, pointsC, scalarsC, C.size_t(len(points)), C.size_t(device_id))
|
||||
|
||||
if ret != 0 {
|
||||
return nil, fmt.Errorf("msm_g2_cuda_bls12_377 returned error code: %d", ret)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func MsmBatch(points *[]G1PointAffine, scalars *[]G1ScalarField, batchSize, deviceId int) ([]G1ProjectivePoint, error) {
|
||||
// Check for nil pointers
|
||||
if points == nil || scalars == nil {
|
||||
return nil, errors.New("points or scalars is nil")
|
||||
}
|
||||
|
||||
if len(*points) != len(*scalars) {
|
||||
return nil, errors.New("error on: len(points) != len(scalars)")
|
||||
}
|
||||
|
||||
// Check for empty slices
|
||||
if len(*points) == 0 || len(*scalars) == 0 {
|
||||
return nil, errors.New("points or scalars is empty")
|
||||
}
|
||||
|
||||
// Check for zero batchSize
|
||||
if batchSize <= 0 {
|
||||
return nil, errors.New("error on: batchSize must be greater than zero")
|
||||
}
|
||||
|
||||
out := make([]G1ProjectivePoint, batchSize)
|
||||
|
||||
for i := 0; i < len(out); i++ {
|
||||
var p G1ProjectivePoint
|
||||
p.SetZero()
|
||||
|
||||
out[i] = p
|
||||
}
|
||||
|
||||
outC := (*C.BLS12_377_projective_t)(unsafe.Pointer(&out[0]))
|
||||
pointsC := (*C.BLS12_377_affine_t)(unsafe.Pointer(&(*points)[0]))
|
||||
scalarsC := (*C.BLS12_377_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
|
||||
msmSizeC := C.size_t(len(*points) / batchSize)
|
||||
deviceIdC := C.size_t(deviceId)
|
||||
batchSizeC := C.size_t(batchSize)
|
||||
|
||||
ret := C.msm_batch_cuda_bls12_377(outC, pointsC, scalarsC, batchSizeC, msmSizeC, deviceIdC)
|
||||
if ret != 0 {
|
||||
return nil, fmt.Errorf("msm_batch_cuda_bls12_377 returned error code: %d", ret)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func MsmG2Batch(points *[]G2PointAffine, scalars *[]G1ScalarField, batchSize, deviceId int) ([]G2Point, error) {
|
||||
// Check for nil pointers
|
||||
if points == nil || scalars == nil {
|
||||
return nil, errors.New("points or scalars is nil")
|
||||
}
|
||||
|
||||
if len(*points) != len(*scalars) {
|
||||
return nil, errors.New("error on: len(points) != len(scalars)")
|
||||
}
|
||||
|
||||
// Check for empty slices
|
||||
if len(*points) == 0 || len(*scalars) == 0 {
|
||||
return nil, errors.New("points or scalars is empty")
|
||||
}
|
||||
|
||||
// Check for zero batchSize
|
||||
if batchSize <= 0 {
|
||||
return nil, errors.New("error on: batchSize must be greater than zero")
|
||||
}
|
||||
|
||||
out := make([]G2Point, batchSize)
|
||||
|
||||
outC := (*C.BLS12_377_g2_projective_t)(unsafe.Pointer(&out[0]))
|
||||
pointsC := (*C.BLS12_377_g2_affine_t)(unsafe.Pointer(&(*points)[0]))
|
||||
scalarsC := (*C.BLS12_377_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
|
||||
msmSizeC := C.size_t(len(*points) / batchSize)
|
||||
deviceIdC := C.size_t(deviceId)
|
||||
batchSizeC := C.size_t(batchSize)
|
||||
|
||||
ret := C.msm_batch_g2_cuda_bls12_377(outC, pointsC, scalarsC, batchSizeC, msmSizeC, deviceIdC)
|
||||
if ret != 0 {
|
||||
return nil, fmt.Errorf("msm_batch_cuda_bls12_377 returned error code: %d", ret)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func Commit(d_out, d_scalars, d_points unsafe.Pointer, count, bucketFactor int) int {
|
||||
d_outC := (*C.BLS12_377_projective_t)(d_out)
|
||||
scalarsC := (*C.BLS12_377_scalar_t)(d_scalars)
|
||||
pointsC := (*C.BLS12_377_affine_t)(d_points)
|
||||
countC := (C.size_t)(count)
|
||||
largeBucketFactorC := C.uint(bucketFactor)
|
||||
|
||||
ret := C.commit_cuda_bls12_377(d_outC, scalarsC, pointsC, countC, largeBucketFactorC, 0)
|
||||
|
||||
if ret != 0 {
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func CommitG2(d_out, d_scalars, d_points unsafe.Pointer, count, bucketFactor int) int {
|
||||
d_outC := (*C.BLS12_377_g2_projective_t)(d_out)
|
||||
scalarsC := (*C.BLS12_377_scalar_t)(d_scalars)
|
||||
pointsC := (*C.BLS12_377_g2_affine_t)(d_points)
|
||||
countC := (C.size_t)(count)
|
||||
largeBucketFactorC := C.uint(bucketFactor)
|
||||
|
||||
ret := C.commit_g2_cuda_bls12_377(d_outC, scalarsC, pointsC, countC, largeBucketFactorC, 0)
|
||||
|
||||
if ret != 0 {
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func CommitBatch(d_out, d_scalars, d_points unsafe.Pointer, count, batch_size int) int {
|
||||
d_outC := (*C.BLS12_377_projective_t)(d_out)
|
||||
scalarsC := (*C.BLS12_377_scalar_t)(d_scalars)
|
||||
pointsC := (*C.BLS12_377_affine_t)(d_points)
|
||||
countC := (C.size_t)(count)
|
||||
batch_sizeC := (C.size_t)(batch_size)
|
||||
|
||||
ret := C.commit_batch_cuda_bls12_377(d_outC, scalarsC, pointsC, countC, batch_sizeC, 0)
|
||||
|
||||
if ret != 0 {
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func CommitG2Batch(d_out, d_scalars, d_points unsafe.Pointer, count, batch_size int) int {
|
||||
d_outC := (*C.BLS12_377_g2_projective_t)(d_out)
|
||||
scalarsC := (*C.BLS12_377_scalar_t)(d_scalars)
|
||||
pointsC := (*C.BLS12_377_g2_affine_t)(d_points)
|
||||
countC := (C.size_t)(count)
|
||||
batch_sizeC := (C.size_t)(batch_size)
|
||||
|
||||
// NOTE: assumed to mirror commit_batch_g2_cuda_bls12_381(d_out, d_scalars, d_points, count, batch_size, device_id, stream)
ret := C.commit_batch_g2_cuda_bls12_377(d_outC, scalarsC, pointsC, countC, batch_sizeC, 0, 0)
|
||||
|
||||
if ret != 0 {
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
@@ -1,360 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bls12377
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"testing"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/ingonyama-zk/icicle/goicicle"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func GeneratePoints(count int) []G1PointAffine {
|
||||
// Declare a slice of affine points
|
||||
var points []G1PointAffine
|
||||
|
||||
// populate the slice
|
||||
for i := 0; i < 10; i++ {
|
||||
var pointProjective G1ProjectivePoint
|
||||
pointProjective.Random()
|
||||
|
||||
var pointAffine G1PointAffine
|
||||
pointAffine.FromProjective(&pointProjective)
|
||||
|
||||
points = append(points, pointAffine)
|
||||
}
|
||||
|
||||
log2_10 := math.Log2(10)
|
||||
log2Count := math.Log2(float64(count))
|
||||
log2Size := int(math.Ceil(log2Count - log2_10))
|
||||
|
||||
for i := 0; i < log2Size; i++ {
|
||||
points = append(points, points...)
|
||||
}
|
||||
|
||||
return points[:count]
|
||||
}
|
||||
|
||||
func GeneratePointsProj(count int) []G1ProjectivePoint {
|
||||
// Declare a slice of projective points
|
||||
var points []G1ProjectivePoint
|
||||
// Use a loop to populate the slice
|
||||
for i := 0; i < count; i++ {
|
||||
var p G1ProjectivePoint
|
||||
p.Random()
|
||||
|
||||
points = append(points, p)
|
||||
}
|
||||
|
||||
return points
|
||||
}
|
||||
|
||||
func GenerateScalars(count int, skewed bool) []G1ScalarField {
|
||||
// Declare a slice of scalars
|
||||
var scalars []G1ScalarField
|
||||
|
||||
var rand G1ScalarField
|
||||
var zero G1ScalarField
|
||||
var one G1ScalarField
|
||||
var randLarge G1ScalarField
|
||||
|
||||
zero.SetZero()
|
||||
one.SetOne()
|
||||
randLarge.Random()
|
||||
|
||||
if skewed && count > 1_200_000 {
|
||||
for i := 0; i < count-1_200_000; i++ {
|
||||
rand.Random()
|
||||
scalars = append(scalars, rand)
|
||||
}
|
||||
|
||||
for i := 0; i < 600_000; i++ {
|
||||
scalars = append(scalars, randLarge)
|
||||
}
|
||||
for i := 0; i < 400_000; i++ {
|
||||
scalars = append(scalars, zero)
|
||||
}
|
||||
for i := 0; i < 200_000; i++ {
|
||||
scalars = append(scalars, one)
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < count; i++ {
|
||||
rand.Random()
|
||||
scalars = append(scalars, rand)
|
||||
}
|
||||
}
|
||||
|
||||
return scalars[:count]
|
||||
}
|
||||
|
||||
func TestMSM(t *testing.T) {
|
||||
for _, v := range []int{8} {
|
||||
count := 1 << v
|
||||
|
||||
points := GeneratePoints(count)
|
||||
fmt.Print("Finished generating points\n")
|
||||
scalars := GenerateScalars(count, false)
|
||||
fmt.Print("Finished generating scalars\n")
|
||||
|
||||
out := new(G1ProjectivePoint)
|
||||
startTime := time.Now()
|
||||
_, e := Msm(out, points, scalars, 0) // inputs not in Montgomery form
|
||||
fmt.Printf("icicle MSM took: %d ms\n", time.Since(startTime).Milliseconds())
|
||||
|
||||
assert.Equal(t, e, nil, "error should be nil")
|
||||
|
||||
assert.True(t, out.IsOnCurve())
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitMSM(t *testing.T) {
|
||||
for _, v := range []int{8} {
|
||||
count := 1<<v - 1
|
||||
|
||||
points := GeneratePoints(count)
|
||||
fmt.Print("Finished generating points\n")
|
||||
scalars := GenerateScalars(count, false)
|
||||
fmt.Print("Finished generating scalars\n")
|
||||
|
||||
out_d, _ := goicicle.CudaMalloc(96)
|
||||
|
||||
pointsBytes := count * 64
|
||||
points_d, _ := goicicle.CudaMalloc(pointsBytes)
|
||||
goicicle.CudaMemCpyHtoD[G1PointAffine](points_d, points, pointsBytes)
|
||||
|
||||
scalarBytes := count * 32
|
||||
scalars_d, _ := goicicle.CudaMalloc(scalarBytes)
|
||||
goicicle.CudaMemCpyHtoD[G1ScalarField](scalars_d, scalars, scalarBytes)
|
||||
|
||||
startTime := time.Now()
|
||||
e := Commit(out_d, scalars_d, points_d, count, 10)
|
||||
fmt.Printf("icicle MSM took: %d ms\n", time.Since(startTime).Milliseconds())
|
||||
|
||||
outHost := make([]G1ProjectivePoint, 1)
|
||||
goicicle.CudaMemCpyDtoH[G1ProjectivePoint](outHost, out_d, 96)
|
||||
|
||||
assert.Equal(t, e, 0, "error should be 0")
|
||||
assert.True(t, outHost[0].IsOnCurve())
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCommit(b *testing.B) {
|
||||
LOG_MSM_SIZES := []int{20, 21, 22, 23, 24, 25, 26}
|
||||
|
||||
for _, logMsmSize := range LOG_MSM_SIZES {
|
||||
msmSize := 1 << logMsmSize
|
||||
points := GeneratePoints(msmSize)
|
||||
scalars := GenerateScalars(msmSize, false)
|
||||
|
||||
out_d, _ := goicicle.CudaMalloc(96)
|
||||
|
||||
pointsBytes := msmSize * 64
|
||||
points_d, _ := goicicle.CudaMalloc(pointsBytes)
|
||||
goicicle.CudaMemCpyHtoD[G1PointAffine](points_d, points, pointsBytes)
|
||||
|
||||
scalarBytes := msmSize * 32
|
||||
scalars_d, _ := goicicle.CudaMalloc(scalarBytes)
|
||||
goicicle.CudaMemCpyHtoD[G1ScalarField](scalars_d, scalars, scalarBytes)
|
||||
|
||||
b.Run(fmt.Sprintf("MSM %d", logMsmSize), func(b *testing.B) {
|
||||
for n := 0; n < b.N; n++ {
|
||||
e := Commit(out_d, scalars_d, points_d, msmSize, 10)
|
||||
|
||||
if e != 0 {
|
||||
panic("Error occurred")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchMSM(t *testing.T) {
|
||||
for _, batchPow2 := range []int{2, 4} {
|
||||
for _, pow2 := range []int{4, 6} {
|
||||
msmSize := 1 << pow2
|
||||
batchSize := 1 << batchPow2
|
||||
count := msmSize * batchSize
|
||||
|
||||
points := GeneratePoints(count)
|
||||
scalars := GenerateScalars(count, false)
|
||||
|
||||
pointsResults, e := MsmBatch(&points, &scalars, batchSize, 0)
|
||||
|
||||
if e != nil {
|
||||
t.Errorf("MsmBatchBLS12_377 returned an error: %v", e)
|
||||
}
|
||||
|
||||
if len(pointsResults) != batchSize {
|
||||
t.Errorf("Expected length %d, but got %d", batchSize, len(pointsResults))
|
||||
}
|
||||
|
||||
for _, s := range pointsResults {
|
||||
assert.True(t, s.IsOnCurve())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMSM(b *testing.B) {
|
||||
LOG_MSM_SIZES := []int{20, 21, 22, 23, 24, 25, 26}
|
||||
|
||||
for _, logMsmSize := range LOG_MSM_SIZES {
|
||||
msmSize := 1 << logMsmSize
|
||||
points := GeneratePoints(msmSize)
|
||||
scalars := GenerateScalars(msmSize, false)
|
||||
b.Run(fmt.Sprintf("MSM %d", logMsmSize), func(b *testing.B) {
|
||||
for n := 0; n < b.N; n++ {
|
||||
out := new(G1ProjectivePoint)
|
||||
_, e := Msm(out, points, scalars, 0)
|
||||
|
||||
if e != nil {
|
||||
panic("Error occurred")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// G2
|
||||
func GenerateG2Points(count int) []G2PointAffine {
|
||||
// Declare a slice of G2 affine points
|
||||
var points []G2PointAffine
|
||||
|
||||
// populate the slice
|
||||
for i := 0; i < 10; i++ {
|
||||
fmt.Print() // this prevents the test from hanging. TODO: figure out why
|
||||
var p G2Point
|
||||
p.Random()
|
||||
var affine G2PointAffine
|
||||
affine.FromProjective(&p)
|
||||
|
||||
points = append(points, affine)
|
||||
}
|
||||
|
||||
log2_10 := math.Log2(10)
|
||||
log2Count := math.Log2(float64(count))
|
||||
log2Size := int(math.Ceil(log2Count - log2_10))
|
||||
|
||||
for i := 0; i < log2Size; i++ {
|
||||
points = append(points, points...)
|
||||
}
|
||||
|
||||
return points[:count]
|
||||
}
|
||||
|
||||
func TestMsmG2BLS12_377(t *testing.T) {
|
||||
for _, v := range []int{8} {
|
||||
count := 1 << v
|
||||
points := GenerateG2Points(count)
|
||||
fmt.Print("Finished generating points\n")
|
||||
scalars := GenerateScalars(count, false)
|
||||
fmt.Print("Finished generating scalars\n")
|
||||
|
||||
out := new(G2Point)
|
||||
_, e := MsmG2(out, points, scalars, 0)
|
||||
assert.Equal(t, e, nil, "error should be nil")
|
||||
assert.True(t, out.IsOnCurve())
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMsmG2BLS12_377(b *testing.B) {
|
||||
LOG_MSM_SIZES := []int{20, 21, 22, 23, 24, 25, 26}
|
||||
|
||||
for _, logMsmSize := range LOG_MSM_SIZES {
|
||||
msmSize := 1 << logMsmSize
|
||||
points := GenerateG2Points(msmSize)
|
||||
scalars := GenerateScalars(msmSize, false)
|
||||
b.Run(fmt.Sprintf("MSM G2 %d", logMsmSize), func(b *testing.B) {
|
||||
for n := 0; n < b.N; n++ {
|
||||
out := new(G2Point)
|
||||
_, e := MsmG2(out, points, scalars, 0)
|
||||
|
||||
if e != nil {
|
||||
panic("Error occurred")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitG2MSM(t *testing.T) {
|
||||
for _, v := range []int{8} {
|
||||
count := 1 << v
|
||||
|
||||
points := GenerateG2Points(count)
|
||||
fmt.Print("Finished generating points\n")
|
||||
scalars := GenerateScalars(count, false)
|
||||
fmt.Print("Finished generating scalars\n")
|
||||
|
||||
var sizeCheckG2PointAffine G2PointAffine
|
||||
inputPointsBytes := count * int(unsafe.Sizeof(sizeCheckG2PointAffine))
|
||||
|
||||
var sizeCheckG2Point G2Point
|
||||
out_d, _ := goicicle.CudaMalloc(int(unsafe.Sizeof(sizeCheckG2Point)))
|
||||
|
||||
points_d, _ := goicicle.CudaMalloc(inputPointsBytes)
|
||||
goicicle.CudaMemCpyHtoD[G2PointAffine](points_d, points, inputPointsBytes)
|
||||
|
||||
scalarBytes := count * 32
|
||||
scalars_d, _ := goicicle.CudaMalloc(scalarBytes)
|
||||
goicicle.CudaMemCpyHtoD[G1ScalarField](scalars_d, scalars, scalarBytes)
|
||||
|
||||
startTime := time.Now()
|
||||
e := CommitG2(out_d, scalars_d, points_d, count, 10)
|
||||
fmt.Printf("icicle MSM took: %d ms\n", time.Since(startTime).Milliseconds())
|
||||
|
||||
outHost := make([]G2Point, 1)
|
||||
goicicle.CudaMemCpyDtoH[G2Point](outHost, out_d, int(unsafe.Sizeof(sizeCheckG2Point)))
|
||||
|
||||
assert.Equal(t, e, 0, "error should be 0")
|
||||
assert.Equal(t, len(outHost), 1)
|
||||
result := outHost[0]
|
||||
|
||||
assert.True(t, result.IsOnCurve())
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchG2MSM(t *testing.T) {
|
||||
for _, batchPow2 := range []int{2, 4} {
|
||||
for _, pow2 := range []int{4, 6} {
|
||||
msmSize := 1 << pow2
|
||||
batchSize := 1 << batchPow2
|
||||
count := msmSize * batchSize
|
||||
|
||||
points := GenerateG2Points(count)
|
||||
scalars := GenerateScalars(count, false)
|
||||
|
||||
pointsResults, e := MsmG2Batch(&points, &scalars, batchSize, 0)
|
||||
|
||||
if e != nil {
|
||||
t.Errorf("MsmBatchBLS12_377 returned an error: %v", e)
|
||||
}
|
||||
|
||||
if len(pointsResults) != batchSize {
|
||||
t.Errorf("Expected length %d, but got %d", batchSize, len(pointsResults))
|
||||
}
|
||||
|
||||
for _, s := range pointsResults {
|
||||
assert.True(t, s.IsOnCurve())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,222 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bls12377
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"unsafe"
|
||||
|
||||
"github.com/ingonyama-zk/icicle/goicicle"
|
||||
)
|
||||
|
||||
// #cgo CFLAGS: -I./include/
|
||||
// #cgo CFLAGS: -I/usr/local/cuda/include
|
||||
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbls12_377
|
||||
// #include "ntt.h"
|
||||
import "C"
|
||||
|
||||
const (
|
||||
NONE = 0
|
||||
DIF = 1
|
||||
DIT = 2
|
||||
)
|
||||
|
||||
func Ntt(scalars *[]G1ScalarField, isInverse bool, deviceId int) uint64 {
|
||||
scalarsC := (*C.BLS12_377_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
|
||||
|
||||
ret := C.ntt_cuda_bls12_377(scalarsC, C.uint32_t(len(*scalars)), C.bool(isInverse), C.size_t(deviceId))
|
||||
|
||||
return uint64(ret)
|
||||
}
|
||||
|
||||
func NttBatch(scalars *[]G1ScalarField, isInverse bool, batchSize, deviceId int) uint64 {
|
||||
scalarsC := (*C.BLS12_377_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
|
||||
isInverseC := C.bool(isInverse)
|
||||
batchSizeC := C.uint32_t(batchSize)
|
||||
deviceIdC := C.size_t(deviceId)
|
||||
|
||||
ret := C.ntt_batch_cuda_bls12_377(scalarsC, C.uint32_t(len(*scalars)), batchSizeC, isInverseC, deviceIdC)
|
||||
|
||||
return uint64(ret)
|
||||
}
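// Illustrative usage sketch (not part of the generated bindings): Ntt transforms the
// scalar slice in place on the selected device and returns the C error code (0 on
// success). GenerateScalars is the package's test helper and is assumed to be available
// only from the test file.
//
//	scalars := GenerateScalars(1<<10, false)
//	if ret := Ntt(&scalars, false, 0); ret != 0 {
//		// handle CUDA error
//	}
//	if ret := Ntt(&scalars, true, 0); ret != 0 { // inverse NTT restores the input
//		// handle CUDA error
//	}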
|
||||
|
||||
func EcNtt(values *[]G1ProjectivePoint, isInverse bool, deviceId int) uint64 {
|
||||
valuesC := (*C.BLS12_377_projective_t)(unsafe.Pointer(&(*values)[0]))
|
||||
deviceIdC := C.size_t(deviceId)
|
||||
isInverseC := C.bool(isInverse)
|
||||
n := C.uint32_t(len(*values))
|
||||
|
||||
ret := C.ecntt_cuda_bls12_377(valuesC, n, isInverseC, deviceIdC)
|
||||
|
||||
return uint64(ret)
|
||||
}
|
||||
|
||||
func EcNttBatch(values *[]G1ProjectivePoint, isInverse bool, batchSize, deviceId int) uint64 {
|
||||
valuesC := (*C.BLS12_377_projective_t)(unsafe.Pointer(&(*values)[0]))
|
||||
deviceIdC := C.size_t(deviceId)
|
||||
isInverseC := C.bool(isInverse)
|
||||
n := C.uint32_t(len(*values))
|
||||
batchSizeC := C.uint32_t(batchSize)
|
||||
|
||||
ret := C.ecntt_batch_cuda_bls12_377(valuesC, n, batchSizeC, isInverseC, deviceIdC)
|
||||
|
||||
return uint64(ret)
|
||||
}
|
||||
|
||||
func GenerateTwiddles(d_size int, log_d_size int, inverse bool) (up unsafe.Pointer, err error) {
|
||||
domain_size := C.uint32_t(d_size)
|
||||
logn := C.uint32_t(log_d_size)
|
||||
is_inverse := C.bool(inverse)
|
||||
|
||||
dp := C.build_domain_cuda_bls12_377(domain_size, logn, is_inverse, 0, 0)
|
||||
|
||||
if dp == nil {
|
||||
err = errors.New("nullptr returned from generating twiddles")
|
||||
return unsafe.Pointer(nil), err
|
||||
}
|
||||
|
||||
return unsafe.Pointer(dp), nil
|
||||
}
|
||||
|
||||
// Reverses d_scalars in-place
|
||||
func ReverseScalars(d_scalars unsafe.Pointer, len int) (int, error) {
|
||||
scalarsC := (*C.BLS12_377_scalar_t)(d_scalars)
|
||||
lenC := C.int(len)
|
||||
if success := C.reverse_order_scalars_cuda_bls12_377(scalarsC, lenC, 0, 0); success != 0 {
|
||||
return -1, errors.New("reversing failed")
|
||||
}
|
||||
return 0, nil
|
||||
}
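// Illustrative usage sketch (not part of the generated bindings): ReverseScalars acts on
// scalars that already live in device memory, so a typical flow copies host scalars to the
// device first. goicicle.CudaMalloc and CudaMemCpyHtoD are the helpers used elsewhere in
// this package; the 32-byte scalar size matches the assumption made by Interpolate below.
//
//	scalars := GenerateScalars(1<<10, false) // test helper, assumed available
//	d_scalars, _ := goicicle.CudaMalloc(len(scalars) * 32)
//	goicicle.CudaMemCpyHtoD[G1ScalarField](d_scalars, scalars, len(scalars)*32)
//	if _, err := ReverseScalars(d_scalars, len(scalars)); err != nil {
//		// handle CUDA error
//	}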
|
||||
|
||||
func Interpolate(scalars, twiddles, cosetPowers unsafe.Pointer, size int, isCoset bool) unsafe.Pointer {
|
||||
size_d := size * 32
|
||||
dp, err := goicicle.CudaMalloc(size_d)
|
||||
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
d_out := (*C.BLS12_377_scalar_t)(dp)
|
||||
scalarsC := (*C.BLS12_377_scalar_t)(scalars)
|
||||
twiddlesC := (*C.BLS12_377_scalar_t)(twiddles)
|
||||
cosetPowersC := (*C.BLS12_377_scalar_t)(cosetPowers)
|
||||
sizeC := C.uint(size)
|
||||
|
||||
var ret C.int
|
||||
if isCoset {
|
||||
ret = C.interpolate_scalars_on_coset_cuda_bls12_377(d_out, scalarsC, twiddlesC, sizeC, cosetPowersC, 0, 0)
|
||||
} else {
|
||||
ret = C.interpolate_scalars_cuda_bls12_377(d_out, scalarsC, twiddlesC, sizeC, 0, 0)
|
||||
}
|
||||
if ret != 0 {
|
||||
fmt.Print("error interpolating")
|
||||
}
|
||||
|
||||
return unsafe.Pointer(d_out)
|
||||
}
|
||||
|
||||
func Evaluate(scalars_out, scalars, twiddles, coset_powers unsafe.Pointer, scalars_size, twiddles_size int, isCoset bool) int {
|
||||
scalars_outC := (*C.BLS12_377_scalar_t)(scalars_out)
|
||||
scalarsC := (*C.BLS12_377_scalar_t)(scalars)
|
||||
twiddlesC := (*C.BLS12_377_scalar_t)(twiddles)
|
||||
coset_powersC := (*C.BLS12_377_scalar_t)(coset_powers)
|
||||
sizeC := C.uint(scalars_size)
|
||||
twiddlesC_size := C.uint(twiddles_size)
|
||||
|
||||
var ret C.int
|
||||
if isCoset {
|
||||
ret = C.evaluate_scalars_on_coset_cuda_bls12_377(scalars_outC, scalarsC, twiddlesC, twiddlesC_size, sizeC, coset_powersC, 0, 0)
|
||||
} else {
|
||||
ret = C.evaluate_scalars_cuda_bls12_377(scalars_outC, scalarsC, twiddlesC, twiddlesC_size, sizeC, 0, 0)
|
||||
}
|
||||
|
||||
if ret != 0 {
|
||||
fmt.Print("error interpolating")
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func VecScalarAdd(in1_d, in2_d unsafe.Pointer, size int) int {
|
||||
in1_dC := (*C.BLS12_377_scalar_t)(in1_d)
|
||||
in2_dC := (*C.BLS12_377_scalar_t)(in2_d)
|
||||
sizeC := C.uint(size)
|
||||
|
||||
ret := C.add_scalars_cuda_bls12_377(in1_dC, in1_dC, in2_dC, sizeC, 0)
|
||||
|
||||
if ret != 0 {
|
||||
fmt.Print("error adding scalar vectors")
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func VecScalarSub(in1_d, in2_d unsafe.Pointer, size int) int {
|
||||
in1_dC := (*C.BLS12_377_scalar_t)(in1_d)
|
||||
in2_dC := (*C.BLS12_377_scalar_t)(in2_d)
|
||||
sizeC := C.uint(size)
|
||||
|
||||
ret := C.sub_scalars_cuda_bls12_377(in1_dC, in1_dC, in2_dC, sizeC, 0)
|
||||
|
||||
if ret != 0 {
|
||||
fmt.Print("error subtracting scalar vectors")
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func ToMontgomery(d_scalars unsafe.Pointer, len int) (int, error) {
|
||||
scalarsC := (*C.BLS12_377_scalar_t)(d_scalars)
|
||||
lenC := C.uint(len)
|
||||
if success := C.to_montgomery_scalars_cuda_bls12_377(scalarsC, lenC, 0); success != 0 {
|
||||
return -1, errors.New("reversing failed")
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func FromMontgomery(d_scalars unsafe.Pointer, len int) (int, error) {
|
||||
scalarsC := (*C.BLS12_377_scalar_t)(d_scalars)
|
||||
lenC := C.uint(len)
|
||||
if success := C.from_montgomery_scalars_cuda_bls12_377(scalarsC, lenC, 0); success != 0 {
|
||||
return -1, errors.New("reversing failed")
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func AffinePointFromMontgomery(d_points unsafe.Pointer, len int) (int, error) {
|
||||
pointsC := (*C.BLS12_377_affine_t)(d_points)
|
||||
lenC := C.uint(len)
|
||||
|
||||
if success := C.from_montgomery_aff_points_cuda_bls12_377(pointsC, lenC, 0); success != 0 {
|
||||
return -1, errors.New("reversing failed")
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func G2AffinePointFromMontgomery(d_points unsafe.Pointer, len int) (int, error) {
|
||||
pointsC := (*C.BLS12_377_g2_affine_t)(d_points)
|
||||
lenC := C.uint(len)
|
||||
|
||||
if success := C.from_montgomery_aff_points_g2_cuda_bls12_377(pointsC, lenC, 0); success != 0 {
|
||||
return -1, errors.New("reversing failed")
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
@@ -1,148 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bls12377
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNttBLS12_377Batch(t *testing.T) {
|
||||
count := 1 << 20
|
||||
scalars := GenerateScalars(count, false)
|
||||
|
||||
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
|
||||
copy(nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, scalars)
|
||||
NttBatch(&nttResult, false, count, 0)
|
||||
assert.NotEqual(t, nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, nttResult)
|
||||
}
|
||||
|
||||
func TestNttBLS12_377CompareToGnarkDIF(t *testing.T) {
|
||||
count := 1 << 2
|
||||
scalars := GenerateScalars(count, false)
|
||||
|
||||
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
|
||||
copy(nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, scalars)
|
||||
Ntt(&nttResult, false, 0)
|
||||
assert.NotEqual(t, nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, nttResult)
|
||||
}
|
||||
|
||||
func TestINttBLS12_377CompareToGnarkDIT(t *testing.T) {
|
||||
count := 1 << 3
|
||||
scalars := GenerateScalars(count, false)
|
||||
|
||||
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
|
||||
copy(nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, scalars)
|
||||
Ntt(&nttResult, true, 0)
|
||||
assert.NotEqual(t, nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, nttResult)
|
||||
}
|
||||
|
||||
func TestNttBLS12_377(t *testing.T) {
|
||||
count := 1 << 3
|
||||
|
||||
scalars := GenerateScalars(count, false)
|
||||
|
||||
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
|
||||
copy(nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, scalars)
|
||||
Ntt(&nttResult, false, 0)
|
||||
assert.NotEqual(t, nttResult, scalars)
|
||||
|
||||
inttResult := make([]G1ScalarField, len(nttResult))
|
||||
copy(inttResult, nttResult)
|
||||
|
||||
assert.Equal(t, inttResult, nttResult)
|
||||
Ntt(&inttResult, true, 0)
|
||||
assert.Equal(t, inttResult, scalars)
|
||||
}
|
||||
|
||||
func TestNttBatchBLS12_377(t *testing.T) {
|
||||
count := 1 << 5
|
||||
batches := 4
|
||||
|
||||
scalars := GenerateScalars(count*batches, false)
|
||||
|
||||
var scalarVecOfVec [][]G1ScalarField = make([][]G1ScalarField, 0)
|
||||
|
||||
for i := 0; i < batches; i++ {
|
||||
start := i * count
|
||||
end := (i + 1) * count
|
||||
batch := make([]G1ScalarField, len(scalars[start:end]))
|
||||
copy(batch, scalars[start:end])
|
||||
scalarVecOfVec = append(scalarVecOfVec, batch)
|
||||
}
|
||||
|
||||
nttBatchResult := make([]G1ScalarField, len(scalars))
|
||||
copy(nttBatchResult, scalars)
|
||||
|
||||
NttBatch(&nttBatchResult, false, count, 0)
|
||||
|
||||
var nttResultVecOfVec [][]G1ScalarField
|
||||
|
||||
for i := 0; i < batches; i++ {
|
||||
// Clone the slice
|
||||
clone := make([]G1ScalarField, len(scalarVecOfVec[i]))
|
||||
copy(clone, scalarVecOfVec[i])
|
||||
|
||||
// Add it to the result vector of vectors
|
||||
nttResultVecOfVec = append(nttResultVecOfVec, clone)
|
||||
|
||||
// Call the ntt_bls12_377 function
|
||||
Ntt(&nttResultVecOfVec[i], false, 0)
|
||||
}
|
||||
|
||||
assert.NotEqual(t, nttBatchResult, scalars)
|
||||
|
||||
// Check that the ntt of each vec of scalars matches the corresponding slice of the batched ntt result
|
||||
for i := 0; i < batches; i++ {
|
||||
if !reflect.DeepEqual(nttResultVecOfVec[i], nttBatchResult[i*count:((i+1)*count)]) {
|
||||
t.Errorf("ntt of vec of scalars not equal to intt of specific batch")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkNTT(b *testing.B) {
|
||||
LOG_NTT_SIZES := []int{12, 15, 20, 21, 22, 23, 24, 25, 26}
|
||||
|
||||
for _, logNTTSize := range LOG_NTT_SIZES {
|
||||
nttSize := 1 << logNTTSize
|
||||
b.Run(fmt.Sprintf("NTT %d", logNTTSize), func(b *testing.B) {
|
||||
scalars := GenerateScalars(nttSize, false)
|
||||
|
||||
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
|
||||
copy(nttResult, scalars)
|
||||
for n := 0; n < b.N; n++ {
|
||||
Ntt(&nttResult, false, 0)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,38 +0,0 @@
|
||||
package bls12377
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
// Function to convert [8]uint32 to [4]uint64
|
||||
func ConvertUint32ArrToUint64Arr(arr32 [8]uint32) [4]uint64 {
|
||||
var arr64 [4]uint64
|
||||
for i := 0; i < len(arr32); i += 2 {
|
||||
arr64[i/2] = (uint64(arr32[i]) << 32) | uint64(arr32[i+1])
|
||||
}
|
||||
return arr64
|
||||
}
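// Worked example (illustrative, not part of the generated bindings): as written,
// ConvertUint32ArrToUint64Arr places arr32[i] in the high 32 bits of each output word and
// arr32[i+1] in the low 32 bits, e.g.
//
//	in := [8]uint32{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8}
//	out := ConvertUint32ArrToUint64Arr(in)
//	// out == [4]uint64{0x0000000100000002, 0x0000000300000004, 0x0000000500000006, 0x0000000700000008}
//
// Note that ConvertUint64ArrToUint32Arr4 below splits each uint64 little-endian (low word
// first), so the two helpers use opposite word orders.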
|
||||
|
||||
func ConvertUint64ArrToUint32Arr4(arr64 [4]uint64) [8]uint32 {
|
||||
var arr32 [8]uint32
|
||||
for i, v := range arr64 {
|
||||
b := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(b, v)
|
||||
|
||||
arr32[i*2] = binary.LittleEndian.Uint32(b[0:4])
|
||||
arr32[i*2+1] = binary.LittleEndian.Uint32(b[4:8])
|
||||
}
|
||||
|
||||
return arr32
|
||||
}
|
||||
|
||||
func ConvertUint64ArrToUint32Arr6(arr64 [6]uint64) [12]uint32 {
|
||||
var arr32 [12]uint32
|
||||
for i, v := range arr64 {
|
||||
b := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(b, v)
|
||||
|
||||
arr32[i*2] = binary.LittleEndian.Uint32(b[0:4])
|
||||
arr32[i*2+1] = binary.LittleEndian.Uint32(b[4:8])
|
||||
}
|
||||
|
||||
return arr32
|
||||
}
|
||||
@@ -1,42 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bls12377
|
||||
|
||||
// #cgo CFLAGS: -I./include/
|
||||
// #cgo CFLAGS: -I/usr/local/cuda/include
|
||||
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbls12_377
|
||||
// #include "ve_mod_mult.h"
|
||||
import "C"
|
||||
import (
|
||||
"fmt"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func VecScalarMulMod(scalarVec1, scalarVec2 unsafe.Pointer, size int) int {
|
||||
scalarVec1C := (*C.BLS12_377_scalar_t)(scalarVec1)
|
||||
scalarVec2C := (*C.BLS12_377_scalar_t)(scalarVec2)
|
||||
sizeC := C.size_t(size)
|
||||
|
||||
ret := C.vec_mod_mult_device_scalar_bls12_377(scalarVec1C, scalarVec2C, sizeC, 0)
|
||||
|
||||
if ret != 0 {
|
||||
fmt.Print("error multiplying scalar vectors")
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
@@ -1,328 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bls12381
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
// #cgo CFLAGS: -I./include/
|
||||
// #cgo CFLAGS: -I/usr/local/cuda/include
|
||||
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbls12_381
|
||||
// #include "projective.h"
|
||||
// #include "ve_mod_mult.h"
|
||||
import "C"
|
||||
|
||||
const SCALAR_SIZE = 8
|
||||
const BASE_SIZE = 12
|
||||
|
||||
type G1ScalarField struct {
|
||||
S [SCALAR_SIZE]uint32
|
||||
}
|
||||
|
||||
type G1BaseField struct {
|
||||
S [BASE_SIZE]uint32
|
||||
}
|
||||
|
||||
/*
|
||||
* BaseField Constructors
|
||||
*/
|
||||
|
||||
func (f *G1BaseField) SetZero() *G1BaseField {
|
||||
var S [BASE_SIZE]uint32
|
||||
f.S = S
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *G1BaseField) SetOne() *G1BaseField {
|
||||
var S [BASE_SIZE]uint32
|
||||
|
||||
S[0] = 1
|
||||
|
||||
f.S = S
|
||||
return f
|
||||
}
|
||||
|
||||
func (p *G1ProjectivePoint) FromAffine(affine *G1PointAffine) *G1ProjectivePoint {
|
||||
out := (*C.BLS12_381_projective_t)(unsafe.Pointer(p))
|
||||
in := (*C.BLS12_381_affine_t)(unsafe.Pointer(affine))
|
||||
|
||||
C.projective_from_affine_bls12_381(out, in)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (f *G1BaseField) FromLimbs(limbs [BASE_SIZE]uint32) *G1BaseField {
|
||||
copy(f.S[:], limbs[:])
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
/*
|
||||
* BaseField methods
|
||||
*/
|
||||
|
||||
func (f *G1BaseField) Limbs() [BASE_SIZE]uint32 {
|
||||
return f.S
|
||||
}
|
||||
|
||||
func (f *G1BaseField) ToBytesLe() []byte {
|
||||
bytes := make([]byte, len(f.S)*4)
|
||||
for i, v := range f.S {
|
||||
binary.LittleEndian.PutUint32(bytes[i*4:], v)
|
||||
}
|
||||
|
||||
return bytes
|
||||
}
|
||||
|
||||
/*
|
||||
* ScalarField methods
|
||||
*/
|
||||
|
||||
func (p *G1ScalarField) Random() *G1ScalarField {
|
||||
outC := (*C.BLS12_381_scalar_t)(unsafe.Pointer(p))
|
||||
C.random_scalar_bls12_381(outC)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (f *G1ScalarField) SetZero() *G1ScalarField {
|
||||
var S [SCALAR_SIZE]uint32
|
||||
f.S = S
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *G1ScalarField) SetOne() *G1ScalarField {
|
||||
var S [SCALAR_SIZE]uint32
|
||||
S[0] = 1
|
||||
f.S = S
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func (a *G1ScalarField) Eq(b *G1ScalarField) bool {
|
||||
for i, v := range a.S {
|
||||
if b.S[i] != v {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
/*
|
||||
* ScalarField methods
|
||||
*/
|
||||
|
||||
func (f *G1ScalarField) Limbs() [SCALAR_SIZE]uint32 {
|
||||
return f.S
|
||||
}
|
||||
|
||||
func (f *G1ScalarField) ToBytesLe() []byte {
|
||||
bytes := make([]byte, len(f.S)*4)
|
||||
for i, v := range f.S {
|
||||
binary.LittleEndian.PutUint32(bytes[i*4:], v)
|
||||
}
|
||||
|
||||
return bytes
|
||||
}
|
||||
|
||||
/*
|
||||
* PointBLS12_381
|
||||
*/
|
||||
|
||||
type G1ProjectivePoint struct {
|
||||
X, Y, Z G1BaseField
|
||||
}
|
||||
|
||||
func (f *G1ProjectivePoint) SetZero() *G1ProjectivePoint {
|
||||
var yOne G1BaseField
|
||||
yOne.SetOne()
|
||||
|
||||
var xZero G1BaseField
|
||||
xZero.SetZero()
|
||||
|
||||
var zZero G1BaseField
|
||||
zZero.SetZero()
|
||||
|
||||
f.X = xZero
|
||||
f.Y = yOne
|
||||
f.Z = zZero
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func (p *G1ProjectivePoint) Eq(pCompare *G1ProjectivePoint) bool {
|
||||
// Cast *PointBLS12_381 to *C.BLS12_381_projective_t
|
||||
// The unsafe.Pointer cast is necessary because Go doesn't allow direct casts
|
||||
// between different pointer types.
|
||||
// It's your responsibility to ensure that the types are compatible.
|
||||
pC := (*C.BLS12_381_projective_t)(unsafe.Pointer(p))
|
||||
pCompareC := (*C.BLS12_381_projective_t)(unsafe.Pointer(pCompare))
|
||||
|
||||
// Call the C function
|
||||
// The C function doesn't keep any references to the data,
|
||||
// so it's fine if the Go garbage collector moves or deletes the data later.
|
||||
return bool(C.eq_bls12_381(pC, pCompareC))
|
||||
}
|
||||
|
||||
func (p *G1ProjectivePoint) IsOnCurve() bool {
|
||||
point := (*C.BLS12_381_projective_t)(unsafe.Pointer(p))
|
||||
res := C.projective_is_on_curve_bls12_381(point)
|
||||
|
||||
return bool(res)
|
||||
}
|
||||
|
||||
func (p *G1ProjectivePoint) Random() *G1ProjectivePoint {
|
||||
outC := (*C.BLS12_381_projective_t)(unsafe.Pointer(p))
|
||||
C.random_projective_bls12_381(outC)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *G1ProjectivePoint) StripZ() *G1PointAffine {
|
||||
return &G1PointAffine{
|
||||
X: p.X,
|
||||
Y: p.Y,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *G1ProjectivePoint) FromLimbs(x, y, z *[]uint32) *G1ProjectivePoint {
|
||||
var _x G1BaseField
|
||||
var _y G1BaseField
|
||||
var _z G1BaseField
|
||||
|
||||
_x.FromLimbs(GetFixedLimbs(x))
|
||||
_y.FromLimbs(GetFixedLimbs(y))
|
||||
_z.FromLimbs(GetFixedLimbs(z))
|
||||
|
||||
p.X = _x
|
||||
p.Y = _y
|
||||
p.Z = _z
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
/*
|
||||
* PointAffineNoInfinityBLS12_381
|
||||
*/
|
||||
|
||||
type G1PointAffine struct {
|
||||
X, Y G1BaseField
|
||||
}
|
||||
|
||||
func (p *G1PointAffine) FromProjective(projective *G1ProjectivePoint) *G1PointAffine {
|
||||
in := (*C.BLS12_381_projective_t)(unsafe.Pointer(projective))
|
||||
out := (*C.BLS12_381_affine_t)(unsafe.Pointer(p))
|
||||
|
||||
C.projective_to_affine_bls12_381(out, in)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *G1PointAffine) ToProjective() *G1ProjectivePoint {
|
||||
var Z G1BaseField
|
||||
Z.SetOne()
|
||||
|
||||
return &G1ProjectivePoint{
|
||||
X: p.X,
|
||||
Y: p.Y,
|
||||
Z: Z,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *G1PointAffine) FromLimbs(X, Y *[]uint32) *G1PointAffine {
|
||||
var _x G1BaseField
|
||||
var _y G1BaseField
|
||||
|
||||
_x.FromLimbs(GetFixedLimbs(X))
|
||||
_y.FromLimbs(GetFixedLimbs(Y))
|
||||
|
||||
p.X = _x
|
||||
p.Y = _y
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
/*
|
||||
* Multiplication
|
||||
*/
|
||||
|
||||
func MultiplyVec(a []G1ProjectivePoint, b []G1ScalarField, deviceID int) {
|
||||
if len(a) != len(b) {
|
||||
panic("a and b have different lengths")
|
||||
}
|
||||
|
||||
pointsC := (*C.BLS12_381_projective_t)(unsafe.Pointer(&a[0]))
|
||||
scalarsC := (*C.BLS12_381_scalar_t)(unsafe.Pointer(&b[0]))
|
||||
deviceIdC := C.size_t(deviceID)
|
||||
nElementsC := C.size_t(len(a))
|
||||
|
||||
C.vec_mod_mult_point_bls12_381(pointsC, scalarsC, nElementsC, deviceIdC)
|
||||
}
|
||||
|
||||
func MultiplyScalar(a []G1ScalarField, b []G1ScalarField, deviceID int) {
|
||||
if len(a) != len(b) {
|
||||
panic("a and b have different lengths")
|
||||
}
|
||||
|
||||
aC := (*C.BLS12_381_scalar_t)(unsafe.Pointer(&a[0]))
|
||||
bC := (*C.BLS12_381_scalar_t)(unsafe.Pointer(&b[0]))
|
||||
deviceIdC := C.size_t(deviceID)
|
||||
nElementsC := C.size_t(len(a))
|
||||
|
||||
C.vec_mod_mult_scalar_bls12_381(aC, bC, nElementsC, deviceIdC)
|
||||
}
|
||||
|
||||
// Multiply a flattened matrix by a vector:
|
||||
//
|
||||
// `a` - flattened matrix;
|
||||
// `b` - vector to multiply `a` by;
|
||||
func MultiplyMatrix(a []G1ScalarField, b []G1ScalarField, deviceID int) {
|
||||
c := make([]G1ScalarField, len(b))
|
||||
for i := range c {
|
||||
var p G1ScalarField
|
||||
p.SetZero()
|
||||
|
||||
c[i] = p
|
||||
}
|
||||
|
||||
aC := (*C.BLS12_381_scalar_t)(unsafe.Pointer(&a[0]))
|
||||
bC := (*C.BLS12_381_scalar_t)(unsafe.Pointer(&b[0]))
|
||||
cC := (*C.BLS12_381_scalar_t)(unsafe.Pointer(&c[0]))
|
||||
deviceIdC := C.size_t(deviceID)
|
||||
nElementsC := C.size_t(len(a))
|
||||
|
||||
C.matrix_vec_mod_mult_bls12_381(aC, bC, cC, nElementsC, deviceIdC)
|
||||
}
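// Illustrative usage sketch (not part of the generated bindings): the flattened matrix `a`
// is assumed to be laid out row-major with len(a) == len(b)*len(b). Note that, as written,
// the product is computed into an internal slice that the function does not return.
//
//	n := 4
//	a := make([]G1ScalarField, n*n)
//	b := make([]G1ScalarField, n)
//	for i := range a {
//		a[i].Random()
//	}
//	for i := range b {
//		b[i].Random()
//	}
//	MultiplyMatrix(a, b, 0)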
|
||||
|
||||
/*
|
||||
* Utils
|
||||
*/
|
||||
|
||||
func GetFixedLimbs(slice *[]uint32) [BASE_SIZE]uint32 {
|
||||
if len(*slice) <= BASE_SIZE {
|
||||
limbs := [BASE_SIZE]uint32{}
|
||||
copy(limbs[:len(*slice)], *slice)
|
||||
return limbs
|
||||
}
|
||||
|
||||
panic("slice has too many elements")
|
||||
}
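// Illustrative example (not part of the generated bindings): GetFixedLimbs zero-pads a
// slice of at most BASE_SIZE limbs into a fixed [BASE_SIZE]uint32 array and panics on
// longer input, e.g.
//
//	short := []uint32{1, 2, 3}
//	padded := GetFixedLimbs(&short) // [BASE_SIZE]uint32{1, 2, 3, 0, ...}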
|
||||
@@ -1,198 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bls12381
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewFieldBLS12_381One(t *testing.T) {
|
||||
var oneField G1BaseField
|
||||
oneField.SetOne()
|
||||
|
||||
rawOneField := [12]uint32{0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
|
||||
|
||||
assert.Equal(t, oneField.S, rawOneField)
|
||||
}
|
||||
|
||||
func TestNewFieldBLS12_381Zero(t *testing.T) {
|
||||
var zeroField G1BaseField
|
||||
zeroField.SetZero()
|
||||
|
||||
rawZeroField := [12]uint32{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
|
||||
|
||||
assert.Equal(t, zeroField.S, rawZeroField)
|
||||
}
|
||||
|
||||
func TestFieldBLS12_381ToBytesLe(t *testing.T) {
|
||||
var p G1ProjectivePoint
|
||||
p.Random()
|
||||
|
||||
expected := make([]byte, len(p.X.S)*4) // each uint32 takes 4 bytes
|
||||
for i, v := range p.X.S {
|
||||
binary.LittleEndian.PutUint32(expected[i*4:], v)
|
||||
}
|
||||
|
||||
assert.Equal(t, p.X.ToBytesLe(), expected)
|
||||
assert.Equal(t, len(p.X.ToBytesLe()), 48)
|
||||
}
|
||||
|
||||
func TestNewPointBLS12_381Zero(t *testing.T) {
|
||||
var pointZero G1ProjectivePoint
|
||||
pointZero.SetZero()
|
||||
|
||||
var baseOne G1BaseField
|
||||
baseOne.SetOne()
|
||||
|
||||
var zeroSanity G1BaseField
|
||||
zeroSanity.SetZero()
|
||||
|
||||
assert.Equal(t, pointZero.X, zeroSanity)
|
||||
assert.Equal(t, pointZero.Y, baseOne)
|
||||
assert.Equal(t, pointZero.Z, zeroSanity)
|
||||
}
|
||||
|
||||
func TestFromProjectiveToAffine(t *testing.T) {
|
||||
var projective G1ProjectivePoint
|
||||
var affine G1PointAffine
|
||||
|
||||
projective.Random()
|
||||
|
||||
affine.FromProjective(&projective)
|
||||
var projective2 G1ProjectivePoint
|
||||
projective2.FromAffine(&affine)
|
||||
|
||||
assert.True(t, projective.IsOnCurve())
|
||||
assert.True(t, projective2.IsOnCurve())
|
||||
assert.True(t, projective.Eq(&projective2))
|
||||
}
|
||||
|
||||
func TestBLS12_381Eq(t *testing.T) {
|
||||
var p1 G1ProjectivePoint
|
||||
p1.Random()
|
||||
var p2 G1ProjectivePoint
|
||||
p2.Random()
|
||||
|
||||
assert.Equal(t, p1.Eq(&p1), true)
|
||||
assert.Equal(t, p1.Eq(&p2), false)
|
||||
}
|
||||
|
||||
func TestBLS12_381StripZ(t *testing.T) {
|
||||
var p1 G1ProjectivePoint
|
||||
p1.Random()
|
||||
|
||||
p2ZLess := p1.StripZ()
|
||||
|
||||
assert.IsType(t, G1PointAffine{}, *p2ZLess)
|
||||
assert.Equal(t, p1.X, p2ZLess.X)
|
||||
assert.Equal(t, p1.Y, p2ZLess.Y)
|
||||
}
|
||||
|
||||
func TestPointBLS12_381fromLimbs(t *testing.T) {
|
||||
var p G1ProjectivePoint
|
||||
p.Random()
|
||||
|
||||
x := p.X.Limbs()
|
||||
y := p.Y.Limbs()
|
||||
z := p.Z.Limbs()
|
||||
|
||||
xSlice := x[:]
|
||||
ySlice := y[:]
|
||||
zSlice := z[:]
|
||||
|
||||
var pFromLimbs G1ProjectivePoint
|
||||
pFromLimbs.FromLimbs(&xSlice, &ySlice, &zSlice)
|
||||
|
||||
assert.Equal(t, pFromLimbs, p)
|
||||
}
|
||||
|
||||
func TestNewPointAffineNoInfinityBLS12_381Zero(t *testing.T) {
|
||||
var zeroP G1PointAffine
|
||||
|
||||
var zeroSanity G1BaseField
|
||||
zeroSanity.SetZero()
|
||||
|
||||
assert.Equal(t, zeroP.X, zeroSanity)
|
||||
assert.Equal(t, zeroP.Y, zeroSanity)
|
||||
}
|
||||
|
||||
func TestPointAffineNoInfinityBLS12_381FromLimbs(t *testing.T) {
|
||||
// Initialize your test values
|
||||
x := [12]uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}
|
||||
y := [12]uint32{9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}
|
||||
xSlice := x[:]
|
||||
ySlice := y[:]
|
||||
|
||||
// Execute your function
|
||||
var result G1PointAffine
|
||||
result.FromLimbs(&xSlice, &ySlice)
|
||||
|
||||
var xBase G1BaseField
|
||||
var yBase G1BaseField
|
||||
xBase.FromLimbs(x)
|
||||
yBase.FromLimbs(y)
|
||||
|
||||
// Define your expected result
|
||||
expected := G1PointAffine{
|
||||
X: xBase,
|
||||
Y: yBase,
|
||||
}
|
||||
|
||||
// Test if result is as expected
|
||||
assert.Equal(t, expected, result)
|
||||
}
|
||||
|
||||
func TestGetFixedLimbs(t *testing.T) {
|
||||
t.Run("case of valid input of length less than 8", func(t *testing.T) {
|
||||
slice := []uint32{1, 2, 3, 4, 5, 6, 7}
|
||||
expected := [12]uint32{1, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0, 0}
|
||||
|
||||
result := GetFixedLimbs(&slice)
|
||||
assert.Equal(t, result, expected)
|
||||
})
|
||||
|
||||
t.Run("case of valid input of length 8", func(t *testing.T) {
|
||||
slice := []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}
|
||||
expected := [12]uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}
|
||||
|
||||
result := GetFixedLimbs(&slice)
|
||||
assert.Equal(t, result, expected)
|
||||
})
|
||||
|
||||
t.Run("case of empty input", func(t *testing.T) {
|
||||
slice := []uint32{}
|
||||
expected := [12]uint32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||
|
||||
result := GetFixedLimbs(&slice)
|
||||
assert.Equal(t, result, expected)
|
||||
})
|
||||
|
||||
t.Run("case of input length greater than 8", func(t *testing.T) {
|
||||
slice := []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}
|
||||
|
||||
defer func() {
|
||||
if r := recover(); r == nil {
|
||||
t.Errorf("the code did not panic")
|
||||
}
|
||||
}()
|
||||
|
||||
GetFixedLimbs(&slice)
|
||||
})
|
||||
}
|
||||
@@ -1,102 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bls12381
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// #cgo CFLAGS: -I./include/
|
||||
// #cgo CFLAGS: -I/usr/local/cuda/include
|
||||
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbls12_381
|
||||
// #include "projective.h"
|
||||
// #include "ve_mod_mult.h"
|
||||
import "C"
|
||||
|
||||
// G2 extension field
|
||||
|
||||
type G2Element [6]uint64
|
||||
|
||||
type ExtentionField struct {
|
||||
A0, A1 G2Element
|
||||
}
|
||||
|
||||
type G2PointAffine struct {
|
||||
X, Y ExtentionField
|
||||
}
|
||||
|
||||
type G2Point struct {
|
||||
X, Y, Z ExtentionField
|
||||
}
|
||||
|
||||
func (p *G2Point) Random() *G2Point {
|
||||
outC := (*C.BLS12_381_g2_projective_t)(unsafe.Pointer(p))
|
||||
C.random_g2_projective_bls12_381(outC)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *G2Point) FromAffine(affine *G2PointAffine) *G2Point {
|
||||
out := (*C.BLS12_381_g2_projective_t)(unsafe.Pointer(p))
|
||||
in := (*C.BLS12_381_g2_affine_t)(unsafe.Pointer(affine))
|
||||
|
||||
C.g2_projective_from_affine_bls12_381(out, in)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *G2Point) Eq(pCompare *G2Point) bool {
|
||||
// Cast *PointBLS12_381 to *C.BLS12_381_projective_t
|
||||
// The unsafe.Pointer cast is necessary because Go doesn't allow direct casts
|
||||
// between different pointer types.
|
||||
// It's your responsibility to ensure that the types are compatible.
|
||||
pC := (*C.BLS12_381_g2_projective_t)(unsafe.Pointer(p))
|
||||
pCompareC := (*C.BLS12_381_g2_projective_t)(unsafe.Pointer(pCompare))
|
||||
|
||||
// Call the C function
|
||||
// The C function doesn't keep any references to the data,
|
||||
// so it's fine if the Go garbage collector moves or deletes the data later.
|
||||
return bool(C.eq_g2_bls12_381(pC, pCompareC))
|
||||
}
|
||||
|
||||
func (f *G2Element) ToBytesLe() []byte {
|
||||
var bytes []byte
|
||||
for _, val := range f {
|
||||
buf := make([]byte, 8) // 8 bytes because uint64 is 64-bit
|
||||
binary.LittleEndian.PutUint64(buf, val)
|
||||
bytes = append(bytes, buf...)
|
||||
}
|
||||
return bytes
|
||||
}
|
||||
|
||||
func (p *G2PointAffine) FromProjective(projective *G2Point) *G2PointAffine {
|
||||
out := (*C.BLS12_381_g2_affine_t)(unsafe.Pointer(p))
|
||||
in := (*C.BLS12_381_g2_projective_t)(unsafe.Pointer(projective))
|
||||
|
||||
C.g2_projective_to_affine_bls12_381(out, in)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *G2Point) IsOnCurve() bool {
|
||||
// Cast the Go point to the C projective type and delegate the curve check to the CUDA library
|
||||
point := (*C.BLS12_381_g2_projective_t)(unsafe.Pointer(p))
|
||||
res := C.g2_projective_is_on_curve_bls12_381(point)
|
||||
|
||||
return bool(res)
|
||||
}
|
||||
@@ -1,79 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bls12381
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestG2Eqg2(t *testing.T) {
|
||||
var point G2Point
|
||||
|
||||
point.Random()
|
||||
|
||||
assert.True(t, point.Eq(&point))
|
||||
}
|
||||
|
||||
func TestG2FromProjectiveToAffine(t *testing.T) {
|
||||
var projective G2Point
|
||||
projective.Random()
|
||||
|
||||
var affine G2PointAffine
|
||||
affine.FromProjective(&projective)
|
||||
|
||||
var projective2 G2Point
|
||||
projective2.FromAffine(&affine)
|
||||
|
||||
assert.True(t, projective.IsOnCurve())
|
||||
assert.True(t, projective2.IsOnCurve())
|
||||
assert.True(t, projective.Eq(&projective2))
|
||||
}
|
||||
|
||||
func TestG2Eqg2NotEqual(t *testing.T) {
|
||||
var point G2Point
|
||||
point.Random()
|
||||
|
||||
var point2 G2Point
|
||||
point2.Random()
|
||||
|
||||
assert.False(t, point.Eq(&point2))
|
||||
}
|
||||
|
||||
func TestG2ToBytes(t *testing.T) {
|
||||
element := G2Element{0x6546098ea84b6298, 0x4a384533d1f68aca, 0xaa0666972d771336, 0x1569e4a34321993}
|
||||
bytes := element.ToBytesLe()
|
||||
|
||||
assert.Equal(t, bytes, []byte{0x98, 0x62, 0x4b, 0xa8, 0x8e, 0x9, 0x46, 0x65, 0xca, 0x8a, 0xf6, 0xd1, 0x33, 0x45, 0x38, 0x4a, 0x36, 0x13, 0x77, 0x2d, 0x97, 0x66, 0x6, 0xaa, 0x93, 0x19, 0x32, 0x34, 0x4a, 0x9e, 0x56, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0})
|
||||
}
|
||||
|
||||
func TestG2ShouldConvertToProjective(t *testing.T) {
|
||||
fmt.Print() // this prevents the test from hanging. TODO: figure out why
|
||||
var pointProjective G2Point
|
||||
pointProjective.Random()
|
||||
|
||||
var pointAffine G2PointAffine
|
||||
pointAffine.FromProjective(&pointProjective)
|
||||
|
||||
var proj G2Point
|
||||
proj.FromAffine(&pointAffine)
|
||||
|
||||
assert.True(t, proj.IsOnCurve())
|
||||
assert.True(t, pointProjective.Eq(&proj))
|
||||
}
|
||||
@@ -1,98 +0,0 @@
|
||||
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
#include <cuda.h>
|
||||
#include <cuda_runtime.h>
|
||||
#include <stdbool.h>
|
||||
// msm.h
|
||||
|
||||
#ifndef _BLS12_381_MSM_H
|
||||
#define _BLS12_381_MSM_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
// Incomplete declaration of BLS12_381 projective and affine structs
|
||||
typedef struct BLS12_381_projective_t BLS12_381_projective_t;
|
||||
typedef struct BLS12_381_g2_projective_t BLS12_381_g2_projective_t;
|
||||
typedef struct BLS12_381_affine_t BLS12_381_affine_t;
|
||||
typedef struct BLS12_381_g2_affine_t BLS12_381_g2_affine_t;
|
||||
typedef struct BLS12_381_scalar_t BLS12_381_scalar_t;
|
||||
typedef cudaStream_t CudaStream_t;
|
||||
|
||||
int msm_cuda_bls12_381(
|
||||
BLS12_381_projective_t* out, BLS12_381_affine_t* points, BLS12_381_scalar_t* scalars, size_t count, size_t device_id);
|
||||
|
||||
int msm_batch_cuda_bls12_381(
|
||||
BLS12_381_projective_t* out,
|
||||
BLS12_381_affine_t* points,
|
||||
BLS12_381_scalar_t* scalars,
|
||||
size_t batch_size,
|
||||
size_t msm_size,
|
||||
size_t device_id);
|
||||
|
||||
int commit_cuda_bls12_381(
|
||||
BLS12_381_projective_t* d_out,
|
||||
BLS12_381_scalar_t* d_scalars,
|
||||
BLS12_381_affine_t* d_points,
|
||||
size_t count,
|
||||
unsigned large_bucket_factor,
|
||||
size_t device_id);
|
||||
|
||||
int commit_batch_cuda_bls12_381(
|
||||
BLS12_381_projective_t* d_out,
|
||||
BLS12_381_scalar_t* d_scalars,
|
||||
BLS12_381_affine_t* d_points,
|
||||
size_t count,
|
||||
size_t batch_size,
|
||||
size_t device_id);
|
||||
|
||||
int msm_g2_cuda_bls12_381(
|
||||
BLS12_381_g2_projective_t* out,
|
||||
BLS12_381_g2_affine_t* points,
|
||||
BLS12_381_scalar_t* scalars,
|
||||
size_t count,
|
||||
size_t device_id);
|
||||
int msm_batch_g2_cuda_bls12_381(
|
||||
BLS12_381_g2_projective_t* out,
|
||||
BLS12_381_g2_affine_t* points,
|
||||
BLS12_381_scalar_t* scalars,
|
||||
size_t batch_size,
|
||||
size_t msm_size,
|
||||
size_t device_id);
|
||||
int commit_g2_cuda_bls12_381(
|
||||
BLS12_381_g2_projective_t* d_out,
|
||||
BLS12_381_scalar_t* d_scalars,
|
||||
BLS12_381_g2_affine_t* d_points,
|
||||
size_t count,
|
||||
unsigned large_bucket_factor,
|
||||
size_t device_id);
|
||||
int commit_batch_g2_cuda_bls12_381(
|
||||
BLS12_381_g2_projective_t* d_out,
|
||||
BLS12_381_scalar_t* d_scalars,
|
||||
BLS12_381_g2_affine_t* d_points,
|
||||
size_t count,
|
||||
size_t batch_size,
|
||||
size_t device_id,
|
||||
cudaStream_t stream);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* _BLS12_381_MSM_H */
|
||||
@@ -1,195 +0,0 @@
|
||||
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
#include <cuda.h>
|
||||
#include <stdbool.h>
|
||||
// ntt.h
|
||||
|
||||
#ifndef _BLS12_381_NTT_H
|
||||
#define _BLS12_381_NTT_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
// Incomplete declaration of BLS12_381 projective and affine structs
|
||||
typedef struct BLS12_381_projective_t BLS12_381_projective_t;
|
||||
typedef struct BLS12_381_affine_t BLS12_381_affine_t;
|
||||
typedef struct BLS12_381_scalar_t BLS12_381_scalar_t;
|
||||
|
||||
typedef struct BLS12_381_g2_projective_t BLS12_381_g2_projective_t;
|
||||
typedef struct BLS12_381_g2_affine_t BLS12_381_g2_affine_t;
|
||||
|
||||
int ntt_cuda_bls12_381(BLS12_381_scalar_t* arr, uint32_t n, bool inverse, size_t device_id);
|
||||
int ntt_batch_cuda_bls12_381(
|
||||
BLS12_381_scalar_t* arr, uint32_t arr_size, uint32_t batch_size, bool inverse, size_t device_id);
|
||||
|
||||
int ecntt_cuda_bls12_381(BLS12_381_projective_t* arr, uint32_t n, bool inverse, size_t device_id);
|
||||
int ecntt_batch_cuda_bls12_381(
|
||||
BLS12_381_projective_t* arr, uint32_t arr_size, uint32_t batch_size, bool inverse, size_t device_id);
|
||||
|
||||
BLS12_381_scalar_t*
|
||||
build_domain_cuda_bls12_381(uint32_t domain_size, uint32_t logn, bool inverse, size_t device_id, size_t stream);
|
||||
int interpolate_scalars_cuda_bls12_381(
|
||||
BLS12_381_scalar_t* d_out,
|
||||
BLS12_381_scalar_t* d_evaluations,
|
||||
BLS12_381_scalar_t* d_domain,
|
||||
unsigned n,
|
||||
unsigned device_id,
|
||||
size_t stream);
|
||||
int interpolate_scalars_batch_cuda_bls12_381(
|
||||
BLS12_381_scalar_t* d_out,
|
||||
BLS12_381_scalar_t* d_evaluations,
|
||||
BLS12_381_scalar_t* d_domain,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int interpolate_points_cuda_bls12_381(
|
||||
BLS12_381_projective_t* d_out,
|
||||
BLS12_381_projective_t* d_evaluations,
|
||||
BLS12_381_scalar_t* d_domain,
|
||||
unsigned n,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int interpolate_points_batch_cuda_bls12_381(
|
||||
BLS12_381_projective_t* d_out,
|
||||
BLS12_381_projective_t* d_evaluations,
|
||||
BLS12_381_scalar_t* d_domain,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int interpolate_scalars_on_coset_cuda_bls12_381(
|
||||
BLS12_381_scalar_t* d_out,
|
||||
BLS12_381_scalar_t* d_evaluations,
|
||||
BLS12_381_scalar_t* d_domain,
|
||||
unsigned n,
|
||||
BLS12_381_scalar_t* coset_powers,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int interpolate_scalars_batch_on_coset_cuda_bls12_381(
|
||||
BLS12_381_scalar_t* d_out,
|
||||
BLS12_381_scalar_t* d_evaluations,
|
||||
BLS12_381_scalar_t* d_domain,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
BLS12_381_scalar_t* coset_powers,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int evaluate_scalars_cuda_bls12_381(
|
||||
BLS12_381_scalar_t* d_out,
|
||||
BLS12_381_scalar_t* d_coefficients,
|
||||
BLS12_381_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
unsigned device_id,
|
||||
size_t stream);
|
||||
int evaluate_scalars_batch_cuda_bls12_381(
|
||||
BLS12_381_scalar_t* d_out,
|
||||
BLS12_381_scalar_t* d_coefficients,
|
||||
BLS12_381_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int evaluate_points_cuda_bls12_381(
|
||||
BLS12_381_projective_t* d_out,
|
||||
BLS12_381_projective_t* d_coefficients,
|
||||
BLS12_381_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int evaluate_points_batch_cuda_bls12_381(
|
||||
BLS12_381_projective_t* d_out,
|
||||
BLS12_381_projective_t* d_coefficients,
|
||||
BLS12_381_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int evaluate_scalars_on_coset_cuda_bls12_381(
|
||||
BLS12_381_scalar_t* d_out,
|
||||
BLS12_381_scalar_t* d_coefficients,
|
||||
BLS12_381_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
BLS12_381_scalar_t* coset_powers,
|
||||
unsigned device_id,
|
||||
size_t stream);
|
||||
int evaluate_scalars_on_coset_batch_cuda_bls12_381(
|
||||
BLS12_381_scalar_t* d_out,
|
||||
BLS12_381_scalar_t* d_coefficients,
|
||||
BLS12_381_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
BLS12_381_scalar_t* coset_powers,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int evaluate_points_on_coset_cuda_bls12_381(
|
||||
BLS12_381_projective_t* d_out,
|
||||
BLS12_381_projective_t* d_coefficients,
|
||||
BLS12_381_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
BLS12_381_scalar_t* coset_powers,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int evaluate_points_on_coset_batch_cuda_bls12_381(
|
||||
BLS12_381_projective_t* d_out,
|
||||
BLS12_381_projective_t* d_coefficients,
|
||||
BLS12_381_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
BLS12_381_scalar_t* coset_powers,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int reverse_order_scalars_cuda_bls12_381(BLS12_381_scalar_t* arr, int n, size_t device_id, size_t stream);
|
||||
int reverse_order_scalars_batch_cuda_bls12_381(
|
||||
BLS12_381_scalar_t* arr, int n, int batch_size, size_t device_id, size_t stream);
|
||||
int reverse_order_points_cuda_bls12_381(BLS12_381_projective_t* arr, int n, size_t device_id, size_t stream);
|
||||
int reverse_order_points_batch_cuda_bls12_381(
|
||||
BLS12_381_projective_t* arr, int n, int batch_size, size_t device_id, size_t stream);
|
||||
int add_scalars_cuda_bls12_381(
|
||||
BLS12_381_scalar_t* d_out, BLS12_381_scalar_t* d_in1, BLS12_381_scalar_t* d_in2, unsigned n, size_t stream);
|
||||
int sub_scalars_cuda_bls12_381(
|
||||
BLS12_381_scalar_t* d_out, BLS12_381_scalar_t* d_in1, BLS12_381_scalar_t* d_in2, unsigned n, size_t stream);
|
||||
int to_montgomery_scalars_cuda_bls12_381(BLS12_381_scalar_t* d_inout, unsigned n, size_t stream);
|
||||
int from_montgomery_scalars_cuda_bls12_381(BLS12_381_scalar_t* d_inout, unsigned n, size_t stream);
|
||||
|
||||
// points g1
|
||||
int to_montgomery_proj_points_cuda_bls12_381(BLS12_381_projective_t* d_inout, unsigned n, size_t stream);
|
||||
int from_montgomery_proj_points_cuda_bls12_381(BLS12_381_projective_t* d_inout, unsigned n, size_t stream);
|
||||
int to_montgomery_aff_points_cuda_bls12_381(BLS12_381_affine_t* d_inout, unsigned n, size_t stream);
|
||||
int from_montgomery_aff_points_cuda_bls12_381(BLS12_381_affine_t* d_inout, unsigned n, size_t stream);
|
||||
|
||||
// points g2
|
||||
int to_montgomery_proj_points_g2_cuda_bls12_381(BLS12_381_g2_projective_t* d_inout, unsigned n, size_t stream);
|
||||
int from_montgomery_proj_points_g2_cuda_bls12_381(BLS12_381_g2_projective_t* d_inout, unsigned n, size_t stream);
|
||||
int to_montgomery_aff_points_g2_cuda_bls12_381(BLS12_381_g2_affine_t* d_inout, unsigned n, size_t stream);
|
||||
int from_montgomery_aff_points_g2_cuda_bls12_381(BLS12_381_g2_affine_t* d_inout, unsigned n, size_t stream);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* _BLS12_381_NTT_H */
|
||||
@@ -1,50 +0,0 @@
|
||||
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
#include <cuda.h>
|
||||
#include <stdbool.h>
|
||||
// projective.h
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef struct BLS12_381_projective_t BLS12_381_projective_t;
|
||||
typedef struct BLS12_381_g2_projective_t BLS12_381_g2_projective_t;
|
||||
typedef struct BLS12_381_affine_t BLS12_381_affine_t;
|
||||
typedef struct BLS12_381_g2_affine_t BLS12_381_g2_affine_t;
|
||||
typedef struct BLS12_381_scalar_t BLS12_381_scalar_t;
|
||||
|
||||
bool projective_is_on_curve_bls12_381(BLS12_381_projective_t* point1);
|
||||
|
||||
int random_scalar_bls12_381(BLS12_381_scalar_t* out);
|
||||
int random_projective_bls12_381(BLS12_381_projective_t* out);
|
||||
BLS12_381_projective_t* projective_zero_bls12_381();
|
||||
int projective_to_affine_bls12_381(BLS12_381_affine_t* out, BLS12_381_projective_t* point1);
|
||||
int projective_from_affine_bls12_381(BLS12_381_projective_t* out, BLS12_381_affine_t* point1);
|
||||
|
||||
int random_g2_projective_bls12_381(BLS12_381_g2_projective_t* out);
|
||||
int g2_projective_to_affine_bls12_381(BLS12_381_g2_affine_t* out, BLS12_381_g2_projective_t* point1);
|
||||
int g2_projective_from_affine_bls12_381(BLS12_381_g2_projective_t* out, BLS12_381_g2_affine_t* point1);
|
||||
bool g2_projective_is_on_curve_bls12_381(BLS12_381_g2_projective_t* point1);
|
||||
|
||||
bool eq_bls12_381(BLS12_381_projective_t* point1, BLS12_381_projective_t* point2);
|
||||
bool eq_g2_bls12_381(BLS12_381_g2_projective_t* point1, BLS12_381_g2_projective_t* point2);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
@@ -1,49 +0,0 @@
|
||||
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
#include <cuda.h>
|
||||
#include <stdbool.h>
|
||||
// ve_mod_mult.h
|
||||
|
||||
#ifndef _BLS12_381_VEC_MULT_H
|
||||
#define _BLS12_381_VEC_MULT_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef struct BLS12_381_projective_t BLS12_381_projective_t;
|
||||
typedef struct BLS12_381_scalar_t BLS12_381_scalar_t;
|
||||
|
||||
int32_t vec_mod_mult_point_bls12_381(
|
||||
BLS12_381_projective_t* inout, BLS12_381_scalar_t* scalar_vec, size_t n_elments, size_t device_id);
|
||||
int32_t vec_mod_mult_scalar_bls12_381(
|
||||
BLS12_381_scalar_t* inout, BLS12_381_scalar_t* scalar_vec, size_t n_elments, size_t device_id);
|
||||
int32_t vec_mod_mult_device_scalar_bls12_381(
|
||||
BLS12_381_scalar_t* inout, BLS12_381_scalar_t* scalar_vec, size_t n_elements, size_t device_id);
|
||||
int32_t matrix_vec_mod_mult_bls12_381(
|
||||
BLS12_381_scalar_t* matrix_flattened,
|
||||
BLS12_381_scalar_t* input,
|
||||
BLS12_381_scalar_t* output,
|
||||
size_t n_elments,
|
||||
size_t device_id);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* _BLS12_381_VEC_MULT_H */
|
||||
@@ -1,209 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bls12381
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// #cgo CFLAGS: -I./include/
|
||||
// #cgo CFLAGS: -I/usr/local/cuda/include
|
||||
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbls12_381
|
||||
// #include "msm.h"
|
||||
import "C"
|
||||
|
||||
func Msm(out *G1ProjectivePoint, points []G1PointAffine, scalars []G1ScalarField, device_id int) (*G1ProjectivePoint, error) {
|
||||
if len(points) != len(scalars) {
|
||||
return nil, errors.New("error on: len(points) != len(scalars)")
|
||||
}
|
||||
|
||||
pointsC := (*C.BLS12_381_affine_t)(unsafe.Pointer(&points[0]))
|
||||
scalarsC := (*C.BLS12_381_scalar_t)(unsafe.Pointer(&scalars[0]))
|
||||
outC := (*C.BLS12_381_projective_t)(unsafe.Pointer(out))
|
||||
ret := C.msm_cuda_bls12_381(outC, pointsC, scalarsC, C.size_t(len(points)), C.size_t(device_id))
|
||||
|
||||
if ret != 0 {
|
||||
return nil, fmt.Errorf("msm_cuda_bls12_381 returned error code: %d", ret)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func MsmG2(out *G2Point, points []G2PointAffine, scalars []G1ScalarField, device_id int) (*G2Point, error) {
|
||||
if len(points) != len(scalars) {
|
||||
return nil, errors.New("error on: len(points) != len(scalars)")
|
||||
}
|
||||
|
||||
pointsC := (*C.BLS12_381_g2_affine_t)(unsafe.Pointer(&points[0]))
|
||||
scalarsC := (*C.BLS12_381_scalar_t)(unsafe.Pointer(&scalars[0]))
|
||||
outC := (*C.BLS12_381_g2_projective_t)(unsafe.Pointer(out))
|
||||
|
||||
ret := C.msm_g2_cuda_bls12_381(outC, pointsC, scalarsC, C.size_t(len(points)), C.size_t(device_id))
|
||||
|
||||
if ret != 0 {
|
||||
return nil, fmt.Errorf("msm_g2_cuda_bls12_381 returned error code: %d", ret)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func MsmBatch(points *[]G1PointAffine, scalars *[]G1ScalarField, batchSize, deviceId int) ([]G1ProjectivePoint, error) {
|
||||
// Check for nil pointers
|
||||
if points == nil || scalars == nil {
|
||||
return nil, errors.New("points or scalars is nil")
|
||||
}
|
||||
|
||||
if len(*points) != len(*scalars) {
|
||||
return nil, errors.New("error on: len(points) != len(scalars)")
|
||||
}
|
||||
|
||||
// Check for empty slices
|
||||
if len(*points) == 0 || len(*scalars) == 0 {
|
||||
return nil, errors.New("points or scalars is empty")
|
||||
}
|
||||
|
||||
// Check for zero batchSize
|
||||
if batchSize <= 0 {
|
||||
return nil, errors.New("error on: batchSize must be greater than zero")
|
||||
}
|
||||
|
||||
out := make([]G1ProjectivePoint, batchSize)
|
||||
|
||||
for i := 0; i < len(out); i++ {
|
||||
var p G1ProjectivePoint
|
||||
p.SetZero()
|
||||
|
||||
out[i] = p
|
||||
}
|
||||
|
||||
outC := (*C.BLS12_381_projective_t)(unsafe.Pointer(&out[0]))
|
||||
pointsC := (*C.BLS12_381_affine_t)(unsafe.Pointer(&(*points)[0]))
|
||||
scalarsC := (*C.BLS12_381_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
|
||||
msmSizeC := C.size_t(len(*points) / batchSize)
|
||||
deviceIdC := C.size_t(deviceId)
|
||||
batchSizeC := C.size_t(batchSize)
|
||||
|
||||
ret := C.msm_batch_cuda_bls12_381(outC, pointsC, scalarsC, batchSizeC, msmSizeC, deviceIdC)
|
||||
if ret != 0 {
|
||||
return nil, fmt.Errorf("msm_batch_cuda_bls12_381 returned error code: %d", ret)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func MsmG2Batch(points *[]G2PointAffine, scalars *[]G1ScalarField, batchSize, deviceId int) ([]G2Point, error) {
|
||||
// Check for nil pointers
|
||||
if points == nil || scalars == nil {
|
||||
return nil, errors.New("points or scalars is nil")
|
||||
}
|
||||
|
||||
if len(*points) != len(*scalars) {
|
||||
return nil, errors.New("error on: len(points) != len(scalars)")
|
||||
}
|
||||
|
||||
// Check for empty slices
|
||||
if len(*points) == 0 || len(*scalars) == 0 {
|
||||
return nil, errors.New("points or scalars is empty")
|
||||
}
|
||||
|
||||
// Check for zero batchSize
|
||||
if batchSize <= 0 {
|
||||
return nil, errors.New("error on: batchSize must be greater than zero")
|
||||
}
|
||||
|
||||
out := make([]G2Point, batchSize)
|
||||
|
||||
outC := (*C.BLS12_381_g2_projective_t)(unsafe.Pointer(&out[0]))
|
||||
pointsC := (*C.BLS12_381_g2_affine_t)(unsafe.Pointer(&(*points)[0]))
|
||||
scalarsC := (*C.BLS12_381_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
|
||||
msmSizeC := C.size_t(len(*points) / batchSize)
|
||||
deviceIdC := C.size_t(deviceId)
|
||||
batchSizeC := C.size_t(batchSize)
|
||||
|
||||
ret := C.msm_batch_g2_cuda_bls12_381(outC, pointsC, scalarsC, batchSizeC, msmSizeC, deviceIdC)
|
||||
if ret != 0 {
|
||||
return nil, fmt.Errorf("msm_batch_cuda_bls12_381 returned error code: %d", ret)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func Commit(d_out, d_scalars, d_points unsafe.Pointer, count, bucketFactor int) int {
|
||||
d_outC := (*C.BLS12_381_projective_t)(d_out)
|
||||
scalarsC := (*C.BLS12_381_scalar_t)(d_scalars)
|
||||
pointsC := (*C.BLS12_381_affine_t)(d_points)
|
||||
countC := (C.size_t)(count)
|
||||
largeBucketFactorC := C.uint(bucketFactor)
|
||||
|
||||
ret := C.commit_cuda_bls12_381(d_outC, scalarsC, pointsC, countC, largeBucketFactorC, 0)
|
||||
|
||||
if ret != 0 {
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func CommitG2(d_out, d_scalars, d_points unsafe.Pointer, count, bucketFactor int) int {
|
||||
d_outC := (*C.BLS12_381_g2_projective_t)(d_out)
|
||||
scalarsC := (*C.BLS12_381_scalar_t)(d_scalars)
|
||||
pointsC := (*C.BLS12_381_g2_affine_t)(d_points)
|
||||
countC := (C.size_t)(count)
|
||||
largeBucketFactorC := C.uint(bucketFactor)
|
||||
|
||||
ret := C.commit_g2_cuda_bls12_381(d_outC, scalarsC, pointsC, countC, largeBucketFactorC, 0)
|
||||
|
||||
if ret != 0 {
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func CommitBatch(d_out, d_scalars, d_points unsafe.Pointer, count, batch_size int) int {
|
||||
d_outC := (*C.BLS12_381_projective_t)(d_out)
|
||||
scalarsC := (*C.BLS12_381_scalar_t)(d_scalars)
|
||||
pointsC := (*C.BLS12_381_affine_t)(d_points)
|
||||
countC := (C.size_t)(count)
|
||||
batch_sizeC := (C.size_t)(batch_size)
|
||||
|
||||
ret := C.commit_batch_cuda_bls12_381(d_outC, scalarsC, pointsC, countC, batch_sizeC, 0)
|
||||
|
||||
if ret != 0 {
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func CommitG2Batch(d_out, d_scalars, d_points unsafe.Pointer, count, batch_size int) int {
|
||||
d_outC := (*C.BLS12_381_g2_projective_t)(d_out)
|
||||
scalarsC := (*C.BLS12_381_scalar_t)(d_scalars)
|
||||
pointsC := (*C.BLS12_381_g2_affine_t)(d_points)
|
||||
countC := (C.size_t)(count)
|
||||
batch_sizeC := (C.size_t)(batch_size)
|
||||
|
||||
ret := C.msm_batch_g2_cuda_bls12_381(d_outC, pointsC, scalarsC, countC, batch_sizeC, 0)
|
||||
|
||||
if ret != 0 {
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
@@ -1,360 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bls12381
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"testing"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/ingonyama-zk/icicle/goicicle"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func GeneratePoints(count int) []G1PointAffine {
|
||||
// Declare a slice of integers
|
||||
var points []G1PointAffine
|
||||
|
||||
// populate the slice
|
||||
for i := 0; i < 10; i++ {
|
||||
var pointProjective G1ProjectivePoint
|
||||
pointProjective.Random()
|
||||
|
||||
var pointAffine G1PointAffine
|
||||
pointAffine.FromProjective(&pointProjective)
|
||||
|
||||
points = append(points, pointAffine)
|
||||
}
|
||||
|
||||
log2_10 := math.Log2(10)
|
||||
log2Count := math.Log2(float64(count))
|
||||
log2Size := int(math.Ceil(log2Count - log2_10))
|
||||
|
||||
for i := 0; i < log2Size; i++ {
|
||||
points = append(points, points...)
|
||||
}
|
||||
|
||||
return points[:count]
|
||||
}
|
||||
|
||||
func GeneratePointsProj(count int) []G1ProjectivePoint {
|
||||
// Declare a slice of integers
|
||||
var points []G1ProjectivePoint
|
||||
// Use a loop to populate the slice
|
||||
for i := 0; i < count; i++ {
|
||||
var p G1ProjectivePoint
|
||||
p.Random()
|
||||
|
||||
points = append(points, p)
|
||||
}
|
||||
|
||||
return points
|
||||
}
|
||||
|
||||
func GenerateScalars(count int, skewed bool) []G1ScalarField {
|
||||
// Declare a slice of integers
|
||||
var scalars []G1ScalarField
|
||||
|
||||
var rand G1ScalarField
|
||||
var zero G1ScalarField
|
||||
var one G1ScalarField
|
||||
var randLarge G1ScalarField
|
||||
|
||||
zero.SetZero()
|
||||
one.SetOne()
|
||||
randLarge.Random()
|
||||
|
||||
if skewed && count > 1_200_000 {
|
||||
for i := 0; i < count-1_200_000; i++ {
|
||||
rand.Random()
|
||||
scalars = append(scalars, rand)
|
||||
}
|
||||
|
||||
for i := 0; i < 600_000; i++ {
|
||||
scalars = append(scalars, randLarge)
|
||||
}
|
||||
for i := 0; i < 400_000; i++ {
|
||||
scalars = append(scalars, zero)
|
||||
}
|
||||
for i := 0; i < 200_000; i++ {
|
||||
scalars = append(scalars, one)
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < count; i++ {
|
||||
rand.Random()
|
||||
scalars = append(scalars, rand)
|
||||
}
|
||||
}
|
||||
|
||||
return scalars[:count]
|
||||
}
|
||||
|
||||
func TestMSM(t *testing.T) {
|
||||
for _, v := range []int{8} {
|
||||
count := 1 << v
|
||||
|
||||
points := GeneratePoints(count)
|
||||
fmt.Print("Finished generating points\n")
|
||||
scalars := GenerateScalars(count, false)
|
||||
fmt.Print("Finished generating scalars\n")
|
||||
|
||||
out := new(G1ProjectivePoint)
|
||||
startTime := time.Now()
|
||||
_, e := Msm(out, points, scalars, 0) // non mont
|
||||
fmt.Printf("icicle MSM took: %d ms\n", time.Since(startTime).Milliseconds())
|
||||
|
||||
assert.Equal(t, e, nil, "error should be nil")
|
||||
|
||||
assert.True(t, out.IsOnCurve())
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitMSM(t *testing.T) {
|
||||
for _, v := range []int{8} {
|
||||
count := 1<<v - 1
|
||||
|
||||
points := GeneratePoints(count)
|
||||
fmt.Print("Finished generating points\n")
|
||||
scalars := GenerateScalars(count, false)
|
||||
fmt.Print("Finished generating scalars\n")
|
||||
|
||||
out_d, _ := goicicle.CudaMalloc(96)
|
||||
|
||||
pointsBytes := count * 64
|
||||
points_d, _ := goicicle.CudaMalloc(pointsBytes)
|
||||
goicicle.CudaMemCpyHtoD[G1PointAffine](points_d, points, pointsBytes)
|
||||
|
||||
scalarBytes := count * 32
|
||||
scalars_d, _ := goicicle.CudaMalloc(scalarBytes)
|
||||
goicicle.CudaMemCpyHtoD[G1ScalarField](scalars_d, scalars, scalarBytes)
|
||||
|
||||
startTime := time.Now()
|
||||
e := Commit(out_d, scalars_d, points_d, count, 10)
|
||||
fmt.Printf("icicle MSM took: %d ms\n", time.Since(startTime).Milliseconds())
|
||||
|
||||
outHost := make([]G1ProjectivePoint, 1)
|
||||
goicicle.CudaMemCpyDtoH[G1ProjectivePoint](outHost, out_d, 96)
|
||||
|
||||
assert.Equal(t, e, 0, "error should be 0")
|
||||
assert.True(t, outHost[0].IsOnCurve())
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCommit(b *testing.B) {
|
||||
LOG_MSM_SIZES := []int{20, 21, 22, 23, 24, 25, 26}
|
||||
|
||||
for _, logMsmSize := range LOG_MSM_SIZES {
|
||||
msmSize := 1 << logMsmSize
|
||||
points := GeneratePoints(msmSize)
|
||||
scalars := GenerateScalars(msmSize, false)
|
||||
|
||||
out_d, _ := goicicle.CudaMalloc(96)
|
||||
|
||||
pointsBytes := msmSize * 64
|
||||
points_d, _ := goicicle.CudaMalloc(pointsBytes)
|
||||
goicicle.CudaMemCpyHtoD[G1PointAffine](points_d, points, pointsBytes)
|
||||
|
||||
scalarBytes := msmSize * 32
|
||||
scalars_d, _ := goicicle.CudaMalloc(scalarBytes)
|
||||
goicicle.CudaMemCpyHtoD[G1ScalarField](scalars_d, scalars, scalarBytes)
|
||||
|
||||
b.Run(fmt.Sprintf("MSM %d", logMsmSize), func(b *testing.B) {
|
||||
for n := 0; n < b.N; n++ {
|
||||
e := Commit(out_d, scalars_d, points_d, msmSize, 10)
|
||||
|
||||
if e != 0 {
|
||||
panic("Error occurred")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchMSM(t *testing.T) {
|
||||
for _, batchPow2 := range []int{2, 4} {
|
||||
for _, pow2 := range []int{4, 6} {
|
||||
msmSize := 1 << pow2
|
||||
batchSize := 1 << batchPow2
|
||||
count := msmSize * batchSize
|
||||
|
||||
points := GeneratePoints(count)
|
||||
scalars := GenerateScalars(count, false)
|
||||
|
||||
pointsResults, e := MsmBatch(&points, &scalars, batchSize, 0)
|
||||
|
||||
if e != nil {
|
||||
t.Errorf("MsmBatchBLS12_381 returned an error: %v", e)
|
||||
}
|
||||
|
||||
if len(pointsResults) != batchSize {
|
||||
t.Errorf("Expected length %d, but got %d", batchSize, len(pointsResults))
|
||||
}
|
||||
|
||||
for _, s := range pointsResults {
|
||||
assert.True(t, s.IsOnCurve())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMSM(b *testing.B) {
|
||||
LOG_MSM_SIZES := []int{20, 21, 22, 23, 24, 25, 26}
|
||||
|
||||
for _, logMsmSize := range LOG_MSM_SIZES {
|
||||
msmSize := 1 << logMsmSize
|
||||
points := GeneratePoints(msmSize)
|
||||
scalars := GenerateScalars(msmSize, false)
|
||||
b.Run(fmt.Sprintf("MSM %d", logMsmSize), func(b *testing.B) {
|
||||
for n := 0; n < b.N; n++ {
|
||||
out := new(G1ProjectivePoint)
|
||||
_, e := Msm(out, points, scalars, 0)
|
||||
|
||||
if e != nil {
|
||||
panic("Error occurred")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// G2
|
||||
func GenerateG2Points(count int) []G2PointAffine {
|
||||
// Declare a slice of integers
|
||||
var points []G2PointAffine
|
||||
|
||||
// populate the slice
|
||||
for i := 0; i < 10; i++ {
|
||||
fmt.Print() // this prevents the test from hanging. TODO: figure out why
|
||||
var p G2Point
|
||||
p.Random()
|
||||
var affine G2PointAffine
|
||||
affine.FromProjective(&p)
|
||||
|
||||
points = append(points, affine)
|
||||
}
|
||||
|
||||
log2_10 := math.Log2(10)
|
||||
log2Count := math.Log2(float64(count))
|
||||
log2Size := int(math.Ceil(log2Count - log2_10))
|
||||
|
||||
for i := 0; i < log2Size; i++ {
|
||||
points = append(points, points...)
|
||||
}
|
||||
|
||||
return points[:count]
|
||||
}
|
||||
|
||||
func TestMsmG2BLS12_381(t *testing.T) {
|
||||
for _, v := range []int{8} {
|
||||
count := 1 << v
|
||||
points := GenerateG2Points(count)
|
||||
fmt.Print("Finished generating points\n")
|
||||
scalars := GenerateScalars(count, false)
|
||||
fmt.Print("Finished generating scalars\n")
|
||||
|
||||
out := new(G2Point)
|
||||
_, e := MsmG2(out, points, scalars, 0)
|
||||
assert.Equal(t, e, nil, "error should be nil")
|
||||
assert.True(t, out.IsOnCurve())
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMsmG2BLS12_381(b *testing.B) {
|
||||
LOG_MSM_SIZES := []int{20, 21, 22, 23, 24, 25, 26}
|
||||
|
||||
for _, logMsmSize := range LOG_MSM_SIZES {
|
||||
msmSize := 1 << logMsmSize
|
||||
points := GenerateG2Points(msmSize)
|
||||
scalars := GenerateScalars(msmSize, false)
|
||||
b.Run(fmt.Sprintf("MSM G2 %d", logMsmSize), func(b *testing.B) {
|
||||
for n := 0; n < b.N; n++ {
|
||||
out := new(G2Point)
|
||||
_, e := MsmG2(out, points, scalars, 0)
|
||||
|
||||
if e != nil {
|
||||
panic("Error occurred")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitG2MSM(t *testing.T) {
|
||||
for _, v := range []int{8} {
|
||||
count := 1 << v
|
||||
|
||||
points := GenerateG2Points(count)
|
||||
fmt.Print("Finished generating points\n")
|
||||
scalars := GenerateScalars(count, false)
|
||||
fmt.Print("Finished generating scalars\n")
|
||||
|
||||
var sizeCheckG2PointAffine G2PointAffine
|
||||
inputPointsBytes := count * int(unsafe.Sizeof(sizeCheckG2PointAffine))
|
||||
|
||||
var sizeCheckG2Point G2Point
|
||||
out_d, _ := goicicle.CudaMalloc(int(unsafe.Sizeof(sizeCheckG2Point)))
|
||||
|
||||
points_d, _ := goicicle.CudaMalloc(inputPointsBytes)
|
||||
goicicle.CudaMemCpyHtoD[G2PointAffine](points_d, points, inputPointsBytes)
|
||||
|
||||
scalarBytes := count * 32
|
||||
scalars_d, _ := goicicle.CudaMalloc(scalarBytes)
|
||||
goicicle.CudaMemCpyHtoD[G1ScalarField](scalars_d, scalars, scalarBytes)
|
||||
|
||||
startTime := time.Now()
|
||||
e := CommitG2(out_d, scalars_d, points_d, count, 10)
|
||||
fmt.Printf("icicle MSM took: %d ms\n", time.Since(startTime).Milliseconds())
|
||||
|
||||
outHost := make([]G2Point, 1)
|
||||
goicicle.CudaMemCpyDtoH[G2Point](outHost, out_d, int(unsafe.Sizeof(sizeCheckG2Point)))
|
||||
|
||||
assert.Equal(t, e, 0, "error should be 0")
|
||||
assert.Equal(t, len(outHost), 1)
|
||||
result := outHost[0]
|
||||
|
||||
assert.True(t, result.IsOnCurve())
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchG2MSM(t *testing.T) {
|
||||
for _, batchPow2 := range []int{2, 4} {
|
||||
for _, pow2 := range []int{4, 6} {
|
||||
msmSize := 1 << pow2
|
||||
batchSize := 1 << batchPow2
|
||||
count := msmSize * batchSize
|
||||
|
||||
points := GenerateG2Points(count)
|
||||
scalars := GenerateScalars(count, false)
|
||||
|
||||
pointsResults, e := MsmG2Batch(&points, &scalars, batchSize, 0)
|
||||
|
||||
if e != nil {
|
||||
t.Errorf("MsmBatchBLS12_381 returned an error: %v", e)
|
||||
}
|
||||
|
||||
if len(pointsResults) != batchSize {
|
||||
t.Errorf("Expected length %d, but got %d", batchSize, len(pointsResults))
|
||||
}
|
||||
|
||||
for _, s := range pointsResults {
|
||||
assert.True(t, s.IsOnCurve())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,222 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bls12381
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"unsafe"
|
||||
|
||||
"github.com/ingonyama-zk/icicle/goicicle"
|
||||
)
|
||||
|
||||
// #cgo CFLAGS: -I./include/
|
||||
// #cgo CFLAGS: -I/usr/local/cuda/include
|
||||
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbls12_381
|
||||
// #include "ntt.h"
|
||||
import "C"
|
||||
|
||||
const (
|
||||
NONE = 0
|
||||
DIF = 1
|
||||
DIT = 2
|
||||
)
|
||||
|
||||
func Ntt(scalars *[]G1ScalarField, isInverse bool, deviceId int) uint64 {
|
||||
scalarsC := (*C.BLS12_381_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
|
||||
|
||||
ret := C.ntt_cuda_bls12_381(scalarsC, C.uint32_t(len(*scalars)), C.bool(isInverse), C.size_t(deviceId))
|
||||
|
||||
return uint64(ret)
|
||||
}
|
||||
|
||||
func NttBatch(scalars *[]G1ScalarField, isInverse bool, batchSize, deviceId int) uint64 {
|
||||
scalarsC := (*C.BLS12_381_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
|
||||
isInverseC := C.bool(isInverse)
|
||||
batchSizeC := C.uint32_t(batchSize)
|
||||
deviceIdC := C.size_t(deviceId)
|
||||
|
||||
ret := C.ntt_batch_cuda_bls12_381(scalarsC, C.uint32_t(len(*scalars)), batchSizeC, isInverseC, deviceIdC)
|
||||
|
||||
return uint64(ret)
|
||||
}
|
||||
|
||||
func EcNtt(values *[]G1ProjectivePoint, isInverse bool, deviceId int) uint64 {
|
||||
valuesC := (*C.BLS12_381_projective_t)(unsafe.Pointer(&(*values)[0]))
|
||||
deviceIdC := C.size_t(deviceId)
|
||||
isInverseC := C.bool(isInverse)
|
||||
n := C.uint32_t(len(*values))
|
||||
|
||||
ret := C.ecntt_cuda_bls12_381(valuesC, n, isInverseC, deviceIdC)
|
||||
|
||||
return uint64(ret)
|
||||
}
|
||||
|
||||
func EcNttBatch(values *[]G1ProjectivePoint, isInverse bool, batchSize, deviceId int) uint64 {
|
||||
valuesC := (*C.BLS12_381_projective_t)(unsafe.Pointer(&(*values)[0]))
|
||||
deviceIdC := C.size_t(deviceId)
|
||||
isInverseC := C.bool(isInverse)
|
||||
n := C.uint32_t(len(*values))
|
||||
batchSizeC := C.uint32_t(batchSize)
|
||||
|
||||
ret := C.ecntt_batch_cuda_bls12_381(valuesC, n, batchSizeC, isInverseC, deviceIdC)
|
||||
|
||||
return uint64(ret)
|
||||
}
|
||||
|
||||
func GenerateTwiddles(d_size int, log_d_size int, inverse bool) (up unsafe.Pointer, err error) {
|
||||
domain_size := C.uint32_t(d_size)
|
||||
logn := C.uint32_t(log_d_size)
|
||||
is_inverse := C.bool(inverse)
|
||||
|
||||
dp := C.build_domain_cuda_bls12_381(domain_size, logn, is_inverse, 0, 0)
|
||||
|
||||
if dp == nil {
|
||||
err = errors.New("nullptr returned from generating twiddles")
|
||||
return unsafe.Pointer(nil), err
|
||||
}
|
||||
|
||||
return unsafe.Pointer(dp), nil
|
||||
}
|
||||
|
||||
// Reverses d_scalars in-place
|
||||
func ReverseScalars(d_scalars unsafe.Pointer, len int) (int, error) {
|
||||
scalarsC := (*C.BLS12_381_scalar_t)(d_scalars)
|
||||
lenC := C.int(len)
|
||||
if success := C.reverse_order_scalars_cuda_bls12_381(scalarsC, lenC, 0, 0); success != 0 {
|
||||
return -1, errors.New("reversing failed")
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func Interpolate(scalars, twiddles, cosetPowers unsafe.Pointer, size int, isCoset bool) unsafe.Pointer {
|
||||
size_d := size * 32
|
||||
dp, err := goicicle.CudaMalloc(size_d)
|
||||
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
d_out := (*C.BLS12_381_scalar_t)(dp)
|
||||
scalarsC := (*C.BLS12_381_scalar_t)(scalars)
|
||||
twiddlesC := (*C.BLS12_381_scalar_t)(twiddles)
|
||||
cosetPowersC := (*C.BLS12_381_scalar_t)(cosetPowers)
|
||||
sizeC := C.uint(size)
|
||||
|
||||
var ret C.int
|
||||
if isCoset {
|
||||
ret = C.interpolate_scalars_on_coset_cuda_bls12_381(d_out, scalarsC, twiddlesC, sizeC, cosetPowersC, 0, 0)
|
||||
} else {
|
||||
ret = C.interpolate_scalars_cuda_bls12_381(d_out, scalarsC, twiddlesC, sizeC, 0, 0)
|
||||
}
|
||||
if ret != 0 {
|
||||
fmt.Print("error interpolating")
|
||||
}
|
||||
|
||||
return unsafe.Pointer(d_out)
|
||||
}
|
||||
|
||||
func Evaluate(scalars_out, scalars, twiddles, coset_powers unsafe.Pointer, scalars_size, twiddles_size int, isCoset bool) int {
|
||||
scalars_outC := (*C.BLS12_381_scalar_t)(scalars_out)
|
||||
scalarsC := (*C.BLS12_381_scalar_t)(scalars)
|
||||
twiddlesC := (*C.BLS12_381_scalar_t)(twiddles)
|
||||
coset_powersC := (*C.BLS12_381_scalar_t)(coset_powers)
|
||||
sizeC := C.uint(scalars_size)
|
||||
twiddlesC_size := C.uint(twiddles_size)
|
||||
|
||||
var ret C.int
|
||||
if isCoset {
|
||||
ret = C.evaluate_scalars_on_coset_cuda_bls12_381(scalars_outC, scalarsC, twiddlesC, twiddlesC_size, sizeC, coset_powersC, 0, 0)
|
||||
} else {
|
||||
ret = C.evaluate_scalars_cuda_bls12_381(scalars_outC, scalarsC, twiddlesC, twiddlesC_size, sizeC, 0, 0)
|
||||
}
|
||||
|
||||
if ret != 0 {
|
||||
fmt.Print("error interpolating")
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func VecScalarAdd(in1_d, in2_d unsafe.Pointer, size int) int {
|
||||
in1_dC := (*C.BLS12_381_scalar_t)(in1_d)
|
||||
in2_dC := (*C.BLS12_381_scalar_t)(in2_d)
|
||||
sizeC := C.uint(size)
|
||||
|
||||
ret := C.add_scalars_cuda_bls12_381(in1_dC, in1_dC, in2_dC, sizeC, 0)
|
||||
|
||||
if ret != 0 {
|
||||
fmt.Print("error adding scalar vectors")
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func VecScalarSub(in1_d, in2_d unsafe.Pointer, size int) int {
|
||||
in1_dC := (*C.BLS12_381_scalar_t)(in1_d)
|
||||
in2_dC := (*C.BLS12_381_scalar_t)(in2_d)
|
||||
sizeC := C.uint(size)
|
||||
|
||||
ret := C.sub_scalars_cuda_bls12_381(in1_dC, in1_dC, in2_dC, sizeC, 0)
|
||||
|
||||
if ret != 0 {
|
||||
fmt.Print("error subtracting scalar vectors")
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func ToMontgomery(d_scalars unsafe.Pointer, len int) (int, error) {
|
||||
scalarsC := (*C.BLS12_381_scalar_t)(d_scalars)
|
||||
lenC := C.uint(len)
|
||||
if success := C.to_montgomery_scalars_cuda_bls12_381(scalarsC, lenC, 0); success != 0 {
|
||||
return -1, errors.New("reversing failed")
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func FromMontgomery(d_scalars unsafe.Pointer, len int) (int, error) {
|
||||
scalarsC := (*C.BLS12_381_scalar_t)(d_scalars)
|
||||
lenC := C.uint(len)
|
||||
if success := C.from_montgomery_scalars_cuda_bls12_381(scalarsC, lenC, 0); success != 0 {
|
||||
return -1, errors.New("reversing failed")
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func AffinePointFromMontgomery(d_points unsafe.Pointer, len int) (int, error) {
|
||||
pointsC := (*C.BLS12_381_affine_t)(d_points)
|
||||
lenC := C.uint(len)
|
||||
|
||||
if success := C.from_montgomery_aff_points_cuda_bls12_381(pointsC, lenC, 0); success != 0 {
|
||||
return -1, errors.New("reversing failed")
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func G2AffinePointFromMontgomery(d_points unsafe.Pointer, len int) (int, error) {
|
||||
pointsC := (*C.BLS12_381_g2_affine_t)(d_points)
|
||||
lenC := C.uint(len)
|
||||
|
||||
if success := C.from_montgomery_aff_points_g2_cuda_bls12_381(pointsC, lenC, 0); success != 0 {
|
||||
return -1, errors.New("reversing failed")
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
@@ -1,148 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bls12381
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNttBLS12_381Batch(t *testing.T) {
|
||||
count := 1 << 20
|
||||
scalars := GenerateScalars(count, false)
|
||||
|
||||
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
|
||||
copy(nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, scalars)
|
||||
NttBatch(&nttResult, false, count, 0)
|
||||
assert.NotEqual(t, nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, nttResult)
|
||||
}
|
||||
|
||||
func TestNttBLS12_381CompareToGnarkDIF(t *testing.T) {
|
||||
count := 1 << 2
|
||||
scalars := GenerateScalars(count, false)
|
||||
|
||||
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
|
||||
copy(nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, scalars)
|
||||
Ntt(&nttResult, false, 0)
|
||||
assert.NotEqual(t, nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, nttResult)
|
||||
}
|
||||
|
||||
func TestINttBLS12_381CompareToGnarkDIT(t *testing.T) {
|
||||
count := 1 << 3
|
||||
scalars := GenerateScalars(count, false)
|
||||
|
||||
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
|
||||
copy(nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, scalars)
|
||||
Ntt(&nttResult, true, 0)
|
||||
assert.NotEqual(t, nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, nttResult)
|
||||
}
|
||||
|
||||
func TestNttBLS12_381(t *testing.T) {
|
||||
count := 1 << 3
|
||||
|
||||
scalars := GenerateScalars(count, false)
|
||||
|
||||
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
|
||||
copy(nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, scalars)
|
||||
Ntt(&nttResult, false, 0)
|
||||
assert.NotEqual(t, nttResult, scalars)
|
||||
|
||||
inttResult := make([]G1ScalarField, len(nttResult))
|
||||
copy(inttResult, nttResult)
|
||||
|
||||
assert.Equal(t, inttResult, nttResult)
|
||||
Ntt(&inttResult, true, 0)
|
||||
assert.Equal(t, inttResult, scalars)
|
||||
}
|
||||
|
||||
func TestNttBatchBLS12_381(t *testing.T) {
|
||||
count := 1 << 5
|
||||
batches := 4
|
||||
|
||||
scalars := GenerateScalars(count*batches, false)
|
||||
|
||||
var scalarVecOfVec [][]G1ScalarField = make([][]G1ScalarField, 0)
|
||||
|
||||
for i := 0; i < batches; i++ {
|
||||
start := i * count
|
||||
end := (i + 1) * count
|
||||
batch := make([]G1ScalarField, len(scalars[start:end]))
|
||||
copy(batch, scalars[start:end])
|
||||
scalarVecOfVec = append(scalarVecOfVec, batch)
|
||||
}
|
||||
|
||||
nttBatchResult := make([]G1ScalarField, len(scalars))
|
||||
copy(nttBatchResult, scalars)
|
||||
|
||||
NttBatch(&nttBatchResult, false, count, 0)
|
||||
|
||||
var nttResultVecOfVec [][]G1ScalarField
|
||||
|
||||
for i := 0; i < batches; i++ {
|
||||
// Clone the slice
|
||||
clone := make([]G1ScalarField, len(scalarVecOfVec[i]))
|
||||
copy(clone, scalarVecOfVec[i])
|
||||
|
||||
// Add it to the result vector of vectors
|
||||
nttResultVecOfVec = append(nttResultVecOfVec, clone)
|
||||
|
||||
// Call the ntt_bls12_381 function
|
||||
Ntt(&nttResultVecOfVec[i], false, 0)
|
||||
}
|
||||
|
||||
assert.NotEqual(t, nttBatchResult, scalars)
|
||||
|
||||
// Check that the ntt of each vec of scalars is equal to the intt of the specific batch
|
||||
for i := 0; i < batches; i++ {
|
||||
if !reflect.DeepEqual(nttResultVecOfVec[i], nttBatchResult[i*count:((i+1)*count)]) {
|
||||
t.Errorf("ntt of vec of scalars not equal to intt of specific batch")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkNTT(b *testing.B) {
|
||||
LOG_NTT_SIZES := []int{12, 15, 20, 21, 22, 23, 24, 25, 26}
|
||||
|
||||
for _, logNTTSize := range LOG_NTT_SIZES {
|
||||
nttSize := 1 << logNTTSize
|
||||
b.Run(fmt.Sprintf("NTT %d", logNTTSize), func(b *testing.B) {
|
||||
scalars := GenerateScalars(nttSize, false)
|
||||
|
||||
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
|
||||
copy(nttResult, scalars)
|
||||
for n := 0; n < b.N; n++ {
|
||||
Ntt(&nttResult, false, 0)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,38 +0,0 @@
|
||||
package bls12381
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
// Function to convert [8]uint32 to [4]uint64
|
||||
func ConvertUint32ArrToUint64Arr(arr32 [8]uint32) [4]uint64 {
|
||||
var arr64 [4]uint64
|
||||
for i := 0; i < len(arr32); i += 2 {
|
||||
arr64[i/2] = (uint64(arr32[i]) << 32) | uint64(arr32[i+1])
|
||||
}
|
||||
return arr64
|
||||
}
|
||||
|
||||
func ConvertUint64ArrToUint32Arr4(arr64 [4]uint64) [8]uint32 {
|
||||
var arr32 [8]uint32
|
||||
for i, v := range arr64 {
|
||||
b := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(b, v)
|
||||
|
||||
arr32[i*2] = binary.LittleEndian.Uint32(b[0:4])
|
||||
arr32[i*2+1] = binary.LittleEndian.Uint32(b[4:8])
|
||||
}
|
||||
|
||||
return arr32
|
||||
}
|
||||
|
||||
func ConvertUint64ArrToUint32Arr6(arr64 [6]uint64) [12]uint32 {
|
||||
var arr32 [12]uint32
|
||||
for i, v := range arr64 {
|
||||
b := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(b, v)
|
||||
|
||||
arr32[i*2] = binary.LittleEndian.Uint32(b[0:4])
|
||||
arr32[i*2+1] = binary.LittleEndian.Uint32(b[4:8])
|
||||
}
|
||||
|
||||
return arr32
|
||||
}
|
||||
@@ -1,81 +0,0 @@
|
||||
package bls12381
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestConvertUint32ArrToUint64Arr(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
input [8]uint32
|
||||
want [4]uint64
|
||||
}{
|
||||
{
|
||||
name: "Test with incremental array",
|
||||
input: [8]uint32{1, 2, 3, 4, 5, 6, 7, 8},
|
||||
want: [4]uint64{4294967298, 12884901892, 21474836486, 30064771080},
|
||||
},
|
||||
{
|
||||
name: "Test with all zeros",
|
||||
input: [8]uint32{0, 0, 0, 0, 0, 0, 0, 0},
|
||||
want: [4]uint64{0, 0, 0, 0},
|
||||
},
|
||||
{
|
||||
name: "Test with maximum uint32 values",
|
||||
input: [8]uint32{4294967295, 4294967295, 4294967295, 4294967295, 4294967295, 4294967295, 4294967295, 4294967295},
|
||||
want: [4]uint64{18446744073709551615, 18446744073709551615, 18446744073709551615, 18446744073709551615},
|
||||
},
|
||||
{
|
||||
name: "Test with alternating min and max uint32 values",
|
||||
input: [8]uint32{0, 4294967295, 0, 4294967295, 0, 4294967295, 0, 4294967295},
|
||||
want: [4]uint64{4294967295, 4294967295, 4294967295, 4294967295},
|
||||
},
|
||||
{
|
||||
name: "Test with alternating max and min uint32 values",
|
||||
input: [8]uint32{4294967295, 0, 4294967295, 0, 4294967295, 0, 4294967295, 0},
|
||||
want: [4]uint64{18446744069414584320, 18446744069414584320, 18446744069414584320, 18446744069414584320},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got := ConvertUint32ArrToUint64Arr(tc.input)
|
||||
if got != tc.want {
|
||||
t.Errorf("got %v, want %v", got, tc.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestConvertUint64ArrToUint32Arr(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
input [6]uint64
|
||||
expected [12]uint32
|
||||
}{
|
||||
{
|
||||
name: "test one",
|
||||
input: [6]uint64{1, 2, 3, 4, 5, 6},
|
||||
expected: [12]uint32{1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0},
|
||||
},
|
||||
{
|
||||
name: "test two",
|
||||
input: [6]uint64{100, 200, 300, 400, 500, 600},
|
||||
expected: [12]uint32{100, 0, 200, 0, 300, 0, 400, 0, 500, 0, 600, 0},
|
||||
},
|
||||
{
|
||||
name: "test three",
|
||||
input: [6]uint64{1000, 2000, 3000, 4000, 5000, 6000},
|
||||
expected: [12]uint32{1000, 0, 2000, 0, 3000, 0, 4000, 0, 5000, 0, 6000, 0},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got := ConvertUint64ArrToUint32Arr6(tc.input)
|
||||
if got != tc.expected {
|
||||
t.Errorf("got %v, want %v", got, tc.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,42 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bls12381
|
||||
|
||||
// #cgo CFLAGS: -I./include/
|
||||
// #cgo CFLAGS: -I/usr/local/cuda/include
|
||||
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbls12_381
|
||||
// #include "ve_mod_mult.h"
|
||||
import "C"
|
||||
import (
|
||||
"fmt"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func VecScalarMulMod(scalarVec1, scalarVec2 unsafe.Pointer, size int) int {
|
||||
scalarVec1C := (*C.BLS12_381_scalar_t)(scalarVec1)
|
||||
scalarVec2C := (*C.BLS12_381_scalar_t)(scalarVec2)
|
||||
sizeC := C.size_t(size)
|
||||
|
||||
ret := C.vec_mod_mult_device_scalar_bls12_381(scalarVec1C, scalarVec2C, sizeC, 0)
|
||||
|
||||
if ret != 0 {
|
||||
fmt.Print("error multiplying scalar vectors")
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
@@ -1,328 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bn254
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
// #cgo CFLAGS: -I./include/
|
||||
// #cgo CFLAGS: -I/usr/local/cuda/include
|
||||
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbn254
|
||||
// #include "projective.h"
|
||||
// #include "ve_mod_mult.h"
|
||||
import "C"
|
||||
|
||||
const SCALAR_SIZE = 8
|
||||
const BASE_SIZE = 8
|
||||
|
||||
type G1ScalarField struct {
|
||||
S [SCALAR_SIZE]uint32
|
||||
}
|
||||
|
||||
type G1BaseField struct {
|
||||
S [BASE_SIZE]uint32
|
||||
}
|
||||
|
||||
/*
|
||||
* BaseField Constructors
|
||||
*/
|
||||
|
||||
func (f *G1BaseField) SetZero() *G1BaseField {
|
||||
var S [BASE_SIZE]uint32
|
||||
f.S = S
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *G1BaseField) SetOne() *G1BaseField {
|
||||
var S [BASE_SIZE]uint32
|
||||
|
||||
S[0] = 1
|
||||
|
||||
f.S = S
|
||||
return f
|
||||
}
|
||||
|
||||
func (p *G1ProjectivePoint) FromAffine(affine *G1PointAffine) *G1ProjectivePoint {
|
||||
out := (*C.BN254_projective_t)(unsafe.Pointer(p))
|
||||
in := (*C.BN254_affine_t)(unsafe.Pointer(affine))
|
||||
|
||||
C.projective_from_affine_bn254(out, in)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (f *G1BaseField) FromLimbs(limbs [BASE_SIZE]uint32) *G1BaseField {
|
||||
copy(f.S[:], limbs[:])
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
/*
|
||||
* BaseField methods
|
||||
*/
|
||||
|
||||
func (f *G1BaseField) Limbs() [BASE_SIZE]uint32 {
|
||||
return f.S
|
||||
}
|
||||
|
||||
func (f *G1BaseField) ToBytesLe() []byte {
|
||||
bytes := make([]byte, len(f.S)*4)
|
||||
for i, v := range f.S {
|
||||
binary.LittleEndian.PutUint32(bytes[i*4:], v)
|
||||
}
|
||||
|
||||
return bytes
|
||||
}
|
||||
|
||||
/*
|
||||
* ScalarField methods
|
||||
*/
|
||||
|
||||
func (p *G1ScalarField) Random() *G1ScalarField {
|
||||
outC := (*C.BN254_scalar_t)(unsafe.Pointer(p))
|
||||
C.random_scalar_bn254(outC)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (f *G1ScalarField) SetZero() *G1ScalarField {
|
||||
var S [SCALAR_SIZE]uint32
|
||||
f.S = S
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *G1ScalarField) SetOne() *G1ScalarField {
|
||||
var S [SCALAR_SIZE]uint32
|
||||
S[0] = 1
|
||||
f.S = S
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func (a *G1ScalarField) Eq(b *G1ScalarField) bool {
|
||||
for i, v := range a.S {
|
||||
if b.S[i] != v {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
/*
|
||||
* ScalarField methods
|
||||
*/
|
||||
|
||||
func (f *G1ScalarField) Limbs() [SCALAR_SIZE]uint32 {
|
||||
return f.S
|
||||
}
|
||||
|
||||
func (f *G1ScalarField) ToBytesLe() []byte {
|
||||
bytes := make([]byte, len(f.S)*4)
|
||||
for i, v := range f.S {
|
||||
binary.LittleEndian.PutUint32(bytes[i*4:], v)
|
||||
}
|
||||
|
||||
return bytes
|
||||
}
|
||||
|
||||
/*
|
||||
* PointBN254
|
||||
*/
|
||||
|
||||
type G1ProjectivePoint struct {
|
||||
X, Y, Z G1BaseField
|
||||
}
|
||||
|
||||
func (f *G1ProjectivePoint) SetZero() *G1ProjectivePoint {
|
||||
var yOne G1BaseField
|
||||
yOne.SetOne()
|
||||
|
||||
var xZero G1BaseField
|
||||
xZero.SetZero()
|
||||
|
||||
var zZero G1BaseField
|
||||
zZero.SetZero()
|
||||
|
||||
f.X = xZero
|
||||
f.Y = yOne
|
||||
f.Z = zZero
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func (p *G1ProjectivePoint) Eq(pCompare *G1ProjectivePoint) bool {
|
||||
// Cast *PointBN254 to *C.BN254_projective_t
|
||||
// The unsafe.Pointer cast is necessary because Go doesn't allow direct casts
|
||||
// between different pointer types.
|
||||
// It'S your responsibility to ensure that the types are compatible.
|
||||
pC := (*C.BN254_projective_t)(unsafe.Pointer(p))
|
||||
pCompareC := (*C.BN254_projective_t)(unsafe.Pointer(pCompare))
|
||||
|
||||
// Call the C function
|
||||
// The C function doesn't keep any references to the data,
|
||||
// so it'S fine if the Go garbage collector moves or deletes the data later.
|
||||
return bool(C.eq_bn254(pC, pCompareC))
|
||||
}
|
||||
|
||||
func (p *G1ProjectivePoint) IsOnCurve() bool {
|
||||
point := (*C.BN254_projective_t)(unsafe.Pointer(p))
|
||||
res := C.projective_is_on_curve_bn254(point)
|
||||
|
||||
return bool(res)
|
||||
}
|
||||
|
||||
func (p *G1ProjectivePoint) Random() *G1ProjectivePoint {
|
||||
outC := (*C.BN254_projective_t)(unsafe.Pointer(p))
|
||||
C.random_projective_bn254(outC)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *G1ProjectivePoint) StripZ() *G1PointAffine {
|
||||
return &G1PointAffine{
|
||||
X: p.X,
|
||||
Y: p.Y,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *G1ProjectivePoint) FromLimbs(x, y, z *[]uint32) *G1ProjectivePoint {
|
||||
var _x G1BaseField
|
||||
var _y G1BaseField
|
||||
var _z G1BaseField
|
||||
|
||||
_x.FromLimbs(GetFixedLimbs(x))
|
||||
_y.FromLimbs(GetFixedLimbs(y))
|
||||
_z.FromLimbs(GetFixedLimbs(z))
|
||||
|
||||
p.X = _x
|
||||
p.Y = _y
|
||||
p.Z = _z
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
/*
|
||||
* PointAffineNoInfinityBN254
|
||||
*/
|
||||
|
||||
type G1PointAffine struct {
|
||||
X, Y G1BaseField
|
||||
}
|
||||
|
||||
func (p *G1PointAffine) FromProjective(projective *G1ProjectivePoint) *G1PointAffine {
|
||||
in := (*C.BN254_projective_t)(unsafe.Pointer(projective))
|
||||
out := (*C.BN254_affine_t)(unsafe.Pointer(p))
|
||||
|
||||
C.projective_to_affine_bn254(out, in)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *G1PointAffine) ToProjective() *G1ProjectivePoint {
|
||||
var Z G1BaseField
|
||||
Z.SetOne()
|
||||
|
||||
return &G1ProjectivePoint{
|
||||
X: p.X,
|
||||
Y: p.Y,
|
||||
Z: Z,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *G1PointAffine) FromLimbs(X, Y *[]uint32) *G1PointAffine {
|
||||
var _x G1BaseField
|
||||
var _y G1BaseField
|
||||
|
||||
_x.FromLimbs(GetFixedLimbs(X))
|
||||
_y.FromLimbs(GetFixedLimbs(Y))
|
||||
|
||||
p.X = _x
|
||||
p.Y = _y
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
/*
|
||||
* Multiplication
|
||||
*/
|
||||
|
||||
func MultiplyVec(a []G1ProjectivePoint, b []G1ScalarField, deviceID int) {
|
||||
if len(a) != len(b) {
|
||||
panic("a and b have different lengths")
|
||||
}
|
||||
|
||||
pointsC := (*C.BN254_projective_t)(unsafe.Pointer(&a[0]))
|
||||
scalarsC := (*C.BN254_scalar_t)(unsafe.Pointer(&b[0]))
|
||||
deviceIdC := C.size_t(deviceID)
|
||||
nElementsC := C.size_t(len(a))
|
||||
|
||||
C.vec_mod_mult_point_bn254(pointsC, scalarsC, nElementsC, deviceIdC)
|
||||
}
|
||||
|
||||
func MultiplyScalar(a []G1ScalarField, b []G1ScalarField, deviceID int) {
|
||||
if len(a) != len(b) {
|
||||
panic("a and b have different lengths")
|
||||
}
|
||||
|
||||
aC := (*C.BN254_scalar_t)(unsafe.Pointer(&a[0]))
|
||||
bC := (*C.BN254_scalar_t)(unsafe.Pointer(&b[0]))
|
||||
deviceIdC := C.size_t(deviceID)
|
||||
nElementsC := C.size_t(len(a))
|
||||
|
||||
C.vec_mod_mult_scalar_bn254(aC, bC, nElementsC, deviceIdC)
|
||||
}
|
||||
|
||||
// Multiply a matrix by a scalar:
|
||||
//
|
||||
// `a` - flattenned matrix;
|
||||
// `b` - vector to multiply `a` by;
|
||||
func MultiplyMatrix(a []G1ScalarField, b []G1ScalarField, deviceID int) {
|
||||
c := make([]G1ScalarField, len(b))
|
||||
for i := range c {
|
||||
var p G1ScalarField
|
||||
p.SetZero()
|
||||
|
||||
c[i] = p
|
||||
}
|
||||
|
||||
aC := (*C.BN254_scalar_t)(unsafe.Pointer(&a[0]))
|
||||
bC := (*C.BN254_scalar_t)(unsafe.Pointer(&b[0]))
|
||||
cC := (*C.BN254_scalar_t)(unsafe.Pointer(&c[0]))
|
||||
deviceIdC := C.size_t(deviceID)
|
||||
nElementsC := C.size_t(len(a))
|
||||
|
||||
C.matrix_vec_mod_mult_bn254(aC, bC, cC, nElementsC, deviceIdC)
|
||||
}
|
||||
|
||||
/*
|
||||
* Utils
|
||||
*/
|
||||
|
||||
func GetFixedLimbs(slice *[]uint32) [BASE_SIZE]uint32 {
|
||||
if len(*slice) <= BASE_SIZE {
|
||||
limbs := [BASE_SIZE]uint32{}
|
||||
copy(limbs[:len(*slice)], *slice)
|
||||
return limbs
|
||||
}
|
||||
|
||||
panic("slice has too many elements")
|
||||
}
|
||||
@@ -1,198 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by Ingonyama DO NOT EDIT

package bn254

import (
    "encoding/binary"
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestNewFieldBN254One(t *testing.T) {
    var oneField G1BaseField
    oneField.SetOne()

    rawOneField := [8]uint32([8]uint32{0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0})

    assert.Equal(t, oneField.S, rawOneField)
}

func TestNewFieldBN254Zero(t *testing.T) {
    var zeroField G1BaseField
    zeroField.SetZero()

    rawZeroField := [8]uint32([8]uint32{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0})

    assert.Equal(t, zeroField.S, rawZeroField)
}

func TestFieldBN254ToBytesLe(t *testing.T) {
    var p G1ProjectivePoint
    p.Random()

    expected := make([]byte, len(p.X.S)*4) // each uint32 takes 4 bytes
    for i, v := range p.X.S {
        binary.LittleEndian.PutUint32(expected[i*4:], v)
    }

    assert.Equal(t, p.X.ToBytesLe(), expected)
    assert.Equal(t, len(p.X.ToBytesLe()), 32)
}

func TestNewPointBN254Zero(t *testing.T) {
    var pointZero G1ProjectivePoint
    pointZero.SetZero()

    var baseOne G1BaseField
    baseOne.SetOne()

    var zeroSanity G1BaseField
    zeroSanity.SetZero()

    assert.Equal(t, pointZero.X, zeroSanity)
    assert.Equal(t, pointZero.Y, baseOne)
    assert.Equal(t, pointZero.Z, zeroSanity)
}

func TestFromProjectiveToAffine(t *testing.T) {
    var projective G1ProjectivePoint
    var affine G1PointAffine

    projective.Random()

    affine.FromProjective(&projective)
    var projective2 G1ProjectivePoint
    projective2.FromAffine(&affine)

    assert.True(t, projective.IsOnCurve())
    assert.True(t, projective2.IsOnCurve())
    assert.True(t, projective.Eq(&projective2))
}

func TestBN254Eq(t *testing.T) {
    var p1 G1ProjectivePoint
    p1.Random()
    var p2 G1ProjectivePoint
    p2.Random()

    assert.Equal(t, p1.Eq(&p1), true)
    assert.Equal(t, p1.Eq(&p2), false)
}

func TestBN254StripZ(t *testing.T) {
    var p1 G1ProjectivePoint
    p1.Random()

    p2ZLess := p1.StripZ()

    assert.IsType(t, G1PointAffine{}, *p2ZLess)
    assert.Equal(t, p1.X, p2ZLess.X)
    assert.Equal(t, p1.Y, p2ZLess.Y)
}

func TestPointBN254fromLimbs(t *testing.T) {
    var p G1ProjectivePoint
    p.Random()

    x := p.X.Limbs()
    y := p.Y.Limbs()
    z := p.Z.Limbs()

    xSlice := x[:]
    ySlice := y[:]
    zSlice := z[:]

    var pFromLimbs G1ProjectivePoint
    pFromLimbs.FromLimbs(&xSlice, &ySlice, &zSlice)

    assert.Equal(t, pFromLimbs, p)
}

func TestNewPointAffineNoInfinityBN254Zero(t *testing.T) {
    var zeroP G1PointAffine

    var zeroSanity G1BaseField
    zeroSanity.SetZero()

    assert.Equal(t, zeroP.X, zeroSanity)
    assert.Equal(t, zeroP.Y, zeroSanity)
}

func TestPointAffineNoInfinityBN254FromLimbs(t *testing.T) {
    // Initialize your test values
    x := [8]uint32{1, 2, 3, 4, 5, 6, 7, 8}
    y := [8]uint32{9, 10, 11, 12, 13, 14, 15, 16}
    xSlice := x[:]
    ySlice := y[:]

    // Execute your function
    var result G1PointAffine
    result.FromLimbs(&xSlice, &ySlice)

    var xBase G1BaseField
    var yBase G1BaseField
    xBase.FromLimbs(x)
    yBase.FromLimbs(y)

    // Define your expected result
    expected := G1PointAffine{
        X: xBase,
        Y: yBase,
    }

    // Test if result is as expected
    assert.Equal(t, expected, result)
}

func TestGetFixedLimbs(t *testing.T) {
    t.Run("case of valid input of length less than 8", func(t *testing.T) {
        slice := []uint32{1, 2, 3, 4, 5, 6, 7}
        expected := [8]uint32{1, 2, 3, 4, 5, 6, 7, 0}

        result := GetFixedLimbs(&slice)
        assert.Equal(t, result, expected)
    })

    t.Run("case of valid input of length 8", func(t *testing.T) {
        slice := []uint32{1, 2, 3, 4, 5, 6, 7, 8}
        expected := [8]uint32{1, 2, 3, 4, 5, 6, 7, 8}

        result := GetFixedLimbs(&slice)
        assert.Equal(t, result, expected)
    })

    t.Run("case of empty input", func(t *testing.T) {
        slice := []uint32{}
        expected := [8]uint32{0, 0, 0, 0, 0, 0, 0, 0}

        result := GetFixedLimbs(&slice)
        assert.Equal(t, result, expected)
    })

    t.Run("case of input length greater than 8", func(t *testing.T) {
        slice := []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9}

        defer func() {
            if r := recover(); r == nil {
                t.Errorf("the code did not panic")
            }
        }()

        GetFixedLimbs(&slice)
    })
}
@@ -1,102 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by Ingonyama DO NOT EDIT

package bn254

import (
    "encoding/binary"
    "unsafe"
)

// #cgo CFLAGS: -I./include/
// #cgo CFLAGS: -I/usr/local/cuda/include
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbn254
// #include "projective.h"
// #include "ve_mod_mult.h"
import "C"

// G2 extension field

type G2Element [4]uint64

type ExtentionField struct {
    A0, A1 G2Element
}

type G2PointAffine struct {
    X, Y ExtentionField
}

type G2Point struct {
    X, Y, Z ExtentionField
}

func (p *G2Point) Random() *G2Point {
    outC := (*C.BN254_g2_projective_t)(unsafe.Pointer(p))
    C.random_g2_projective_bn254(outC)

    return p
}

func (p *G2Point) FromAffine(affine *G2PointAffine) *G2Point {
    out := (*C.BN254_g2_projective_t)(unsafe.Pointer(p))
    in := (*C.BN254_g2_affine_t)(unsafe.Pointer(affine))

    C.g2_projective_from_affine_bn254(out, in)

    return p
}

func (p *G2Point) Eq(pCompare *G2Point) bool {
    // Cast *G2Point to *C.BN254_g2_projective_t
    // The unsafe.Pointer cast is necessary because Go doesn't allow direct casts
    // between different pointer types.
    // It's your responsibility to ensure that the types are compatible.
    pC := (*C.BN254_g2_projective_t)(unsafe.Pointer(p))
    pCompareC := (*C.BN254_g2_projective_t)(unsafe.Pointer(pCompare))

    // Call the C function
    // The C function doesn't keep any references to the data,
    // so it's fine if the Go garbage collector moves or deletes the data later.
    return bool(C.eq_g2_bn254(pC, pCompareC))
}

func (f *G2Element) ToBytesLe() []byte {
    var bytes []byte
    for _, val := range f {
        buf := make([]byte, 8) // 8 bytes because uint64 is 64-bit
        binary.LittleEndian.PutUint64(buf, val)
        bytes = append(bytes, buf...)
    }
    return bytes
}

func (p *G2PointAffine) FromProjective(projective *G2Point) *G2PointAffine {
    out := (*C.BN254_g2_affine_t)(unsafe.Pointer(p))
    in := (*C.BN254_g2_projective_t)(unsafe.Pointer(projective))

    C.g2_projective_to_affine_bn254(out, in)

    return p
}

func (p *G2Point) IsOnCurve() bool {
    // Directly copy memory from the C struct to the Go struct
    point := (*C.BN254_g2_projective_t)(unsafe.Pointer(p))
    res := C.g2_projective_is_on_curve_bn254(point)

    return bool(res)
}
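The G2 bindings above lean on one invariant: each Go struct (G2Element, ExtentionField, G2Point) must have exactly the same size and field layout as its C counterpart, otherwise the unsafe.Pointer reinterpretation in Random, Eq and friends is undefined behavior. A minimal, self-contained cgo sketch of that pattern, using a made-up fake_g2_element_t type rather than the real icicle structs, could look like this:

package main

/*
// Hypothetical C type whose layout matches the Go struct below.
typedef struct { unsigned long long limbs[4]; } fake_g2_element_t;
static unsigned long long first_limb(fake_g2_element_t* e) { return e->limbs[0]; }
*/
import "C"

import (
    "fmt"
    "unsafe"
)

// g2Element mirrors fake_g2_element_t field-for-field; the cast below is only
// valid because both sides have identical size and layout.
type g2Element struct {
    limbs [4]uint64
}

func main() {
    e := g2Element{limbs: [4]uint64{42, 0, 0, 0}}
    // Same pattern as G2Point.Eq: reinterpret the Go value as a C pointer type.
    eC := (*C.fake_g2_element_t)(unsafe.Pointer(&e))
    fmt.Println(uint64(C.first_limb(eC))) // prints 42
}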
@@ -1,79 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by Ingonyama DO NOT EDIT

package bn254

import (
    "fmt"
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestG2Eqg2(t *testing.T) {
    var point G2Point

    point.Random()

    assert.True(t, point.Eq(&point))
}

func TestG2FromProjectiveToAffine(t *testing.T) {
    var projective G2Point
    projective.Random()

    var affine G2PointAffine
    affine.FromProjective(&projective)

    var projective2 G2Point
    projective2.FromAffine(&affine)

    assert.True(t, projective.IsOnCurve())
    assert.True(t, projective2.IsOnCurve())
    assert.True(t, projective.Eq(&projective2))
}

func TestG2Eqg2NotEqual(t *testing.T) {
    var point G2Point
    point.Random()

    var point2 G2Point
    point2.Random()

    assert.False(t, point.Eq(&point2))
}

func TestG2ToBytes(t *testing.T) {
    element := G2Element{0x6546098ea84b6298, 0x4a384533d1f68aca, 0xaa0666972d771336, 0x1569e4a34321993}
    bytes := element.ToBytesLe()

    assert.Equal(t, bytes, []byte{0x98, 0x62, 0x4b, 0xa8, 0x8e, 0x9, 0x46, 0x65, 0xca, 0x8a, 0xf6, 0xd1, 0x33, 0x45, 0x38, 0x4a, 0x36, 0x13, 0x77, 0x2d, 0x97, 0x66, 0x6, 0xaa, 0x93, 0x19, 0x32, 0x34, 0x4a, 0x9e, 0x56, 0x1})
}

func TestG2ShouldConvertToProjective(t *testing.T) {
    fmt.Print() // this prevents the test from hanging. TODO: figure out why
    var pointProjective G2Point
    pointProjective.Random()

    var pointAffine G2PointAffine
    pointAffine.FromProjective(&pointProjective)

    var proj G2Point
    proj.FromAffine(&pointAffine)

    assert.True(t, proj.IsOnCurve())
    assert.True(t, pointProjective.Eq(&proj))
}
@@ -1,94 +0,0 @@

// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by Ingonyama DO NOT EDIT

#include <cuda.h>
#include <cuda_runtime.h>
#include <stdbool.h>
// msm.h

#ifndef _BN254_MSM_H
#define _BN254_MSM_H

#ifdef __cplusplus
extern "C" {
#endif

// Incomplete declaration of BN254 projective and affine structs
typedef struct BN254_projective_t BN254_projective_t;
typedef struct BN254_g2_projective_t BN254_g2_projective_t;
typedef struct BN254_affine_t BN254_affine_t;
typedef struct BN254_g2_affine_t BN254_g2_affine_t;
typedef struct BN254_scalar_t BN254_scalar_t;
typedef cudaStream_t CudaStream_t;

int msm_cuda_bn254(
  BN254_projective_t* out, BN254_affine_t* points, BN254_scalar_t* scalars, size_t count, size_t device_id);

int msm_batch_cuda_bn254(
  BN254_projective_t* out,
  BN254_affine_t* points,
  BN254_scalar_t* scalars,
  size_t batch_size,
  size_t msm_size,
  size_t device_id);

int commit_cuda_bn254(
  BN254_projective_t* d_out,
  BN254_scalar_t* d_scalars,
  BN254_affine_t* d_points,
  size_t count,
  unsigned large_bucket_factor,
  size_t device_id);

int commit_batch_cuda_bn254(
  BN254_projective_t* d_out,
  BN254_scalar_t* d_scalars,
  BN254_affine_t* d_points,
  size_t count,
  size_t batch_size,
  size_t device_id);

int msm_g2_cuda_bn254(
  BN254_g2_projective_t* out, BN254_g2_affine_t* points, BN254_scalar_t* scalars, size_t count, size_t device_id);
int msm_batch_g2_cuda_bn254(
  BN254_g2_projective_t* out,
  BN254_g2_affine_t* points,
  BN254_scalar_t* scalars,
  size_t batch_size,
  size_t msm_size,
  size_t device_id);
int commit_g2_cuda_bn254(
  BN254_g2_projective_t* d_out,
  BN254_scalar_t* d_scalars,
  BN254_g2_affine_t* d_points,
  size_t count,
  unsigned large_bucket_factor,
  size_t device_id);
int commit_batch_g2_cuda_bn254(
  BN254_g2_projective_t* d_out,
  BN254_scalar_t* d_scalars,
  BN254_g2_affine_t* d_points,
  size_t count,
  size_t batch_size,
  size_t device_id,
  cudaStream_t stream);

#ifdef __cplusplus
}
#endif

#endif /* _BN254_MSM_H */
@@ -1,193 +0,0 @@
|
||||
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
#include <cuda.h>
|
||||
#include <stdbool.h>
|
||||
// ntt.h
|
||||
|
||||
#ifndef _BN254_NTT_H
|
||||
#define _BN254_NTT_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
// Incomplete declaration of BN254 projective and affine structs
|
||||
typedef struct BN254_projective_t BN254_projective_t;
|
||||
typedef struct BN254_affine_t BN254_affine_t;
|
||||
typedef struct BN254_scalar_t BN254_scalar_t;
|
||||
|
||||
typedef struct BN254_g2_projective_t BN254_g2_projective_t;
|
||||
typedef struct BN254_g2_affine_t BN254_g2_affine_t;
|
||||
|
||||
int ntt_cuda_bn254(BN254_scalar_t* arr, uint32_t n, bool inverse, size_t device_id);
|
||||
int ntt_batch_cuda_bn254(BN254_scalar_t* arr, uint32_t arr_size, uint32_t batch_size, bool inverse, size_t device_id);
|
||||
|
||||
int ecntt_cuda_bn254(BN254_projective_t* arr, uint32_t n, bool inverse, size_t device_id);
|
||||
int ecntt_batch_cuda_bn254(
|
||||
BN254_projective_t* arr, uint32_t arr_size, uint32_t batch_size, bool inverse, size_t device_id);
|
||||
|
||||
BN254_scalar_t*
|
||||
build_domain_cuda_bn254(uint32_t domain_size, uint32_t logn, bool inverse, size_t device_id, size_t stream);
|
||||
int interpolate_scalars_cuda_bn254(
|
||||
BN254_scalar_t* d_out,
|
||||
BN254_scalar_t* d_evaluations,
|
||||
BN254_scalar_t* d_domain,
|
||||
unsigned n,
|
||||
unsigned device_id,
|
||||
size_t stream);
|
||||
int interpolate_scalars_batch_cuda_bn254(
|
||||
BN254_scalar_t* d_out,
|
||||
BN254_scalar_t* d_evaluations,
|
||||
BN254_scalar_t* d_domain,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int interpolate_points_cuda_bn254(
|
||||
BN254_projective_t* d_out,
|
||||
BN254_projective_t* d_evaluations,
|
||||
BN254_scalar_t* d_domain,
|
||||
unsigned n,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int interpolate_points_batch_cuda_bn254(
|
||||
BN254_projective_t* d_out,
|
||||
BN254_projective_t* d_evaluations,
|
||||
BN254_scalar_t* d_domain,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int interpolate_scalars_on_coset_cuda_bn254(
|
||||
BN254_scalar_t* d_out,
|
||||
BN254_scalar_t* d_evaluations,
|
||||
BN254_scalar_t* d_domain,
|
||||
unsigned n,
|
||||
BN254_scalar_t* coset_powers,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int interpolate_scalars_batch_on_coset_cuda_bn254(
|
||||
BN254_scalar_t* d_out,
|
||||
BN254_scalar_t* d_evaluations,
|
||||
BN254_scalar_t* d_domain,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
BN254_scalar_t* coset_powers,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int evaluate_scalars_cuda_bn254(
|
||||
BN254_scalar_t* d_out,
|
||||
BN254_scalar_t* d_coefficients,
|
||||
BN254_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
unsigned device_id,
|
||||
size_t stream);
|
||||
int evaluate_scalars_batch_cuda_bn254(
|
||||
BN254_scalar_t* d_out,
|
||||
BN254_scalar_t* d_coefficients,
|
||||
BN254_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int evaluate_points_cuda_bn254(
|
||||
BN254_projective_t* d_out,
|
||||
BN254_projective_t* d_coefficients,
|
||||
BN254_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int evaluate_points_batch_cuda_bn254(
|
||||
BN254_projective_t* d_out,
|
||||
BN254_projective_t* d_coefficients,
|
||||
BN254_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int evaluate_scalars_on_coset_cuda_bn254(
|
||||
BN254_scalar_t* d_out,
|
||||
BN254_scalar_t* d_coefficients,
|
||||
BN254_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
BN254_scalar_t* coset_powers,
|
||||
unsigned device_id,
|
||||
size_t stream);
|
||||
int evaluate_scalars_on_coset_batch_cuda_bn254(
|
||||
BN254_scalar_t* d_out,
|
||||
BN254_scalar_t* d_coefficients,
|
||||
BN254_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
BN254_scalar_t* coset_powers,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int evaluate_points_on_coset_cuda_bn254(
|
||||
BN254_projective_t* d_out,
|
||||
BN254_projective_t* d_coefficients,
|
||||
BN254_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
BN254_scalar_t* coset_powers,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int evaluate_points_on_coset_batch_cuda_bn254(
|
||||
BN254_projective_t* d_out,
|
||||
BN254_projective_t* d_coefficients,
|
||||
BN254_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
BN254_scalar_t* coset_powers,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int reverse_order_scalars_cuda_bn254(BN254_scalar_t* arr, int n, size_t device_id, size_t stream);
|
||||
int reverse_order_scalars_batch_cuda_bn254(BN254_scalar_t* arr, int n, int batch_size, size_t device_id, size_t stream);
|
||||
int reverse_order_points_cuda_bn254(BN254_projective_t* arr, int n, size_t device_id, size_t stream);
|
||||
int reverse_order_points_batch_cuda_bn254(
|
||||
BN254_projective_t* arr, int n, int batch_size, size_t device_id, size_t stream);
|
||||
int add_scalars_cuda_bn254(
|
||||
BN254_scalar_t* d_out, BN254_scalar_t* d_in1, BN254_scalar_t* d_in2, unsigned n, size_t stream);
|
||||
int sub_scalars_cuda_bn254(
|
||||
BN254_scalar_t* d_out, BN254_scalar_t* d_in1, BN254_scalar_t* d_in2, unsigned n, size_t stream);
|
||||
int to_montgomery_scalars_cuda_bn254(BN254_scalar_t* d_inout, unsigned n, size_t stream);
|
||||
int from_montgomery_scalars_cuda_bn254(BN254_scalar_t* d_inout, unsigned n, size_t stream);
|
||||
|
||||
// points g1
|
||||
int to_montgomery_proj_points_cuda_bn254(BN254_projective_t* d_inout, unsigned n, size_t stream);
|
||||
int from_montgomery_proj_points_cuda_bn254(BN254_projective_t* d_inout, unsigned n, size_t stream);
|
||||
int to_montgomery_aff_points_cuda_bn254(BN254_affine_t* d_inout, unsigned n, size_t stream);
|
||||
int from_montgomery_aff_points_cuda_bn254(BN254_affine_t* d_inout, unsigned n, size_t stream);
|
||||
|
||||
// points g2
|
||||
int to_montgomery_proj_points_g2_cuda_bn254(BN254_g2_projective_t* d_inout, unsigned n, size_t stream);
|
||||
int from_montgomery_proj_points_g2_cuda_bn254(BN254_g2_projective_t* d_inout, unsigned n, size_t stream);
|
||||
int to_montgomery_aff_points_g2_cuda_bn254(BN254_g2_affine_t* d_inout, unsigned n, size_t stream);
|
||||
int from_montgomery_aff_points_g2_cuda_bn254(BN254_g2_affine_t* d_inout, unsigned n, size_t stream);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* _BN254_NTT_H */
|
||||
@@ -1,50 +0,0 @@

// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by Ingonyama DO NOT EDIT

#include <cuda.h>
#include <stdbool.h>
// projective.h

#ifdef __cplusplus
extern "C" {
#endif

typedef struct BN254_projective_t BN254_projective_t;
typedef struct BN254_g2_projective_t BN254_g2_projective_t;
typedef struct BN254_affine_t BN254_affine_t;
typedef struct BN254_g2_affine_t BN254_g2_affine_t;
typedef struct BN254_scalar_t BN254_scalar_t;

bool projective_is_on_curve_bn254(BN254_projective_t* point1);

int random_scalar_bn254(BN254_scalar_t* out);
int random_projective_bn254(BN254_projective_t* out);
BN254_projective_t* projective_zero_bn254();
int projective_to_affine_bn254(BN254_affine_t* out, BN254_projective_t* point1);
int projective_from_affine_bn254(BN254_projective_t* out, BN254_affine_t* point1);

int random_g2_projective_bn254(BN254_g2_projective_t* out);
int g2_projective_to_affine_bn254(BN254_g2_affine_t* out, BN254_g2_projective_t* point1);
int g2_projective_from_affine_bn254(BN254_g2_projective_t* out, BN254_g2_affine_t* point1);
bool g2_projective_is_on_curve_bn254(BN254_g2_projective_t* point1);

bool eq_bn254(BN254_projective_t* point1, BN254_projective_t* point2);
bool eq_g2_bn254(BN254_g2_projective_t* point1, BN254_g2_projective_t* point2);

#ifdef __cplusplus
}
#endif
@@ -1,209 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by Ingonyama DO NOT EDIT

package bn254

import (
    "errors"
    "fmt"
    "unsafe"
)

// #cgo CFLAGS: -I./include/
// #cgo CFLAGS: -I/usr/local/cuda/include
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbn254
// #include "msm.h"
import "C"

func Msm(out *G1ProjectivePoint, points []G1PointAffine, scalars []G1ScalarField, device_id int) (*G1ProjectivePoint, error) {
    if len(points) != len(scalars) {
        return nil, errors.New("error on: len(points) != len(scalars)")
    }

    pointsC := (*C.BN254_affine_t)(unsafe.Pointer(&points[0]))
    scalarsC := (*C.BN254_scalar_t)(unsafe.Pointer(&scalars[0]))
    outC := (*C.BN254_projective_t)(unsafe.Pointer(out))
    ret := C.msm_cuda_bn254(outC, pointsC, scalarsC, C.size_t(len(points)), C.size_t(device_id))

    if ret != 0 {
        return nil, fmt.Errorf("msm_cuda_bn254 returned error code: %d", ret)
    }

    return out, nil
}

func MsmG2(out *G2Point, points []G2PointAffine, scalars []G1ScalarField, device_id int) (*G2Point, error) {
    if len(points) != len(scalars) {
        return nil, errors.New("error on: len(points) != len(scalars)")
    }

    pointsC := (*C.BN254_g2_affine_t)(unsafe.Pointer(&points[0]))
    scalarsC := (*C.BN254_scalar_t)(unsafe.Pointer(&scalars[0]))
    outC := (*C.BN254_g2_projective_t)(unsafe.Pointer(out))

    ret := C.msm_g2_cuda_bn254(outC, pointsC, scalarsC, C.size_t(len(points)), C.size_t(device_id))

    if ret != 0 {
        return nil, fmt.Errorf("msm_g2_cuda_bn254 returned error code: %d", ret)
    }

    return out, nil
}

func MsmBatch(points *[]G1PointAffine, scalars *[]G1ScalarField, batchSize, deviceId int) ([]G1ProjectivePoint, error) {
    // Check for nil pointers
    if points == nil || scalars == nil {
        return nil, errors.New("points or scalars is nil")
    }

    if len(*points) != len(*scalars) {
        return nil, errors.New("error on: len(points) != len(scalars)")
    }

    // Check for empty slices
    if len(*points) == 0 || len(*scalars) == 0 {
        return nil, errors.New("points or scalars is empty")
    }

    // Check for zero batchSize
    if batchSize <= 0 {
        return nil, errors.New("error on: batchSize must be greater than zero")
    }

    out := make([]G1ProjectivePoint, batchSize)

    for i := 0; i < len(out); i++ {
        var p G1ProjectivePoint
        p.SetZero()

        out[i] = p
    }

    outC := (*C.BN254_projective_t)(unsafe.Pointer(&out[0]))
    pointsC := (*C.BN254_affine_t)(unsafe.Pointer(&(*points)[0]))
    scalarsC := (*C.BN254_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
    msmSizeC := C.size_t(len(*points) / batchSize)
    deviceIdC := C.size_t(deviceId)
    batchSizeC := C.size_t(batchSize)

    ret := C.msm_batch_cuda_bn254(outC, pointsC, scalarsC, batchSizeC, msmSizeC, deviceIdC)
    if ret != 0 {
        return nil, fmt.Errorf("msm_batch_cuda_bn254 returned error code: %d", ret)
    }

    return out, nil
}

func MsmG2Batch(points *[]G2PointAffine, scalars *[]G1ScalarField, batchSize, deviceId int) ([]G2Point, error) {
    // Check for nil pointers
    if points == nil || scalars == nil {
        return nil, errors.New("points or scalars is nil")
    }

    if len(*points) != len(*scalars) {
        return nil, errors.New("error on: len(points) != len(scalars)")
    }

    // Check for empty slices
    if len(*points) == 0 || len(*scalars) == 0 {
        return nil, errors.New("points or scalars is empty")
    }

    // Check for zero batchSize
    if batchSize <= 0 {
        return nil, errors.New("error on: batchSize must be greater than zero")
    }

    out := make([]G2Point, batchSize)

    outC := (*C.BN254_g2_projective_t)(unsafe.Pointer(&out[0]))
    pointsC := (*C.BN254_g2_affine_t)(unsafe.Pointer(&(*points)[0]))
    scalarsC := (*C.BN254_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
    msmSizeC := C.size_t(len(*points) / batchSize)
    deviceIdC := C.size_t(deviceId)
    batchSizeC := C.size_t(batchSize)

    ret := C.msm_batch_g2_cuda_bn254(outC, pointsC, scalarsC, batchSizeC, msmSizeC, deviceIdC)
    if ret != 0 {
        return nil, fmt.Errorf("msm_batch_g2_cuda_bn254 returned error code: %d", ret)
    }

    return out, nil
}

func Commit(d_out, d_scalars, d_points unsafe.Pointer, count, bucketFactor int) int {
    d_outC := (*C.BN254_projective_t)(d_out)
    scalarsC := (*C.BN254_scalar_t)(d_scalars)
    pointsC := (*C.BN254_affine_t)(d_points)
    countC := (C.size_t)(count)
    largeBucketFactorC := C.uint(bucketFactor)

    ret := C.commit_cuda_bn254(d_outC, scalarsC, pointsC, countC, largeBucketFactorC, 0)

    if ret != 0 {
        return -1
    }

    return 0
}

func CommitG2(d_out, d_scalars, d_points unsafe.Pointer, count, bucketFactor int) int {
    d_outC := (*C.BN254_g2_projective_t)(d_out)
    scalarsC := (*C.BN254_scalar_t)(d_scalars)
    pointsC := (*C.BN254_g2_affine_t)(d_points)
    countC := (C.size_t)(count)
    largeBucketFactorC := C.uint(bucketFactor)

    ret := C.commit_g2_cuda_bn254(d_outC, scalarsC, pointsC, countC, largeBucketFactorC, 0)

    if ret != 0 {
        return -1
    }

    return 0
}

func CommitBatch(d_out, d_scalars, d_points unsafe.Pointer, count, batch_size int) int {
    d_outC := (*C.BN254_projective_t)(d_out)
    scalarsC := (*C.BN254_scalar_t)(d_scalars)
    pointsC := (*C.BN254_affine_t)(d_points)
    countC := (C.size_t)(count)
    batch_sizeC := (C.size_t)(batch_size)

    ret := C.commit_batch_cuda_bn254(d_outC, scalarsC, pointsC, countC, batch_sizeC, 0)

    if ret != 0 {
        return -1
    }

    return 0
}

func CommitG2Batch(d_out, d_scalars, d_points unsafe.Pointer, count, batch_size int) int {
    d_outC := (*C.BN254_g2_projective_t)(d_out)
    scalarsC := (*C.BN254_scalar_t)(d_scalars)
    pointsC := (*C.BN254_g2_affine_t)(d_points)
    countC := (C.size_t)(count)
    batch_sizeC := (C.size_t)(batch_size)

    ret := C.msm_batch_g2_cuda_bn254(d_outC, pointsC, scalarsC, countC, batch_sizeC, 0)

    if ret != 0 {
        return -1
    }

    return 0
}
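One detail of the batched API above that is easy to miss: MsmBatch and MsmG2Batch derive the per-MSM size as len(points) / batchSize, so the caller must pack all batches back-to-back into one flattened slice, with batch i occupying indices [i*msmSize, (i+1)*msmSize) of both the points and the scalars. A small GPU-free sketch of that layout (the batchRanges helper is hypothetical, not part of these bindings):

package main

import "fmt"

// batchRanges reproduces the index arithmetic MsmBatch applies to its inputs.
func batchRanges(total, batchSize int) [][2]int {
    msmSize := total / batchSize // same derivation as msmSizeC above
    ranges := make([][2]int, batchSize)
    for i := 0; i < batchSize; i++ {
        ranges[i] = [2]int{i * msmSize, (i + 1) * msmSize}
    }
    return ranges
}

func main() {
    // 4 MSMs of size 8, packed back-to-back into one slice of 32 points/scalars.
    for i, r := range batchRanges(32, 4) {
        fmt.Printf("batch %d uses indices [%d, %d)\n", i, r[0], r[1])
    }
}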
@@ -1,360 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bn254
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"testing"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/ingonyama-zk/icicle/goicicle"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func GeneratePoints(count int) []G1PointAffine {
|
||||
// Declare a slice of integers
|
||||
var points []G1PointAffine
|
||||
|
||||
// populate the slice
|
||||
for i := 0; i < 10; i++ {
|
||||
var pointProjective G1ProjectivePoint
|
||||
pointProjective.Random()
|
||||
|
||||
var pointAffine G1PointAffine
|
||||
pointAffine.FromProjective(&pointProjective)
|
||||
|
||||
points = append(points, pointAffine)
|
||||
}
|
||||
|
||||
log2_10 := math.Log2(10)
|
||||
log2Count := math.Log2(float64(count))
|
||||
log2Size := int(math.Ceil(log2Count - log2_10))
|
||||
|
||||
for i := 0; i < log2Size; i++ {
|
||||
points = append(points, points...)
|
||||
}
|
||||
|
||||
return points[:count]
|
||||
}
|
||||
|
||||
func GeneratePointsProj(count int) []G1ProjectivePoint {
|
||||
// Declare a slice of integers
|
||||
var points []G1ProjectivePoint
|
||||
// Use a loop to populate the slice
|
||||
for i := 0; i < count; i++ {
|
||||
var p G1ProjectivePoint
|
||||
p.Random()
|
||||
|
||||
points = append(points, p)
|
||||
}
|
||||
|
||||
return points
|
||||
}
|
||||
|
||||
func GenerateScalars(count int, skewed bool) []G1ScalarField {
|
||||
// Declare a slice of integers
|
||||
var scalars []G1ScalarField
|
||||
|
||||
var rand G1ScalarField
|
||||
var zero G1ScalarField
|
||||
var one G1ScalarField
|
||||
var randLarge G1ScalarField
|
||||
|
||||
zero.SetZero()
|
||||
one.SetOne()
|
||||
randLarge.Random()
|
||||
|
||||
if skewed && count > 1_200_000 {
|
||||
for i := 0; i < count-1_200_000; i++ {
|
||||
rand.Random()
|
||||
scalars = append(scalars, rand)
|
||||
}
|
||||
|
||||
for i := 0; i < 600_000; i++ {
|
||||
scalars = append(scalars, randLarge)
|
||||
}
|
||||
for i := 0; i < 400_000; i++ {
|
||||
scalars = append(scalars, zero)
|
||||
}
|
||||
for i := 0; i < 200_000; i++ {
|
||||
scalars = append(scalars, one)
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < count; i++ {
|
||||
rand.Random()
|
||||
scalars = append(scalars, rand)
|
||||
}
|
||||
}
|
||||
|
||||
return scalars[:count]
|
||||
}
|
||||
|
||||
func TestMSM(t *testing.T) {
|
||||
for _, v := range []int{8} {
|
||||
count := 1 << v
|
||||
|
||||
points := GeneratePoints(count)
|
||||
fmt.Print("Finished generating points\n")
|
||||
scalars := GenerateScalars(count, false)
|
||||
fmt.Print("Finished generating scalars\n")
|
||||
|
||||
out := new(G1ProjectivePoint)
|
||||
startTime := time.Now()
|
||||
_, e := Msm(out, points, scalars, 0) // non mont
|
||||
fmt.Printf("icicle MSM took: %d ms\n", time.Since(startTime).Milliseconds())
|
||||
|
||||
assert.Equal(t, e, nil, "error should be nil")
|
||||
|
||||
assert.True(t, out.IsOnCurve())
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitMSM(t *testing.T) {
|
||||
for _, v := range []int{8} {
|
||||
count := 1<<v - 1
|
||||
|
||||
points := GeneratePoints(count)
|
||||
fmt.Print("Finished generating points\n")
|
||||
scalars := GenerateScalars(count, false)
|
||||
fmt.Print("Finished generating scalars\n")
|
||||
|
||||
out_d, _ := goicicle.CudaMalloc(96)
|
||||
|
||||
pointsBytes := count * 64
|
||||
points_d, _ := goicicle.CudaMalloc(pointsBytes)
|
||||
goicicle.CudaMemCpyHtoD[G1PointAffine](points_d, points, pointsBytes)
|
||||
|
||||
scalarBytes := count * 32
|
||||
scalars_d, _ := goicicle.CudaMalloc(scalarBytes)
|
||||
goicicle.CudaMemCpyHtoD[G1ScalarField](scalars_d, scalars, scalarBytes)
|
||||
|
||||
startTime := time.Now()
|
||||
e := Commit(out_d, scalars_d, points_d, count, 10)
|
||||
fmt.Printf("icicle MSM took: %d ms\n", time.Since(startTime).Milliseconds())
|
||||
|
||||
outHost := make([]G1ProjectivePoint, 1)
|
||||
goicicle.CudaMemCpyDtoH[G1ProjectivePoint](outHost, out_d, 96)
|
||||
|
||||
assert.Equal(t, e, 0, "error should be 0")
|
||||
assert.True(t, outHost[0].IsOnCurve())
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCommit(b *testing.B) {
|
||||
LOG_MSM_SIZES := []int{20, 21, 22, 23, 24, 25, 26}
|
||||
|
||||
for _, logMsmSize := range LOG_MSM_SIZES {
|
||||
msmSize := 1 << logMsmSize
|
||||
points := GeneratePoints(msmSize)
|
||||
scalars := GenerateScalars(msmSize, false)
|
||||
|
||||
out_d, _ := goicicle.CudaMalloc(96)
|
||||
|
||||
pointsBytes := msmSize * 64
|
||||
points_d, _ := goicicle.CudaMalloc(pointsBytes)
|
||||
goicicle.CudaMemCpyHtoD[G1PointAffine](points_d, points, pointsBytes)
|
||||
|
||||
scalarBytes := msmSize * 32
|
||||
scalars_d, _ := goicicle.CudaMalloc(scalarBytes)
|
||||
goicicle.CudaMemCpyHtoD[G1ScalarField](scalars_d, scalars, scalarBytes)
|
||||
|
||||
b.Run(fmt.Sprintf("MSM %d", logMsmSize), func(b *testing.B) {
|
||||
for n := 0; n < b.N; n++ {
|
||||
e := Commit(out_d, scalars_d, points_d, msmSize, 10)
|
||||
|
||||
if e != 0 {
|
||||
panic("Error occurred")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchMSM(t *testing.T) {
|
||||
for _, batchPow2 := range []int{2, 4} {
|
||||
for _, pow2 := range []int{4, 6} {
|
||||
msmSize := 1 << pow2
|
||||
batchSize := 1 << batchPow2
|
||||
count := msmSize * batchSize
|
||||
|
||||
points := GeneratePoints(count)
|
||||
scalars := GenerateScalars(count, false)
|
||||
|
||||
pointsResults, e := MsmBatch(&points, &scalars, batchSize, 0)
|
||||
|
||||
if e != nil {
|
||||
t.Errorf("MsmBatchBN254 returned an error: %v", e)
|
||||
}
|
||||
|
||||
if len(pointsResults) != batchSize {
|
||||
t.Errorf("Expected length %d, but got %d", batchSize, len(pointsResults))
|
||||
}
|
||||
|
||||
for _, s := range pointsResults {
|
||||
assert.True(t, s.IsOnCurve())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMSM(b *testing.B) {
|
||||
LOG_MSM_SIZES := []int{20, 21, 22, 23, 24, 25, 26}
|
||||
|
||||
for _, logMsmSize := range LOG_MSM_SIZES {
|
||||
msmSize := 1 << logMsmSize
|
||||
points := GeneratePoints(msmSize)
|
||||
scalars := GenerateScalars(msmSize, false)
|
||||
b.Run(fmt.Sprintf("MSM %d", logMsmSize), func(b *testing.B) {
|
||||
for n := 0; n < b.N; n++ {
|
||||
out := new(G1ProjectivePoint)
|
||||
_, e := Msm(out, points, scalars, 0)
|
||||
|
||||
if e != nil {
|
||||
panic("Error occurred")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// G2
|
||||
func GenerateG2Points(count int) []G2PointAffine {
|
||||
// Declare a slice of integers
|
||||
var points []G2PointAffine
|
||||
|
||||
// populate the slice
|
||||
for i := 0; i < 10; i++ {
|
||||
fmt.Print() // this prevents the test from hanging. TODO: figure out why
|
||||
var p G2Point
|
||||
p.Random()
|
||||
var affine G2PointAffine
|
||||
affine.FromProjective(&p)
|
||||
|
||||
points = append(points, affine)
|
||||
}
|
||||
|
||||
log2_10 := math.Log2(10)
|
||||
log2Count := math.Log2(float64(count))
|
||||
log2Size := int(math.Ceil(log2Count - log2_10))
|
||||
|
||||
for i := 0; i < log2Size; i++ {
|
||||
points = append(points, points...)
|
||||
}
|
||||
|
||||
return points[:count]
|
||||
}
|
||||
|
||||
func TestMsmG2BN254(t *testing.T) {
|
||||
for _, v := range []int{8} {
|
||||
count := 1 << v
|
||||
points := GenerateG2Points(count)
|
||||
fmt.Print("Finished generating points\n")
|
||||
scalars := GenerateScalars(count, false)
|
||||
fmt.Print("Finished generating scalars\n")
|
||||
|
||||
out := new(G2Point)
|
||||
_, e := MsmG2(out, points, scalars, 0)
|
||||
assert.Equal(t, e, nil, "error should be nil")
|
||||
assert.True(t, out.IsOnCurve())
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMsmG2BN254(b *testing.B) {
|
||||
LOG_MSM_SIZES := []int{20, 21, 22, 23, 24, 25, 26}
|
||||
|
||||
for _, logMsmSize := range LOG_MSM_SIZES {
|
||||
msmSize := 1 << logMsmSize
|
||||
points := GenerateG2Points(msmSize)
|
||||
scalars := GenerateScalars(msmSize, false)
|
||||
b.Run(fmt.Sprintf("MSM G2 %d", logMsmSize), func(b *testing.B) {
|
||||
for n := 0; n < b.N; n++ {
|
||||
out := new(G2Point)
|
||||
_, e := MsmG2(out, points, scalars, 0)
|
||||
|
||||
if e != nil {
|
||||
panic("Error occurred")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitG2MSM(t *testing.T) {
|
||||
for _, v := range []int{8} {
|
||||
count := 1 << v
|
||||
|
||||
points := GenerateG2Points(count)
|
||||
fmt.Print("Finished generating points\n")
|
||||
scalars := GenerateScalars(count, false)
|
||||
fmt.Print("Finished generating scalars\n")
|
||||
|
||||
var sizeCheckG2PointAffine G2PointAffine
|
||||
inputPointsBytes := count * int(unsafe.Sizeof(sizeCheckG2PointAffine))
|
||||
|
||||
var sizeCheckG2Point G2Point
|
||||
out_d, _ := goicicle.CudaMalloc(int(unsafe.Sizeof(sizeCheckG2Point)))
|
||||
|
||||
points_d, _ := goicicle.CudaMalloc(inputPointsBytes)
|
||||
goicicle.CudaMemCpyHtoD[G2PointAffine](points_d, points, inputPointsBytes)
|
||||
|
||||
scalarBytes := count * 32
|
||||
scalars_d, _ := goicicle.CudaMalloc(scalarBytes)
|
||||
goicicle.CudaMemCpyHtoD[G1ScalarField](scalars_d, scalars, scalarBytes)
|
||||
|
||||
startTime := time.Now()
|
||||
e := CommitG2(out_d, scalars_d, points_d, count, 10)
|
||||
fmt.Printf("icicle MSM took: %d ms\n", time.Since(startTime).Milliseconds())
|
||||
|
||||
outHost := make([]G2Point, 1)
|
||||
goicicle.CudaMemCpyDtoH[G2Point](outHost, out_d, int(unsafe.Sizeof(sizeCheckG2Point)))
|
||||
|
||||
assert.Equal(t, e, 0, "error should be 0")
|
||||
assert.Equal(t, len(outHost), 1)
|
||||
result := outHost[0]
|
||||
|
||||
assert.True(t, result.IsOnCurve())
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchG2MSM(t *testing.T) {
|
||||
for _, batchPow2 := range []int{2, 4} {
|
||||
for _, pow2 := range []int{4, 6} {
|
||||
msmSize := 1 << pow2
|
||||
batchSize := 1 << batchPow2
|
||||
count := msmSize * batchSize
|
||||
|
||||
points := GenerateG2Points(count)
|
||||
scalars := GenerateScalars(count, false)
|
||||
|
||||
pointsResults, e := MsmG2Batch(&points, &scalars, batchSize, 0)
|
||||
|
||||
if e != nil {
|
||||
t.Errorf("MsmBatchBN254 returned an error: %v", e)
|
||||
}
|
||||
|
||||
if len(pointsResults) != batchSize {
|
||||
t.Errorf("Expected length %d, but got %d", batchSize, len(pointsResults))
|
||||
}
|
||||
|
||||
for _, s := range pointsResults {
|
||||
assert.True(t, s.IsOnCurve())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,222 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bn254
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"unsafe"
|
||||
|
||||
"github.com/ingonyama-zk/icicle/goicicle"
|
||||
)
|
||||
|
||||
// #cgo CFLAGS: -I./include/
|
||||
// #cgo CFLAGS: -I/usr/local/cuda/include
|
||||
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbn254
|
||||
// #include "ntt.h"
|
||||
import "C"
|
||||
|
||||
const (
|
||||
NONE = 0
|
||||
DIF = 1
|
||||
DIT = 2
|
||||
)
|
||||
|
||||
func Ntt(scalars *[]G1ScalarField, isInverse bool, deviceId int) uint64 {
|
||||
scalarsC := (*C.BN254_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
|
||||
|
||||
ret := C.ntt_cuda_bn254(scalarsC, C.uint32_t(len(*scalars)), C.bool(isInverse), C.size_t(deviceId))
|
||||
|
||||
return uint64(ret)
|
||||
}
|
||||
|
||||
func NttBatch(scalars *[]G1ScalarField, isInverse bool, batchSize, deviceId int) uint64 {
|
||||
scalarsC := (*C.BN254_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
|
||||
isInverseC := C.bool(isInverse)
|
||||
batchSizeC := C.uint32_t(batchSize)
|
||||
deviceIdC := C.size_t(deviceId)
|
||||
|
||||
ret := C.ntt_batch_cuda_bn254(scalarsC, C.uint32_t(len(*scalars)), batchSizeC, isInverseC, deviceIdC)
|
||||
|
||||
return uint64(ret)
|
||||
}
|
||||
|
||||
func EcNtt(values *[]G1ProjectivePoint, isInverse bool, deviceId int) uint64 {
|
||||
valuesC := (*C.BN254_projective_t)(unsafe.Pointer(&(*values)[0]))
|
||||
deviceIdC := C.size_t(deviceId)
|
||||
isInverseC := C.bool(isInverse)
|
||||
n := C.uint32_t(len(*values))
|
||||
|
||||
ret := C.ecntt_cuda_bn254(valuesC, n, isInverseC, deviceIdC)
|
||||
|
||||
return uint64(ret)
|
||||
}
|
||||
|
||||
func EcNttBatch(values *[]G1ProjectivePoint, isInverse bool, batchSize, deviceId int) uint64 {
|
||||
valuesC := (*C.BN254_projective_t)(unsafe.Pointer(&(*values)[0]))
|
||||
deviceIdC := C.size_t(deviceId)
|
||||
isInverseC := C.bool(isInverse)
|
||||
n := C.uint32_t(len(*values))
|
||||
batchSizeC := C.uint32_t(batchSize)
|
||||
|
||||
ret := C.ecntt_batch_cuda_bn254(valuesC, n, batchSizeC, isInverseC, deviceIdC)
|
||||
|
||||
return uint64(ret)
|
||||
}
|
||||
|
||||
func GenerateTwiddles(d_size int, log_d_size int, inverse bool) (up unsafe.Pointer, err error) {
|
||||
domain_size := C.uint32_t(d_size)
|
||||
logn := C.uint32_t(log_d_size)
|
||||
is_inverse := C.bool(inverse)
|
||||
|
||||
dp := C.build_domain_cuda_bn254(domain_size, logn, is_inverse, 0, 0)
|
||||
|
||||
if dp == nil {
|
||||
err = errors.New("nullptr returned from generating twiddles")
|
||||
return unsafe.Pointer(nil), err
|
||||
}
|
||||
|
||||
return unsafe.Pointer(dp), nil
|
||||
}
|
||||
|
||||
// Reverses d_scalars in-place
|
||||
func ReverseScalars(d_scalars unsafe.Pointer, len int) (int, error) {
|
||||
scalarsC := (*C.BN254_scalar_t)(d_scalars)
|
||||
lenC := C.int(len)
|
||||
if success := C.reverse_order_scalars_cuda_bn254(scalarsC, lenC, 0, 0); success != 0 {
|
||||
return -1, errors.New("reversing failed")
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func Interpolate(scalars, twiddles, cosetPowers unsafe.Pointer, size int, isCoset bool) unsafe.Pointer {
|
||||
size_d := size * 32
|
||||
dp, err := goicicle.CudaMalloc(size_d)
|
||||
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
d_out := (*C.BN254_scalar_t)(dp)
|
||||
scalarsC := (*C.BN254_scalar_t)(scalars)
|
||||
twiddlesC := (*C.BN254_scalar_t)(twiddles)
|
||||
cosetPowersC := (*C.BN254_scalar_t)(cosetPowers)
|
||||
sizeC := C.uint(size)
|
||||
|
||||
var ret C.int
|
||||
if isCoset {
|
||||
ret = C.interpolate_scalars_on_coset_cuda_bn254(d_out, scalarsC, twiddlesC, sizeC, cosetPowersC, 0, 0)
|
||||
} else {
|
||||
ret = C.interpolate_scalars_cuda_bn254(d_out, scalarsC, twiddlesC, sizeC, 0, 0)
|
||||
}
|
||||
if ret != 0 {
|
||||
fmt.Print("error interpolating")
|
||||
}
|
||||
|
||||
return unsafe.Pointer(d_out)
|
||||
}
|
||||
|
||||
func Evaluate(scalars_out, scalars, twiddles, coset_powers unsafe.Pointer, scalars_size, twiddles_size int, isCoset bool) int {
|
||||
scalars_outC := (*C.BN254_scalar_t)(scalars_out)
|
||||
scalarsC := (*C.BN254_scalar_t)(scalars)
|
||||
twiddlesC := (*C.BN254_scalar_t)(twiddles)
|
||||
coset_powersC := (*C.BN254_scalar_t)(coset_powers)
|
||||
sizeC := C.uint(scalars_size)
|
||||
twiddlesC_size := C.uint(twiddles_size)
|
||||
|
||||
var ret C.int
|
||||
if isCoset {
|
||||
ret = C.evaluate_scalars_on_coset_cuda_bn254(scalars_outC, scalarsC, twiddlesC, twiddlesC_size, sizeC, coset_powersC, 0, 0)
|
||||
} else {
|
||||
ret = C.evaluate_scalars_cuda_bn254(scalars_outC, scalarsC, twiddlesC, twiddlesC_size, sizeC, 0, 0)
|
||||
}
|
||||
|
||||
if ret != 0 {
|
||||
fmt.Print("error interpolating")
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func VecScalarAdd(in1_d, in2_d unsafe.Pointer, size int) int {
|
||||
in1_dC := (*C.BN254_scalar_t)(in1_d)
|
||||
in2_dC := (*C.BN254_scalar_t)(in2_d)
|
||||
sizeC := C.uint(size)
|
||||
|
||||
ret := C.add_scalars_cuda_bn254(in1_dC, in1_dC, in2_dC, sizeC, 0)
|
||||
|
||||
if ret != 0 {
|
||||
fmt.Print("error adding scalar vectors")
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func VecScalarSub(in1_d, in2_d unsafe.Pointer, size int) int {
|
||||
in1_dC := (*C.BN254_scalar_t)(in1_d)
|
||||
in2_dC := (*C.BN254_scalar_t)(in2_d)
|
||||
sizeC := C.uint(size)
|
||||
|
||||
ret := C.sub_scalars_cuda_bn254(in1_dC, in1_dC, in2_dC, sizeC, 0)
|
||||
|
||||
if ret != 0 {
|
||||
fmt.Print("error subtracting scalar vectors")
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func ToMontgomery(d_scalars unsafe.Pointer, len int) (int, error) {
|
||||
scalarsC := (*C.BN254_scalar_t)(d_scalars)
|
||||
lenC := C.uint(len)
|
||||
if success := C.to_montgomery_scalars_cuda_bn254(scalarsC, lenC, 0); success != 0 {
|
||||
return -1, errors.New("reversing failed")
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func FromMontgomery(d_scalars unsafe.Pointer, len int) (int, error) {
|
||||
scalarsC := (*C.BN254_scalar_t)(d_scalars)
|
||||
lenC := C.uint(len)
|
||||
if success := C.from_montgomery_scalars_cuda_bn254(scalarsC, lenC, 0); success != 0 {
|
||||
return -1, errors.New("reversing failed")
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func AffinePointFromMontgomery(d_points unsafe.Pointer, len int) (int, error) {
|
||||
pointsC := (*C.BN254_affine_t)(d_points)
|
||||
lenC := C.uint(len)
|
||||
|
||||
if success := C.from_montgomery_aff_points_cuda_bn254(pointsC, lenC, 0); success != 0 {
|
||||
return -1, errors.New("reversing failed")
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func G2AffinePointFromMontgomery(d_points unsafe.Pointer, len int) (int, error) {
|
||||
pointsC := (*C.BN254_g2_affine_t)(d_points)
|
||||
lenC := C.uint(len)
|
||||
|
||||
if success := C.from_montgomery_aff_points_g2_cuda_bn254(pointsC, lenC, 0); success != 0 {
|
||||
return -1, errors.New("reversing failed")
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
@@ -1,148 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bn254
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNttBN254Batch(t *testing.T) {
|
||||
count := 1 << 20
|
||||
scalars := GenerateScalars(count, false)
|
||||
|
||||
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
|
||||
copy(nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, scalars)
|
||||
NttBatch(&nttResult, false, count, 0)
|
||||
assert.NotEqual(t, nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, nttResult)
|
||||
}
|
||||
|
||||
func TestNttBN254CompareToGnarkDIF(t *testing.T) {
|
||||
count := 1 << 2
|
||||
scalars := GenerateScalars(count, false)
|
||||
|
||||
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
|
||||
copy(nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, scalars)
|
||||
Ntt(&nttResult, false, 0)
|
||||
assert.NotEqual(t, nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, nttResult)
|
||||
}
|
||||
|
||||
func TestINttBN254CompareToGnarkDIT(t *testing.T) {
|
||||
count := 1 << 3
|
||||
scalars := GenerateScalars(count, false)
|
||||
|
||||
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
|
||||
copy(nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, scalars)
|
||||
Ntt(&nttResult, true, 0)
|
||||
assert.NotEqual(t, nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, nttResult)
|
||||
}
|
||||
|
||||
func TestNttBN254(t *testing.T) {
|
||||
count := 1 << 3
|
||||
|
||||
scalars := GenerateScalars(count, false)
|
||||
|
||||
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
|
||||
copy(nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, scalars)
|
||||
Ntt(&nttResult, false, 0)
|
||||
assert.NotEqual(t, nttResult, scalars)
|
||||
|
||||
inttResult := make([]G1ScalarField, len(nttResult))
|
||||
copy(inttResult, nttResult)
|
||||
|
||||
assert.Equal(t, inttResult, nttResult)
|
||||
Ntt(&inttResult, true, 0)
|
||||
assert.Equal(t, inttResult, scalars)
|
||||
}
|
||||
|
||||
func TestNttBatchBN254(t *testing.T) {
|
||||
count := 1 << 5
|
||||
batches := 4
|
||||
|
||||
scalars := GenerateScalars(count*batches, false)
|
||||
|
||||
var scalarVecOfVec [][]G1ScalarField = make([][]G1ScalarField, 0)
|
||||
|
||||
for i := 0; i < batches; i++ {
|
||||
start := i * count
|
||||
end := (i + 1) * count
|
||||
batch := make([]G1ScalarField, len(scalars[start:end]))
|
||||
copy(batch, scalars[start:end])
|
||||
scalarVecOfVec = append(scalarVecOfVec, batch)
|
||||
}
|
||||
|
||||
nttBatchResult := make([]G1ScalarField, len(scalars))
|
||||
copy(nttBatchResult, scalars)
|
||||
|
||||
NttBatch(&nttBatchResult, false, count, 0)
|
||||
|
||||
var nttResultVecOfVec [][]G1ScalarField
|
||||
|
||||
for i := 0; i < batches; i++ {
|
||||
// Clone the slice
|
||||
clone := make([]G1ScalarField, len(scalarVecOfVec[i]))
|
||||
copy(clone, scalarVecOfVec[i])
|
||||
|
||||
// Add it to the result vector of vectors
|
||||
nttResultVecOfVec = append(nttResultVecOfVec, clone)
|
||||
|
||||
// Call the ntt_bn254 function
|
||||
Ntt(&nttResultVecOfVec[i], false, 0)
|
||||
}
|
||||
|
||||
assert.NotEqual(t, nttBatchResult, scalars)
|
||||
|
||||
// Check that the ntt of each vec of scalars is equal to the intt of the specific batch
|
||||
for i := 0; i < batches; i++ {
|
||||
if !reflect.DeepEqual(nttResultVecOfVec[i], nttBatchResult[i*count:((i+1)*count)]) {
|
||||
t.Errorf("ntt of vec of scalars not equal to intt of specific batch")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkNTT(b *testing.B) {
|
||||
LOG_NTT_SIZES := []int{12, 15, 20, 21, 22, 23, 24, 25, 26}
|
||||
|
||||
for _, logNTTSize := range LOG_NTT_SIZES {
|
||||
nttSize := 1 << logNTTSize
|
||||
b.Run(fmt.Sprintf("NTT %d", logNTTSize), func(b *testing.B) {
|
||||
scalars := GenerateScalars(nttSize, false)
|
||||
|
||||
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
|
||||
copy(nttResult, scalars)
|
||||
for n := 0; n < b.N; n++ {
|
||||
Ntt(&nttResult, false, 0)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,48 +0,0 @@
package bn254

import (
    "encoding/binary"
    "fmt"
    "log"
    "regexp"
    "runtime"
    "time"
)

// Function to convert [8]uint32 to [4]uint64
func ConvertUint32ArrToUint64Arr(arr32 [8]uint32) [4]uint64 {
    var arr64 [4]uint64
    for i := 0; i < len(arr32); i += 2 {
        arr64[i/2] = (uint64(arr32[i]) << 32) | uint64(arr32[i+1])
    }
    return arr64
}

func ConvertUint64ArrToUint32Arr(arr64 [4]uint64) [8]uint32 {
    var arr32 [8]uint32
    for i, v := range arr64 {
        b := make([]byte, 8)
        binary.LittleEndian.PutUint64(b, v)

        arr32[i*2] = binary.LittleEndian.Uint32(b[0:4])
        arr32[i*2+1] = binary.LittleEndian.Uint32(b[4:8])
    }

    return arr32
}

func TimeTrack(start time.Time) {
    elapsed := time.Since(start)

    // Skip this function, and fetch the PC and file for its parent.
    pc, _, _, _ := runtime.Caller(1)

    // Retrieve the function object for this function's parent.
    funcObj := runtime.FuncForPC(pc)

    // Regex to extract just the function name (and not the module path).
    runtimeFunc := regexp.MustCompile(`^.*\.(.*)$`)
    name := runtimeFunc.ReplaceAllString(funcObj.Name(), "$1")

    log.Println(fmt.Sprintf("%s took %s", name, elapsed))
}
@@ -1,81 +0,0 @@
package bn254

import (
    "testing"
)

func TestConvertUint32ArrToUint64Arr(t *testing.T) {
    testCases := []struct {
        name  string
        input [8]uint32
        want  [4]uint64
    }{
        {
            name:  "Test with incremental array",
            input: [8]uint32{1, 2, 3, 4, 5, 6, 7, 8},
            want:  [4]uint64{4294967298, 12884901892, 21474836486, 30064771080},
        },
        {
            name:  "Test with all zeros",
            input: [8]uint32{0, 0, 0, 0, 0, 0, 0, 0},
            want:  [4]uint64{0, 0, 0, 0},
        },
        {
            name:  "Test with maximum uint32 values",
            input: [8]uint32{4294967295, 4294967295, 4294967295, 4294967295, 4294967295, 4294967295, 4294967295, 4294967295},
            want:  [4]uint64{18446744073709551615, 18446744073709551615, 18446744073709551615, 18446744073709551615},
        },
        {
            name:  "Test with alternating min and max uint32 values",
            input: [8]uint32{0, 4294967295, 0, 4294967295, 0, 4294967295, 0, 4294967295},
            want:  [4]uint64{4294967295, 4294967295, 4294967295, 4294967295},
        },
        {
            name:  "Test with alternating max and min uint32 values",
            input: [8]uint32{4294967295, 0, 4294967295, 0, 4294967295, 0, 4294967295, 0},
            want:  [4]uint64{18446744069414584320, 18446744069414584320, 18446744069414584320, 18446744069414584320},
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            got := ConvertUint32ArrToUint64Arr(tc.input)
            if got != tc.want {
                t.Errorf("got %v, want %v", got, tc.want)
            }
        })
    }
}

func TestConvertUint64ArrToUint32Arr(t *testing.T) {
    testCases := []struct {
        name     string
        input    [4]uint64
        expected [8]uint32
    }{
        {
            name:     "test one",
            input:    [4]uint64{1, 2, 3, 4},
            expected: [8]uint32{1, 0, 2, 0, 3, 0, 4, 0},
        },
        {
            name:     "test two",
            input:    [4]uint64{100, 200, 300, 400},
            expected: [8]uint32{100, 0, 200, 0, 300, 0, 400, 0},
        },
        {
            name:     "test three",
            input:    [4]uint64{1000, 2000, 3000, 4000},
            expected: [8]uint32{1000, 0, 2000, 0, 3000, 0, 4000, 0},
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            got := ConvertUint64ArrToUint32Arr(tc.input)
            if got != tc.expected {
                t.Errorf("got %v, want %v", got, tc.expected)
            }
        })
    }
}
@@ -1,42 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by Ingonyama DO NOT EDIT

package bn254

// #cgo CFLAGS: -I./include/
// #cgo CFLAGS: -I/usr/local/cuda/include
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbn254
// #include "ve_mod_mult.h"
import "C"
import (
    "fmt"
    "unsafe"
)

func VecScalarMulMod(scalarVec1, scalarVec2 unsafe.Pointer, size int) int {
    scalarVec1C := (*C.BN254_scalar_t)(scalarVec1)
    scalarVec2C := (*C.BN254_scalar_t)(scalarVec2)
    sizeC := C.size_t(size)

    ret := C.vec_mod_mult_device_scalar_bn254(scalarVec1C, scalarVec2C, sizeC, 0)

    if ret != 0 {
        fmt.Print("error multiplying scalar vectors")
        return -1
    }

    return 0
}
@@ -1,328 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bw6761
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
// #cgo CFLAGS: -I./include/
|
||||
// #cgo CFLAGS: -I/usr/local/cuda/include
|
||||
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbw6761
|
||||
// #include "projective.h"
|
||||
// #include "ve_mod_mult.h"
|
||||
import "C"
|
||||
|
||||
const SCALAR_SIZE = 12
|
||||
const BASE_SIZE = 24
|
||||
|
||||
type G1ScalarField struct {
|
||||
S [SCALAR_SIZE]uint32
|
||||
}
|
||||
|
||||
type G1BaseField struct {
|
||||
S [BASE_SIZE]uint32
|
||||
}
|
||||
|
||||
/*
|
||||
* BaseField Constructors
|
||||
*/
|
||||
|
||||
func (f *G1BaseField) SetZero() *G1BaseField {
|
||||
var S [BASE_SIZE]uint32
|
||||
f.S = S
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *G1BaseField) SetOne() *G1BaseField {
|
||||
var S [BASE_SIZE]uint32
|
||||
|
||||
S[0] = 1
|
||||
|
||||
f.S = S
|
||||
return f
|
||||
}
|
||||
|
||||
func (p *G1ProjectivePoint) FromAffine(affine *G1PointAffine) *G1ProjectivePoint {
|
||||
out := (*C.BW6761_projective_t)(unsafe.Pointer(p))
|
||||
in := (*C.BW6761_affine_t)(unsafe.Pointer(affine))
|
||||
|
||||
C.projective_from_affine_bw6_761(out, in)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (f *G1BaseField) FromLimbs(limbs [BASE_SIZE]uint32) *G1BaseField {
|
||||
copy(f.S[:], limbs[:])
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
/*
|
||||
* BaseField methods
|
||||
*/
|
||||
|
||||
func (f *G1BaseField) Limbs() [BASE_SIZE]uint32 {
|
||||
return f.S
|
||||
}
|
||||
|
||||
func (f *G1BaseField) ToBytesLe() []byte {
|
||||
bytes := make([]byte, len(f.S)*4)
|
||||
for i, v := range f.S {
|
||||
binary.LittleEndian.PutUint32(bytes[i*4:], v)
|
||||
}
|
||||
|
||||
return bytes
|
||||
}
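For reference, a short sketch of the resulting encoding (BASE_SIZE is 24, so the output is always 96 little-endian bytes, lowest limb first):

var one G1BaseField
one.SetOne()

le := one.ToBytesLe()
// len(le) == 96; le[0] == 1 and all other bytes are 0,
// since only the lowest 32-bit limb is set.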
|
||||
|
||||
/*
|
||||
* ScalarField Constructors
|
||||
*/
|
||||
|
||||
func (p *G1ScalarField) Random() *G1ScalarField {
|
||||
outC := (*C.BW6761_scalar_t)(unsafe.Pointer(p))
|
||||
C.random_scalar_bw6_761(outC)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (f *G1ScalarField) SetZero() *G1ScalarField {
|
||||
var S [SCALAR_SIZE]uint32
|
||||
f.S = S
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *G1ScalarField) SetOne() *G1ScalarField {
|
||||
var S [SCALAR_SIZE]uint32
|
||||
S[0] = 1
|
||||
f.S = S
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func (a *G1ScalarField) Eq(b *G1ScalarField) bool {
|
||||
for i, v := range a.S {
|
||||
if b.S[i] != v {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
/*
|
||||
* ScalarField methods
|
||||
*/
|
||||
|
||||
func (f *G1ScalarField) Limbs() [SCALAR_SIZE]uint32 {
|
||||
return f.S
|
||||
}
|
||||
|
||||
func (f *G1ScalarField) ToBytesLe() []byte {
|
||||
bytes := make([]byte, len(f.S)*4)
|
||||
for i, v := range f.S {
|
||||
binary.LittleEndian.PutUint32(bytes[i*4:], v)
|
||||
}
|
||||
|
||||
return bytes
|
||||
}
|
||||
|
||||
/*
|
||||
* G1ProjectivePoint
|
||||
*/
|
||||
|
||||
type G1ProjectivePoint struct {
|
||||
X, Y, Z G1BaseField
|
||||
}
|
||||
|
||||
func (f *G1ProjectivePoint) SetZero() *G1ProjectivePoint {
|
||||
var yOne G1BaseField
|
||||
yOne.SetOne()
|
||||
|
||||
var xZero G1BaseField
|
||||
xZero.SetZero()
|
||||
|
||||
var zZero G1BaseField
|
||||
zZero.SetZero()
|
||||
|
||||
f.X = xZero
|
||||
f.Y = yOne
|
||||
f.Z = zZero
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func (p *G1ProjectivePoint) Eq(pCompare *G1ProjectivePoint) bool {
|
||||
// Cast *G1ProjectivePoint to *C.BW6761_projective_t
|
||||
// The unsafe.Pointer cast is necessary because Go doesn't allow direct casts
|
||||
// between different pointer types.
|
||||
// It's your responsibility to ensure that the types are compatible.
|
||||
pC := (*C.BW6761_projective_t)(unsafe.Pointer(p))
|
||||
pCompareC := (*C.BW6761_projective_t)(unsafe.Pointer(pCompare))
|
||||
|
||||
// Call the C function
|
||||
// The C function doesn't keep any references to the data,
|
||||
// so it's fine if the Go garbage collector moves or deletes the data later.
|
||||
return bool(C.eq_bw6_761(pC, pCompareC))
|
||||
}
|
||||
|
||||
func (p *G1ProjectivePoint) IsOnCurve() bool {
|
||||
point := (*C.BW6761_projective_t)(unsafe.Pointer(p))
|
||||
res := C.projective_is_on_curve_bw6_761(point)
|
||||
|
||||
return bool(res)
|
||||
}
|
||||
|
||||
func (p *G1ProjectivePoint) Random() *G1ProjectivePoint {
|
||||
outC := (*C.BW6761_projective_t)(unsafe.Pointer(p))
|
||||
C.random_projective_bw6_761(outC)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *G1ProjectivePoint) StripZ() *G1PointAffine {
|
||||
return &G1PointAffine{
|
||||
X: p.X,
|
||||
Y: p.Y,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *G1ProjectivePoint) FromLimbs(x, y, z *[]uint32) *G1ProjectivePoint {
|
||||
var _x G1BaseField
|
||||
var _y G1BaseField
|
||||
var _z G1BaseField
|
||||
|
||||
_x.FromLimbs(GetFixedLimbs(x))
|
||||
_y.FromLimbs(GetFixedLimbs(y))
|
||||
_z.FromLimbs(GetFixedLimbs(z))
|
||||
|
||||
p.X = _x
|
||||
p.Y = _y
|
||||
p.Z = _z
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
/*
|
||||
* G1PointAffine
|
||||
*/
|
||||
|
||||
type G1PointAffine struct {
|
||||
X, Y G1BaseField
|
||||
}
|
||||
|
||||
func (p *G1PointAffine) FromProjective(projective *G1ProjectivePoint) *G1PointAffine {
|
||||
in := (*C.BW6761_projective_t)(unsafe.Pointer(projective))
|
||||
out := (*C.BW6761_affine_t)(unsafe.Pointer(p))
|
||||
|
||||
C.projective_to_affine_bw6_761(out, in)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *G1PointAffine) ToProjective() *G1ProjectivePoint {
|
||||
var Z G1BaseField
|
||||
Z.SetOne()
|
||||
|
||||
return &G1ProjectivePoint{
|
||||
X: p.X,
|
||||
Y: p.Y,
|
||||
Z: Z,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *G1PointAffine) FromLimbs(X, Y *[]uint32) *G1PointAffine {
|
||||
var _x G1BaseField
|
||||
var _y G1BaseField
|
||||
|
||||
_x.FromLimbs(GetFixedLimbs(X))
|
||||
_y.FromLimbs(GetFixedLimbs(Y))
|
||||
|
||||
p.X = _x
|
||||
p.Y = _y
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
/*
|
||||
* Multiplication
|
||||
*/
|
||||
|
||||
func MultiplyVec(a []G1ProjectivePoint, b []G1ScalarField, deviceID int) {
|
||||
if len(a) != len(b) {
|
||||
panic("a and b have different lengths")
|
||||
}
|
||||
|
||||
pointsC := (*C.BW6761_projective_t)(unsafe.Pointer(&a[0]))
|
||||
scalarsC := (*C.BW6761_scalar_t)(unsafe.Pointer(&b[0]))
|
||||
deviceIdC := C.size_t(deviceID)
|
||||
nElementsC := C.size_t(len(a))
|
||||
|
||||
C.vec_mod_mult_point_bw6_761(pointsC, scalarsC, nElementsC, deviceIdC)
|
||||
}
|
||||
|
||||
func MultiplyScalar(a []G1ScalarField, b []G1ScalarField, deviceID int) {
|
||||
if len(a) != len(b) {
|
||||
panic("a and b have different lengths")
|
||||
}
|
||||
|
||||
aC := (*C.BW6761_scalar_t)(unsafe.Pointer(&a[0]))
|
||||
bC := (*C.BW6761_scalar_t)(unsafe.Pointer(&b[0]))
|
||||
deviceIdC := C.size_t(deviceID)
|
||||
nElementsC := C.size_t(len(a))
|
||||
|
||||
C.vec_mod_mult_scalar_bw6_761(aC, bC, nElementsC, deviceIdC)
|
||||
}
|
||||
|
||||
// Multiply a flattened matrix by a vector:
|
||||
//
|
||||
// `a` - flattened matrix;
|
||||
// `b` - vector to multiply `a` by;
|
||||
func MultiplyMatrix(a []G1ScalarField, b []G1ScalarField, deviceID int) {
|
||||
c := make([]G1ScalarField, len(b))
|
||||
for i := range c {
|
||||
var p G1ScalarField
|
||||
p.SetZero()
|
||||
|
||||
c[i] = p
|
||||
}
|
||||
|
||||
aC := (*C.BW6761_scalar_t)(unsafe.Pointer(&a[0]))
|
||||
bC := (*C.BW6761_scalar_t)(unsafe.Pointer(&b[0]))
|
||||
cC := (*C.BW6761_scalar_t)(unsafe.Pointer(&c[0]))
|
||||
deviceIdC := C.size_t(deviceID)
|
||||
nElementsC := C.size_t(len(a))
|
||||
|
||||
C.matrix_vec_mod_mult_bw6_761(aC, bC, cC, nElementsC, deviceIdC)
|
||||
}
|
||||
|
||||
/*
|
||||
* Utils
|
||||
*/
|
||||
|
||||
func GetFixedLimbs(slice *[]uint32) [BASE_SIZE]uint32 {
|
||||
if len(*slice) <= BASE_SIZE {
|
||||
limbs := [BASE_SIZE]uint32{}
|
||||
copy(limbs[:len(*slice)], *slice)
|
||||
return limbs
|
||||
}
|
||||
|
||||
panic("slice has too many elements")
|
||||
}
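A brief sketch of the zero-padding behaviour (limb values are arbitrary):

limbs := []uint32{7, 8, 9}
fixed := GetFixedLimbs(&limbs)
// fixed == [24]uint32{7, 8, 9, 0, 0, ...}: the slice is copied into the
// low limbs, the rest stay zero, and slices longer than BASE_SIZE panic.
_ = fixed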
|
||||
@@ -1,212 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bw6761
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func generateUint32Array(length int, isZero bool) []uint32 {
|
||||
arr := make([]uint32, length)
|
||||
for i := 0; i < length; i++ {
|
||||
if isZero {
|
||||
arr[i] = 0x0
|
||||
} else {
|
||||
arr[i] = uint32(i + 1) // You can modify this line to fill the array as needed
|
||||
}
|
||||
}
|
||||
return arr
|
||||
}
|
||||
|
||||
func TestNewFieldBW6761One(t *testing.T) {
|
||||
var oneField G1BaseField
|
||||
oneField.SetOne()
|
||||
|
||||
rawOneField := [24]uint32{0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
|
||||
|
||||
assert.Equal(t, oneField.S, rawOneField)
|
||||
}
|
||||
|
||||
func TestNewFieldBW6761Zero(t *testing.T) {
|
||||
var zeroField G1BaseField
|
||||
zeroField.SetZero()
|
||||
|
||||
rawZeroField := [24]uint32{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
|
||||
|
||||
assert.Equal(t, zeroField.S, rawZeroField)
|
||||
}
|
||||
|
||||
func TestFieldBW6761ToBytesLe(t *testing.T) {
|
||||
var p G1ProjectivePoint
|
||||
p.Random()
|
||||
|
||||
expected := make([]byte, len(p.X.S)*4) // each uint32 takes 4 bytes
|
||||
for i, v := range p.X.S {
|
||||
binary.LittleEndian.PutUint32(expected[i*4:], v)
|
||||
}
|
||||
|
||||
assert.Equal(t, p.X.ToBytesLe(), expected)
|
||||
assert.Equal(t, len(p.X.ToBytesLe()), 96)
|
||||
}
|
||||
|
||||
func TestNewPointBW6761Zero(t *testing.T) {
|
||||
var pointZero G1ProjectivePoint
|
||||
pointZero.SetZero()
|
||||
|
||||
var baseOne G1BaseField
|
||||
baseOne.SetOne()
|
||||
|
||||
var zeroSanity G1BaseField
|
||||
zeroSanity.SetZero()
|
||||
|
||||
assert.Equal(t, pointZero.X, zeroSanity)
|
||||
assert.Equal(t, pointZero.Y, baseOne)
|
||||
assert.Equal(t, pointZero.Z, zeroSanity)
|
||||
}
|
||||
|
||||
func TestFromProjectiveToAffine(t *testing.T) {
|
||||
fmt.Print() // this prevents the test from hanging. TODO: figure out why
|
||||
var projective G1ProjectivePoint
|
||||
var affine G1PointAffine
|
||||
|
||||
projective.Random()
|
||||
|
||||
affine.FromProjective(&projective)
|
||||
var projective2 G1ProjectivePoint
|
||||
projective2.FromAffine(&affine)
|
||||
|
||||
assert.True(t, projective.IsOnCurve())
|
||||
assert.True(t, projective2.IsOnCurve())
|
||||
assert.True(t, projective.Eq(&projective2))
|
||||
}
|
||||
|
||||
func TestBW6761Eq(t *testing.T) {
|
||||
var p1 G1ProjectivePoint
|
||||
p1.Random()
|
||||
var p2 G1ProjectivePoint
|
||||
p2.Random()
|
||||
|
||||
assert.Equal(t, p1.Eq(&p1), true)
|
||||
assert.Equal(t, p1.Eq(&p2), false)
|
||||
}
|
||||
|
||||
func TestBW6761StripZ(t *testing.T) {
|
||||
var p1 G1ProjectivePoint
|
||||
p1.Random()
|
||||
|
||||
p2ZLess := p1.StripZ()
|
||||
|
||||
assert.IsType(t, G1PointAffine{}, *p2ZLess)
|
||||
assert.Equal(t, p1.X, p2ZLess.X)
|
||||
assert.Equal(t, p1.Y, p2ZLess.Y)
|
||||
}
|
||||
|
||||
func TestPointBW6761fromLimbs(t *testing.T) {
|
||||
var p G1ProjectivePoint
|
||||
p.Random()
|
||||
|
||||
x := p.X.Limbs()
|
||||
y := p.Y.Limbs()
|
||||
z := p.Z.Limbs()
|
||||
|
||||
xSlice := x[:]
|
||||
ySlice := y[:]
|
||||
zSlice := z[:]
|
||||
|
||||
var pFromLimbs G1ProjectivePoint
|
||||
pFromLimbs.FromLimbs(&xSlice, &ySlice, &zSlice)
|
||||
|
||||
assert.Equal(t, pFromLimbs, p)
|
||||
}
|
||||
|
||||
func TestNewPointAffineNoInfinityBW6761Zero(t *testing.T) {
|
||||
var zeroP G1PointAffine
|
||||
|
||||
var zeroSanity G1BaseField
|
||||
zeroSanity.SetZero()
|
||||
|
||||
assert.Equal(t, zeroP.X, zeroSanity)
|
||||
assert.Equal(t, zeroP.Y, zeroSanity)
|
||||
}
|
||||
|
||||
func TestPointAffineNoInfinityBW6761FromLimbs(t *testing.T) {
|
||||
// Initialize your test values
|
||||
x := [24]uint32{1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8}
|
||||
y := [24]uint32{1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8}
|
||||
xSlice := x[:]
|
||||
ySlice := y[:]
|
||||
|
||||
// Execute your function
|
||||
var result G1PointAffine
|
||||
result.FromLimbs(&xSlice, &ySlice)
|
||||
|
||||
var xBase G1BaseField
|
||||
var yBase G1BaseField
|
||||
xBase.FromLimbs(x)
|
||||
yBase.FromLimbs(y)
|
||||
|
||||
// Define your expected result
|
||||
expected := G1PointAffine{
|
||||
X: xBase,
|
||||
Y: yBase,
|
||||
}
|
||||
|
||||
// Test if result is as expected
|
||||
assert.Equal(t, expected, result)
|
||||
}
|
||||
|
||||
func TestGetFixedLimbs(t *testing.T) {
|
||||
t.Run("case of valid input of length less than 8", func(t *testing.T) {
|
||||
slice := []uint32{1, 2, 3, 4, 5, 6, 7}
|
||||
expected := [24]uint32{1, 2, 3, 4, 5, 6, 7, 0}
|
||||
|
||||
result := GetFixedLimbs(&slice)
|
||||
assert.Equal(t, result, expected)
|
||||
})
|
||||
|
||||
t.Run("case of valid input of length 24", func(t *testing.T) {
|
||||
slice := generateUint32Array(24, false)
|
||||
expected := [24]uint32(generateUint32Array(24, false))
|
||||
|
||||
result := GetFixedLimbs(&slice)
|
||||
assert.Equal(t, result, expected)
|
||||
})
|
||||
|
||||
t.Run("case of empty input", func(t *testing.T) {
|
||||
slice := []uint32{}
|
||||
expected := [24]uint32(generateUint32Array(24, true))
|
||||
|
||||
result := GetFixedLimbs(&slice)
|
||||
assert.Equal(t, result, expected)
|
||||
})
|
||||
|
||||
t.Run("case of input length greater than 24", func(t *testing.T) {
|
||||
slice := generateUint32Array(25, false)
|
||||
|
||||
defer func() {
|
||||
if r := recover(); r == nil {
|
||||
t.Errorf("the code did not panic")
|
||||
}
|
||||
}()
|
||||
|
||||
GetFixedLimbs(&slice)
|
||||
})
|
||||
}
|
||||
@@ -1,98 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bw6761
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// #cgo CFLAGS: -I./include/
|
||||
// #cgo CFLAGS: -I/usr/local/cuda/include
|
||||
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbw6761
|
||||
// #include "projective.h"
|
||||
// #include "ve_mod_mult.h"
|
||||
import "C"
|
||||
|
||||
// G2 extension field
|
||||
|
||||
type G2Element [12]uint64
|
||||
|
||||
type G2PointAffine struct {
|
||||
X, Y G2Element
|
||||
}
|
||||
|
||||
type G2Point struct {
|
||||
X, Y, Z G2Element
|
||||
}
|
||||
|
||||
func (p *G2Point) Random() *G2Point {
|
||||
outC := (*C.BW6761_g2_projective_t)(unsafe.Pointer(p))
|
||||
C.random_g2_projective_bw6_761(outC)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *G2Point) FromAffine(affine *G2PointAffine) *G2Point {
|
||||
out := (*C.BW6761_g2_projective_t)(unsafe.Pointer(p))
|
||||
in := (*C.BW6761_g2_affine_t)(unsafe.Pointer(affine))
|
||||
|
||||
C.g2_projective_from_affine_bw6_761(out, in)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *G2Point) Eq(pCompare *G2Point) bool {
|
||||
// Cast *G2Point to *C.BW6761_g2_projective_t
|
||||
// The unsafe.Pointer cast is necessary because Go doesn't allow direct casts
|
||||
// between different pointer types.
|
||||
// It's your responsibility to ensure that the types are compatible.
|
||||
pC := (*C.BW6761_g2_projective_t)(unsafe.Pointer(p))
|
||||
pCompareC := (*C.BW6761_g2_projective_t)(unsafe.Pointer(pCompare))
|
||||
|
||||
// Call the C function
|
||||
// The C function doesn't keep any references to the data,
|
||||
// so it's fine if the Go garbage collector moves or deletes the data later.
|
||||
return bool(C.eq_g2_bw6_761(pC, pCompareC))
|
||||
}
|
||||
|
||||
func (f *G2Element) ToBytesLe() []byte {
|
||||
var bytes []byte
|
||||
for _, val := range f {
|
||||
buf := make([]byte, 8) // 8 bytes because uint64 is 64-bit
|
||||
binary.LittleEndian.PutUint64(buf, val)
|
||||
bytes = append(bytes, buf...)
|
||||
}
|
||||
return bytes
|
||||
}
|
||||
|
||||
func (p *G2PointAffine) FromProjective(projective *G2Point) *G2PointAffine {
|
||||
out := (*C.BW6761_g2_affine_t)(unsafe.Pointer(p))
|
||||
in := (*C.BW6761_g2_projective_t)(unsafe.Pointer(projective))
|
||||
|
||||
C.g2_projective_to_affine_bw6_761(out, in)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *G2Point) IsOnCurve() bool {
|
||||
// Cast the Go point to the C projective type; no copy is made.
|
||||
point := (*C.BW6761_g2_projective_t)(unsafe.Pointer(p))
|
||||
res := C.g2_projective_is_on_curve_bw6_761(point)
|
||||
|
||||
return bool(res)
|
||||
}
|
||||
@@ -1,83 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bw6761
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"unsafe"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestG2Eqg2(t *testing.T) {
|
||||
var point G2Point
|
||||
|
||||
point.Random()
|
||||
|
||||
assert.True(t, point.Eq(&point))
|
||||
}
|
||||
|
||||
func TestG2FromProjectiveToAffine(t *testing.T) {
|
||||
fmt.Print() // this prevents the test from hanging. TODO: figure out why
|
||||
var projective G2Point
|
||||
projective.Random()
|
||||
|
||||
var affine G2PointAffine
|
||||
affine.FromProjective(&projective)
|
||||
|
||||
var projective2 G2Point
|
||||
projective2.FromAffine(&affine)
|
||||
|
||||
assert.True(t, projective.IsOnCurve())
|
||||
assert.True(t, projective2.IsOnCurve())
|
||||
assert.True(t, projective.Eq(&projective2))
|
||||
}
|
||||
|
||||
func TestG2Eqg2NotEqual(t *testing.T) {
|
||||
var point G2Point
|
||||
point.Random()
|
||||
|
||||
var point2 G2Point
|
||||
point2.Random()
|
||||
|
||||
assert.False(t, point.Eq(&point2))
|
||||
}
|
||||
|
||||
func TestG2ToBytes(t *testing.T) {
|
||||
var point G2Point
|
||||
var element G2Element
|
||||
point.Random()
|
||||
bytes := point.X.ToBytesLe()
|
||||
|
||||
assert.Equal(t, len(bytes), int(unsafe.Sizeof(element)))
|
||||
}
|
||||
|
||||
func TestG2ShouldConvertToProjective(t *testing.T) {
|
||||
fmt.Print() // this prevents the test from hanging. TODO: figure out why
|
||||
var pointProjective G2Point
|
||||
pointProjective.Random()
|
||||
|
||||
var pointAffine G2PointAffine
|
||||
pointAffine.FromProjective(&pointProjective)
|
||||
|
||||
var proj G2Point
|
||||
proj.FromAffine(&pointAffine)
|
||||
|
||||
assert.True(t, proj.IsOnCurve())
|
||||
assert.True(t, pointProjective.Eq(&proj))
|
||||
}
|
||||
@@ -1,101 +0,0 @@
|
||||
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
#include <cuda.h>
|
||||
#include <cuda_runtime.h>
|
||||
#include <stdbool.h>
|
||||
// msm.h
|
||||
|
||||
#ifndef _BW6761_MSM_H
|
||||
#define _BW6761_MSM_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
// Incomplete declaration of BW6761 projective and affine structs
|
||||
typedef struct BW6761_projective_t BW6761_projective_t;
|
||||
typedef struct BW6761_g2_projective_t BW6761_g2_projective_t;
|
||||
typedef struct BW6761_affine_t BW6761_affine_t;
|
||||
typedef struct BW6761_g2_affine_t BW6761_g2_affine_t;
|
||||
typedef struct BW6761_scalar_t BW6761_scalar_t;
|
||||
typedef cudaStream_t CudaStream_t;
|
||||
|
||||
int msm_cuda_bw6_761(
|
||||
BW6761_projective_t* out, BW6761_affine_t* points, BW6761_scalar_t* scalars, size_t count, size_t device_id);
|
||||
|
||||
int msm_batch_cuda_bw6_761(
|
||||
BW6761_projective_t* out,
|
||||
BW6761_affine_t* points,
|
||||
BW6761_scalar_t* scalars,
|
||||
size_t batch_size,
|
||||
size_t msm_size,
|
||||
size_t device_id);
|
||||
|
||||
int commit_cuda_bw6_761(
|
||||
BW6761_projective_t* d_out,
|
||||
BW6761_scalar_t* d_scalars,
|
||||
BW6761_affine_t* d_points,
|
||||
size_t count,
|
||||
unsigned large_bucket_factor,
|
||||
size_t device_id);
|
||||
|
||||
int commit_batch_cuda_bw6_761(
|
||||
BW6761_projective_t* d_out,
|
||||
BW6761_scalar_t* d_scalars,
|
||||
BW6761_affine_t* d_points,
|
||||
size_t count,
|
||||
size_t batch_size,
|
||||
size_t device_id);
|
||||
|
||||
int msm_g2_cuda_bw6_761(
|
||||
BW6761_g2_projective_t* out,
|
||||
BW6761_g2_affine_t* points,
|
||||
BW6761_scalar_t* scalars,
|
||||
size_t count,
|
||||
size_t device_id);
|
||||
|
||||
int msm_batch_g2_cuda_bw6_761(
|
||||
BW6761_g2_projective_t* out,
|
||||
BW6761_g2_affine_t* points,
|
||||
BW6761_scalar_t* scalars,
|
||||
size_t batch_size,
|
||||
size_t msm_size,
|
||||
size_t device_id);
|
||||
|
||||
int commit_g2_cuda_bw6_761(
|
||||
BW6761_g2_projective_t* d_out,
|
||||
BW6761_scalar_t* d_scalars,
|
||||
BW6761_g2_affine_t* d_points,
|
||||
size_t count,
|
||||
unsigned large_bucket_factor,
|
||||
size_t device_id);
|
||||
|
||||
int commit_batch_g2_cuda_bw6_761(
|
||||
BW6761_g2_projective_t* d_out,
|
||||
BW6761_scalar_t* d_scalars,
|
||||
BW6761_g2_affine_t* d_points,
|
||||
size_t count,
|
||||
size_t batch_size,
|
||||
size_t device_id,
|
||||
cudaStream_t stream);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* _BW6761_MSM_H */
|
||||
@@ -1,198 +0,0 @@
|
||||
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
#include <cuda.h>
|
||||
#include <stdbool.h>
|
||||
// ntt.h
|
||||
|
||||
#ifndef _BW6761_NTT_H
|
||||
#define _BW6761_NTT_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
// Incomplete declaration of BW6761 projective and affine structs
|
||||
typedef struct BW6761_projective_t BW6761_projective_t;
|
||||
typedef struct BW6761_affine_t BW6761_affine_t;
|
||||
typedef struct BW6761_scalar_t BW6761_scalar_t;
|
||||
|
||||
typedef struct BW6761_g2_projective_t BW6761_g2_projective_t;
|
||||
typedef struct BW6761_g2_affine_t BW6761_g2_affine_t;
|
||||
|
||||
int ntt_cuda_bw6_761(BW6761_scalar_t* arr, uint32_t n, bool inverse, size_t device_id);
|
||||
int ntt_batch_cuda_bw6_761(
|
||||
BW6761_scalar_t* arr, uint32_t arr_size, uint32_t batch_size, bool inverse, size_t device_id);
|
||||
|
||||
int ecntt_cuda_bw6_761(BW6761_projective_t* arr, uint32_t n, bool inverse, size_t device_id);
|
||||
int ecntt_batch_cuda_bw6_761(
|
||||
BW6761_projective_t* arr, uint32_t arr_size, uint32_t batch_size, bool inverse, size_t device_id);
|
||||
|
||||
BW6761_scalar_t*
|
||||
build_domain_cuda_bw6_761(uint32_t domain_size, uint32_t logn, bool inverse, size_t device_id, size_t stream);
|
||||
|
||||
int interpolate_scalars_cuda_bw6_761(
|
||||
BW6761_scalar_t* d_out,
|
||||
BW6761_scalar_t* d_evaluations,
|
||||
BW6761_scalar_t* d_domain,
|
||||
unsigned n,
|
||||
unsigned device_id,
|
||||
size_t stream);
|
||||
int interpolate_scalars_batch_cuda_bw6_761(
|
||||
BW6761_scalar_t* d_out,
|
||||
BW6761_scalar_t* d_evaluations,
|
||||
BW6761_scalar_t* d_domain,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int interpolate_points_cuda_bw6_761(
|
||||
BW6761_projective_t* d_out,
|
||||
BW6761_projective_t* d_evaluations,
|
||||
BW6761_scalar_t* d_domain,
|
||||
unsigned n,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int interpolate_points_batch_cuda_bw6_761(
|
||||
BW6761_projective_t* d_out,
|
||||
BW6761_projective_t* d_evaluations,
|
||||
BW6761_scalar_t* d_domain,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int interpolate_scalars_on_coset_cuda_bw6_761(
|
||||
BW6761_scalar_t* d_out,
|
||||
BW6761_scalar_t* d_evaluations,
|
||||
BW6761_scalar_t* d_domain,
|
||||
unsigned n,
|
||||
BW6761_scalar_t* coset_powers,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int interpolate_scalars_batch_on_coset_cuda_bw6_761(
|
||||
BW6761_scalar_t* d_out,
|
||||
BW6761_scalar_t* d_evaluations,
|
||||
BW6761_scalar_t* d_domain,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
BW6761_scalar_t* coset_powers,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
|
||||
int evaluate_scalars_cuda_bw6_761(
|
||||
BW6761_scalar_t* d_out,
|
||||
BW6761_scalar_t* d_coefficients,
|
||||
BW6761_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
unsigned device_id,
|
||||
size_t stream);
|
||||
int evaluate_scalars_batch_cuda_bw6_761(
|
||||
BW6761_scalar_t* d_out,
|
||||
BW6761_scalar_t* d_coefficients,
|
||||
BW6761_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int evaluate_points_cuda_bw6_761(
|
||||
BW6761_projective_t* d_out,
|
||||
BW6761_projective_t* d_coefficients,
|
||||
BW6761_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int evaluate_points_batch_cuda_bw6_761(
|
||||
BW6761_projective_t* d_out,
|
||||
BW6761_projective_t* d_coefficients,
|
||||
BW6761_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int evaluate_scalars_on_coset_cuda_bw6_761(
|
||||
BW6761_scalar_t* d_out,
|
||||
BW6761_scalar_t* d_coefficients,
|
||||
BW6761_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
BW6761_scalar_t* coset_powers,
|
||||
unsigned device_id,
|
||||
size_t stream);
|
||||
int evaluate_scalars_on_coset_batch_cuda_bw6_761(
|
||||
BW6761_scalar_t* d_out,
|
||||
BW6761_scalar_t* d_coefficients,
|
||||
BW6761_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
BW6761_scalar_t* coset_powers,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int evaluate_points_on_coset_cuda_bw6_761(
|
||||
BW6761_projective_t* d_out,
|
||||
BW6761_projective_t* d_coefficients,
|
||||
BW6761_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
BW6761_scalar_t* coset_powers,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
int evaluate_points_on_coset_batch_cuda_bw6_761(
|
||||
BW6761_projective_t* d_out,
|
||||
BW6761_projective_t* d_coefficients,
|
||||
BW6761_scalar_t* d_domain,
|
||||
unsigned domain_size,
|
||||
unsigned n,
|
||||
unsigned batch_size,
|
||||
BW6761_scalar_t* coset_powers,
|
||||
size_t device_id,
|
||||
size_t stream);
|
||||
|
||||
int reverse_order_scalars_cuda_bw6_761(BW6761_scalar_t* arr, int n, size_t device_id, size_t stream);
|
||||
int reverse_order_scalars_batch_cuda_bw6_761(
|
||||
BW6761_scalar_t* arr, int n, int batch_size, size_t device_id, size_t stream);
|
||||
int reverse_order_points_cuda_bw6_761(BW6761_projective_t* arr, int n, size_t device_id, size_t stream);
|
||||
int reverse_order_points_batch_cuda_bw6_761(
|
||||
BW6761_projective_t* arr, int n, int batch_size, size_t device_id, size_t stream);
|
||||
int add_scalars_cuda_bw6_761(
|
||||
BW6761_scalar_t* d_out, BW6761_scalar_t* d_in1, BW6761_scalar_t* d_in2, unsigned n, size_t stream);
|
||||
int sub_scalars_cuda_bw6_761(
|
||||
BW6761_scalar_t* d_out, BW6761_scalar_t* d_in1, BW6761_scalar_t* d_in2, unsigned n, size_t stream);
|
||||
int to_montgomery_scalars_cuda_bw6_761(BW6761_scalar_t* d_inout, unsigned n, size_t stream);
|
||||
int from_montgomery_scalars_cuda_bw6_761(BW6761_scalar_t* d_inout, unsigned n, size_t stream);
|
||||
|
||||
// points g1
|
||||
int to_montgomery_proj_points_cuda_bw6_761(BW6761_projective_t* d_inout, unsigned n, size_t stream);
|
||||
int from_montgomery_proj_points_cuda_bw6_761(BW6761_projective_t* d_inout, unsigned n, size_t stream);
|
||||
int to_montgomery_aff_points_cuda_bw6_761(BW6761_affine_t* d_inout, unsigned n, size_t stream);
|
||||
int from_montgomery_aff_points_cuda_bw6_761(BW6761_affine_t* d_inout, unsigned n, size_t stream);
|
||||
|
||||
// points g2
|
||||
int to_montgomery_proj_points_g2_cuda_bw6_761(BW6761_g2_projective_t* d_inout, unsigned n, size_t stream);
|
||||
int from_montgomery_proj_points_g2_cuda_bw6_761(BW6761_g2_projective_t* d_inout, unsigned n, size_t stream);
|
||||
int to_montgomery_aff_points_g2_cuda_bw6_761(BW6761_g2_affine_t* d_inout, unsigned n, size_t stream);
|
||||
int from_montgomery_aff_points_g2_cuda_bw6_761(BW6761_g2_affine_t* d_inout, unsigned n, size_t stream);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* _BW6761_NTT_H */
|
||||
@@ -1,50 +0,0 @@
|
||||
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
#include <cuda.h>
|
||||
#include <stdbool.h>
|
||||
// projective.h
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef struct BW6761_projective_t BW6761_projective_t;
|
||||
typedef struct BW6761_g2_projective_t BW6761_g2_projective_t;
|
||||
typedef struct BW6761_affine_t BW6761_affine_t;
|
||||
typedef struct BW6761_g2_affine_t BW6761_g2_affine_t;
|
||||
typedef struct BW6761_scalar_t BW6761_scalar_t;
|
||||
|
||||
bool projective_is_on_curve_bw6_761(BW6761_projective_t* point1);
|
||||
|
||||
int random_scalar_bw6_761(BW6761_scalar_t* out);
|
||||
int random_projective_bw6_761(BW6761_projective_t* out);
|
||||
BW6761_projective_t* projective_zero_bw6_761();
|
||||
int projective_to_affine_bw6_761(BW6761_affine_t* out, BW6761_projective_t* point1);
|
||||
int projective_from_affine_bw6_761(BW6761_projective_t* out, BW6761_affine_t* point1);
|
||||
|
||||
int random_g2_projective_bw6_761(BW6761_g2_projective_t* out);
|
||||
int g2_projective_to_affine_bw6_761(BW6761_g2_affine_t* out, BW6761_g2_projective_t* point1);
|
||||
int g2_projective_from_affine_bw6_761(BW6761_g2_projective_t* out, BW6761_g2_affine_t* point1);
|
||||
bool g2_projective_is_on_curve_bw6_761(BW6761_g2_projective_t* point1);
|
||||
|
||||
bool eq_bw6_761(BW6761_projective_t* point1, BW6761_projective_t* point2);
|
||||
bool eq_g2_bw6_761(BW6761_g2_projective_t* point1, BW6761_g2_projective_t* point2);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
@@ -1,49 +0,0 @@
|
||||
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
#include <stdbool.h>
|
||||
#include <cuda.h>
|
||||
// ve_mod_mult.h
|
||||
|
||||
#ifndef _BW6761_VEC_MULT_H
|
||||
#define _BW6761_VEC_MULT_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef struct BW6761_projective_t BW6761_projective_t;
|
||||
typedef struct BW6761_scalar_t BW6761_scalar_t;
|
||||
|
||||
int32_t vec_mod_mult_point_bw6_761(
|
||||
BW6761_projective_t* inout, BW6761_scalar_t* scalar_vec, size_t n_elments, size_t device_id);
|
||||
int32_t vec_mod_mult_scalar_bw6_761(
|
||||
BW6761_scalar_t* inout, BW6761_scalar_t* scalar_vec, size_t n_elments, size_t device_id);
|
||||
int32_t vec_mod_mult_device_scalar_bw6_761(
|
||||
BW6761_scalar_t* inout, BW6761_scalar_t* scalar_vec, size_t n_elements, size_t device_id);
|
||||
int32_t matrix_vec_mod_mult_bw6_761(
|
||||
BW6761_scalar_t* matrix_flattened,
|
||||
BW6761_scalar_t* input,
|
||||
BW6761_scalar_t* output,
|
||||
size_t n_elments,
|
||||
size_t device_id);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* _BW6761_VEC_MULT_H */
|
||||
@@ -1,209 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bw6761
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// #cgo CFLAGS: -I./include/
|
||||
// #cgo CFLAGS: -I/usr/local/cuda/include
|
||||
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbw6761
|
||||
// #include "msm.h"
|
||||
import "C"
|
||||
|
||||
func Msm(out *G1ProjectivePoint, points []G1PointAffine, scalars []G1ScalarField, device_id int) (*G1ProjectivePoint, error) {
|
||||
if len(points) != len(scalars) {
|
||||
return nil, errors.New("error on: len(points) != len(scalars)")
|
||||
}
|
||||
|
||||
pointsC := (*C.BW6761_affine_t)(unsafe.Pointer(&points[0]))
|
||||
scalarsC := (*C.BW6761_scalar_t)(unsafe.Pointer(&scalars[0]))
|
||||
outC := (*C.BW6761_projective_t)(unsafe.Pointer(out))
|
||||
ret := C.msm_cuda_bw6_761(outC, pointsC, scalarsC, C.size_t(len(points)), C.size_t(device_id))
|
||||
|
||||
if ret != 0 {
|
||||
return nil, fmt.Errorf("msm_cuda_bw6_761 returned error code: %d", ret)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func MsmG2(out *G2Point, points []G2PointAffine, scalars []G1ScalarField, device_id int) (*G2Point, error) {
|
||||
if len(points) != len(scalars) {
|
||||
return nil, errors.New("error on: len(points) != len(scalars)")
|
||||
}
|
||||
|
||||
pointsC := (*C.BW6761_g2_affine_t)(unsafe.Pointer(&points[0]))
|
||||
scalarsC := (*C.BW6761_scalar_t)(unsafe.Pointer(&scalars[0]))
|
||||
outC := (*C.BW6761_g2_projective_t)(unsafe.Pointer(out))
|
||||
|
||||
ret := C.msm_g2_cuda_bw6_761(outC, pointsC, scalarsC, C.size_t(len(points)), C.size_t(device_id))
|
||||
|
||||
if ret != 0 {
|
||||
return nil, fmt.Errorf("msm_g2_cuda_bw6_761 returned error code: %d", ret)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func MsmBatch(points *[]G1PointAffine, scalars *[]G1ScalarField, batchSize, deviceId int) ([]G1ProjectivePoint, error) {
|
||||
// Check for nil pointers
|
||||
if points == nil || scalars == nil {
|
||||
return nil, errors.New("points or scalars is nil")
|
||||
}
|
||||
|
||||
if len(*points) != len(*scalars) {
|
||||
return nil, errors.New("error on: len(points) != len(scalars)")
|
||||
}
|
||||
|
||||
// Check for empty slices
|
||||
if len(*points) == 0 || len(*scalars) == 0 {
|
||||
return nil, errors.New("points or scalars is empty")
|
||||
}
|
||||
|
||||
// Check for zero batchSize
|
||||
if batchSize <= 0 {
|
||||
return nil, errors.New("error on: batchSize must be greater than zero")
|
||||
}
|
||||
|
||||
out := make([]G1ProjectivePoint, batchSize)
|
||||
|
||||
for i := 0; i < len(out); i++ {
|
||||
var p G1ProjectivePoint
|
||||
p.SetZero()
|
||||
|
||||
out[i] = p
|
||||
}
|
||||
|
||||
outC := (*C.BW6761_projective_t)(unsafe.Pointer(&out[0]))
|
||||
pointsC := (*C.BW6761_affine_t)(unsafe.Pointer(&(*points)[0]))
|
||||
scalarsC := (*C.BW6761_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
|
||||
msmSizeC := C.size_t(len(*points) / batchSize)
|
||||
deviceIdC := C.size_t(deviceId)
|
||||
batchSizeC := C.size_t(batchSize)
|
||||
|
||||
ret := C.msm_batch_cuda_bw6_761(outC, pointsC, scalarsC, batchSizeC, msmSizeC, deviceIdC)
|
||||
if ret != 0 {
|
||||
return nil, fmt.Errorf("msm_batch_cuda_bw6_761 returned error code: %d", ret)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
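A host-side usage sketch for the batched MSM. It assumes len(points) == batchSize * msmSize with each result computed over one contiguous msmSize-sized chunk (the usual batch layout), and reuses the GeneratePoints/GenerateScalars helpers from the test file further down; the sizes are illustrative only:

batchSize := 4
msmSize := 1 << 10

points := GeneratePoints(batchSize * msmSize)
scalars := GenerateScalars(batchSize*msmSize, false)

// One G1ProjectivePoint result per batch element.
results, err := MsmBatch(&points, &scalars, batchSize, 0)
if err != nil {
	panic(err)
}
_ = results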
|
||||
|
||||
func MsmG2Batch(points *[]G2PointAffine, scalars *[]G1ScalarField, batchSize, deviceId int) ([]G2Point, error) {
|
||||
// Check for nil pointers
|
||||
if points == nil || scalars == nil {
|
||||
return nil, errors.New("points or scalars is nil")
|
||||
}
|
||||
|
||||
if len(*points) != len(*scalars) {
|
||||
return nil, errors.New("error on: len(points) != len(scalars)")
|
||||
}
|
||||
|
||||
// Check for empty slices
|
||||
if len(*points) == 0 || len(*scalars) == 0 {
|
||||
return nil, errors.New("points or scalars is empty")
|
||||
}
|
||||
|
||||
// Check for zero batchSize
|
||||
if batchSize <= 0 {
|
||||
return nil, errors.New("error on: batchSize must be greater than zero")
|
||||
}
|
||||
|
||||
out := make([]G2Point, batchSize)
|
||||
|
||||
outC := (*C.BW6761_g2_projective_t)(unsafe.Pointer(&out[0]))
|
||||
pointsC := (*C.BW6761_g2_affine_t)(unsafe.Pointer(&(*points)[0]))
|
||||
scalarsC := (*C.BW6761_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
|
||||
msmSizeC := C.size_t(len(*points) / batchSize)
|
||||
deviceIdC := C.size_t(deviceId)
|
||||
batchSizeC := C.size_t(batchSize)
|
||||
|
||||
ret := C.msm_batch_g2_cuda_bw6_761(outC, pointsC, scalarsC, batchSizeC, msmSizeC, deviceIdC)
|
||||
if ret != 0 {
|
||||
return nil, fmt.Errorf("msm_batch_cuda_bw6_761 returned error code: %d", ret)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func Commit(d_out, d_scalars, d_points unsafe.Pointer, count, bucketFactor int) int {
|
||||
d_outC := (*C.BW6761_projective_t)(d_out)
|
||||
scalarsC := (*C.BW6761_scalar_t)(d_scalars)
|
||||
pointsC := (*C.BW6761_affine_t)(d_points)
|
||||
countC := (C.size_t)(count)
|
||||
largeBucketFactorC := C.uint(bucketFactor)
|
||||
|
||||
ret := C.commit_cuda_bw6_761(d_outC, scalarsC, pointsC, countC, largeBucketFactorC, 0)
|
||||
|
||||
if ret != 0 {
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
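Commit operates on device pointers only. A condensed sketch of the required allocations and copies, mirroring TestCommitMSM further down (goicicle.CudaMalloc, CudaMemCpyHtoD and CudaMemCpyDtoH are the helpers used there):

count := 1 << 8
points := GeneratePoints(count)
scalars := GenerateScalars(count, false)

var outSize G1ProjectivePoint
out_d, _ := goicicle.CudaMalloc(int(unsafe.Sizeof(outSize)))

var pointSize G1PointAffine
pointsBytes := count * int(unsafe.Sizeof(pointSize))
points_d, _ := goicicle.CudaMalloc(pointsBytes)
goicicle.CudaMemCpyHtoD[G1PointAffine](points_d, points, pointsBytes)

var scalarSize G1ScalarField
scalarBytes := count * int(unsafe.Sizeof(scalarSize))
scalars_d, _ := goicicle.CudaMalloc(scalarBytes)
goicicle.CudaMemCpyHtoD[G1ScalarField](scalars_d, scalars, scalarBytes)

if ret := Commit(out_d, scalars_d, points_d, count, 10); ret != 0 {
	panic("commit_cuda_bw6_761 failed")
}

// Copy the single result point back to the host.
result := make([]G1ProjectivePoint, 1)
goicicle.CudaMemCpyDtoH[G1ProjectivePoint](result, out_d, int(unsafe.Sizeof(outSize)))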
|
||||
|
||||
func CommitG2(d_out, d_scalars, d_points unsafe.Pointer, count, bucketFactor int) int {
|
||||
d_outC := (*C.BW6761_g2_projective_t)(d_out)
|
||||
scalarsC := (*C.BW6761_scalar_t)(d_scalars)
|
||||
pointsC := (*C.BW6761_g2_affine_t)(d_points)
|
||||
countC := (C.size_t)(count)
|
||||
largeBucketFactorC := C.uint(bucketFactor)
|
||||
|
||||
ret := C.commit_g2_cuda_bw6_761(d_outC, scalarsC, pointsC, countC, largeBucketFactorC, 0)
|
||||
|
||||
if ret != 0 {
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func CommitBatch(d_out, d_scalars, d_points unsafe.Pointer, count, batch_size int) int {
|
||||
d_outC := (*C.BW6761_projective_t)(d_out)
|
||||
scalarsC := (*C.BW6761_scalar_t)(d_scalars)
|
||||
pointsC := (*C.BW6761_affine_t)(d_points)
|
||||
countC := (C.size_t)(count)
|
||||
batch_sizeC := (C.size_t)(batch_size)
|
||||
|
||||
ret := C.commit_batch_cuda_bw6_761(d_outC, scalarsC, pointsC, countC, batch_sizeC, 0)
|
||||
|
||||
if ret != 0 {
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func CommitG2Batch(d_out, d_scalars, d_points unsafe.Pointer, count, batch_size int) int {
|
||||
d_outC := (*C.BW6761_g2_projective_t)(d_out)
|
||||
scalarsC := (*C.BW6761_scalar_t)(d_scalars)
|
||||
pointsC := (*C.BW6761_g2_affine_t)(d_points)
|
||||
countC := (C.size_t)(count)
|
||||
batch_sizeC := (C.size_t)(batch_size)
|
||||
|
||||
ret := C.commit_batch_g2_cuda_bw6_761(d_outC, scalarsC, pointsC, countC, batch_sizeC, 0, nil)
|
||||
|
||||
if ret != 0 {
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
@@ -1,367 +0,0 @@
|
||||
// Copyright 2023 Ingonyama
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by Ingonyama DO NOT EDIT
|
||||
|
||||
package bw6761
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"testing"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/ingonyama-zk/icicle/goicicle"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func GeneratePoints(count int) []G1PointAffine {
|
||||
// Declare a slice of affine points
|
||||
var points []G1PointAffine
|
||||
|
||||
// populate the slice
|
||||
for i := 0; i < 10; i++ {
|
||||
var pointProjective G1ProjectivePoint
|
||||
pointProjective.Random()
|
||||
|
||||
var pointAffine G1PointAffine
|
||||
pointAffine.FromProjective(&pointProjective)
|
||||
|
||||
points = append(points, pointAffine)
|
||||
}
|
||||
|
||||
log2_10 := math.Log2(10)
|
||||
log2Count := math.Log2(float64(count))
|
||||
log2Size := int(math.Ceil(log2Count - log2_10))
|
||||
|
||||
for i := 0; i < log2Size; i++ {
|
||||
points = append(points, points...)
|
||||
}
|
||||
|
||||
return points[:count]
|
||||
}
|
||||
|
||||
func GeneratePointsProj(count int) []G1ProjectivePoint {
|
||||
// Declare a slice of projective points
|
||||
var points []G1ProjectivePoint
|
||||
// Use a loop to populate the slice
|
||||
for i := 0; i < count; i++ {
|
||||
var p G1ProjectivePoint
|
||||
p.Random()
|
||||
|
||||
points = append(points, p)
|
||||
}
|
||||
|
||||
return points
|
||||
}
|
||||
|
||||
func GenerateScalars(count int, skewed bool) []G1ScalarField {
|
||||
// Declare a slice of scalar field elements
|
||||
var scalars []G1ScalarField
|
||||
|
||||
var rand G1ScalarField
|
||||
var zero G1ScalarField
|
||||
var one G1ScalarField
|
||||
var randLarge G1ScalarField
|
||||
|
||||
zero.SetZero()
|
||||
one.SetOne()
|
||||
randLarge.Random()
|
||||
|
||||
if skewed && count > 1_200_000 {
|
||||
for i := 0; i < count-1_200_000; i++ {
|
||||
rand.Random()
|
||||
scalars = append(scalars, rand)
|
||||
}
|
||||
|
||||
for i := 0; i < 600_000; i++ {
|
||||
scalars = append(scalars, randLarge)
|
||||
}
|
||||
for i := 0; i < 400_000; i++ {
|
||||
scalars = append(scalars, zero)
|
||||
}
|
||||
for i := 0; i < 200_000; i++ {
|
||||
scalars = append(scalars, one)
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < count; i++ {
|
||||
rand.Random()
|
||||
scalars = append(scalars, rand)
|
||||
}
|
||||
}
|
||||
|
||||
return scalars[:count]
|
||||
}
|
||||
|
||||
func TestMSM(t *testing.T) {
|
||||
fmt.Print() // this prevents the test from hanging. TODO: figure out why
|
||||
for _, v := range []int{8} {
|
||||
count := 1 << v
|
||||
|
||||
points := GeneratePoints(count)
|
||||
fmt.Print("Finished generating points\n")
|
||||
scalars := GenerateScalars(count, false)
|
||||
fmt.Print("Finished generating scalars\n")
|
||||
|
||||
out := new(G1ProjectivePoint)
|
||||
startTime := time.Now()
|
||||
_, e := Msm(out, points, scalars, 0) // inputs are not in Montgomery form
|
||||
fmt.Printf("icicle MSM took: %d ms\n", time.Since(startTime).Milliseconds())
|
||||
|
||||
assert.Equal(t, e, nil, "error should be nil")
|
||||
|
||||
assert.True(t, out.IsOnCurve())
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitMSM(t *testing.T) {
|
||||
for _, v := range []int{8} {
|
||||
count := 1<<v - 1
|
||||
fmt.Print("Started generating points and scalars\n")
|
||||
points := GeneratePoints(count)
|
||||
scalars := GenerateScalars(count, false)
|
||||
fmt.Print("Finished generating points and scalars\n")
|
||||
|
||||
var sizeOutD G1ProjectivePoint
|
||||
out_d, _ := goicicle.CudaMalloc(int(unsafe.Sizeof(sizeOutD)))
|
||||
|
||||
var sizePoints G1PointAffine
|
||||
pointsBytes := count * int(unsafe.Sizeof(sizePoints))
|
||||
points_d, _ := goicicle.CudaMalloc(pointsBytes)
|
||||
goicicle.CudaMemCpyHtoD[G1PointAffine](points_d, points, pointsBytes)
|
||||
|
||||
var sizeScalar G1ScalarField
|
||||
scalarBytes := count * int(unsafe.Sizeof(sizeScalar))
|
||||
scalars_d, _ := goicicle.CudaMalloc(scalarBytes)
|
||||
goicicle.CudaMemCpyHtoD[G1ScalarField](scalars_d, scalars, scalarBytes)
|
||||
|
||||
startTime := time.Now()
|
||||
e := Commit(out_d, scalars_d, points_d, count, 10)
|
||||
fmt.Printf("icicle MSM took: %d ms\n", time.Since(startTime).Milliseconds())
|
||||
|
||||
outHost := make([]G1ProjectivePoint, 1)
|
||||
goicicle.CudaMemCpyDtoH[G1ProjectivePoint](outHost, out_d, int(unsafe.Sizeof(sizeOutD)))
|
||||
|
||||
assert.Equal(t, e, 0, "error should be 0")
|
||||
assert.True(t, outHost[0].IsOnCurve())
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCommit(b *testing.B) {
|
||||
LOG_MSM_SIZES := []int{20, 21, 22, 23, 24, 25, 26}
|
||||
|
||||
for _, logMsmSize := range LOG_MSM_SIZES {
|
||||
msmSize := 1 << logMsmSize
|
||||
points := GeneratePoints(msmSize)
|
||||
scalars := GenerateScalars(msmSize, false)
|
||||
|
||||
out_d, _ := goicicle.CudaMalloc(96)
|
||||
|
||||
pointsBytes := msmSize * 64
|
||||
points_d, _ := goicicle.CudaMalloc(pointsBytes)
|
||||
goicicle.CudaMemCpyHtoD[G1PointAffine](points_d, points, pointsBytes)
|
||||
|
||||
scalarBytes := msmSize * 32
|
||||
scalars_d, _ := goicicle.CudaMalloc(scalarBytes)
|
||||
goicicle.CudaMemCpyHtoD[G1ScalarField](scalars_d, scalars, scalarBytes)
|
||||
|
||||
b.Run(fmt.Sprintf("MSM %d", logMsmSize), func(b *testing.B) {
|
||||
for n := 0; n < b.N; n++ {
|
||||
e := Commit(out_d, scalars_d, points_d, msmSize, 10)
|
||||
|
||||
assert.Equal(b, e, 0, "error should be 0")
|
||||
outHost := make([]G1ProjectivePoint, 1)
|
||||
goicicle.CudaMemCpyDtoH[G1ProjectivePoint](outHost, out_d, 288)
|
||||
assert.True(b, outHost[0].IsOnCurve())
|
||||
if e != 0 {
|
||||
panic("Error occurred")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchMSM(t *testing.T) {
|
||||
for _, batchPow2 := range []int{2, 4} {
|
||||
for _, pow2 := range []int{4, 6} {
|
||||
msmSize := 1 << pow2
|
||||
batchSize := 1 << batchPow2
|
||||
count := msmSize * batchSize
|
||||
|
||||
points := GeneratePoints(count)
|
||||
scalars := GenerateScalars(count, false)
|
||||
|
||||
pointsResults, e := MsmBatch(&points, &scalars, batchSize, 0)
|
||||
|
||||
if e != nil {
|
||||
t.Errorf("MsmBatchBW6761 returned an error: %v", e)
|
||||
}
|
||||
|
||||
if len(pointsResults) != batchSize {
|
||||
t.Errorf("Expected length %d, but got %d", batchSize, len(pointsResults))
|
||||
}
|
||||
|
||||
for _, s := range pointsResults {
|
||||
assert.True(t, s.IsOnCurve())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMSM(b *testing.B) {
|
||||
LOG_MSM_SIZES := []int{20, 21, 22, 23, 24, 25, 26}
|
||||
|
||||
for _, logMsmSize := range LOG_MSM_SIZES {
|
||||
msmSize := 1 << logMsmSize
|
||||
points := GeneratePoints(msmSize)
|
||||
scalars := GenerateScalars(msmSize, false)
|
||||
b.Run(fmt.Sprintf("MSM %d", logMsmSize), func(b *testing.B) {
|
||||
for n := 0; n < b.N; n++ {
|
||||
out := new(G1ProjectivePoint)
|
||||
_, e := Msm(out, points, scalars, 0)
|
||||
|
||||
if e != nil {
|
||||
panic("Error occurred")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// G2
|
||||
func GenerateG2Points(count int) []G2PointAffine {
|
||||
// Declare a slice of G2 affine points
|
||||
var points []G2PointAffine
|
||||
|
||||
// populate the slice
|
||||
for i := 0; i < 10; i++ {
|
||||
fmt.Print() // this prevents the test from hanging. TODO: figure out why
|
||||
var p G2Point
|
||||
p.Random()
|
||||
var affine G2PointAffine
|
||||
affine.FromProjective(&p)
|
||||
|
||||
points = append(points, affine)
|
||||
}
|
||||
|
||||
log2_10 := math.Log2(10)
|
||||
log2Count := math.Log2(float64(count))
|
||||
log2Size := int(math.Ceil(log2Count - log2_10))
|
||||
|
||||
for i := 0; i < log2Size; i++ {
|
||||
points = append(points, points...)
|
||||
}
|
||||
|
||||
return points[:count]
|
||||
}
|
||||
|
||||
func TestMsmG2BW6761(t *testing.T) {
|
||||
for _, v := range []int{8} {
|
||||
count := 1 << v
|
||||
points := GenerateG2Points(count)
|
||||
fmt.Print("Finished generating points\n")
|
||||
scalars := GenerateScalars(count, false)
|
||||
fmt.Print("Finished generating scalars\n")
|
||||
|
||||
out := new(G2Point)
|
||||
_, e := MsmG2(out, points, scalars, 0)
|
||||
assert.Equal(t, e, nil, "error should be nil")
|
||||
assert.True(t, out.IsOnCurve())
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMsmG2BW6761(b *testing.B) {
|
||||
LOG_MSM_SIZES := []int{20, 21, 22, 23, 24, 25, 26}
|
||||
|
||||
for _, logMsmSize := range LOG_MSM_SIZES {
|
||||
msmSize := 1 << logMsmSize
|
||||
points := GenerateG2Points(msmSize)
|
||||
scalars := GenerateScalars(msmSize, false)
|
||||
b.Run(fmt.Sprintf("MSM G2 %d", logMsmSize), func(b *testing.B) {
|
||||
for n := 0; n < b.N; n++ {
|
||||
out := new(G2Point)
|
||||
_, e := MsmG2(out, points, scalars, 0)
|
||||
|
||||
if e != nil {
|
||||
panic("Error occurred")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitG2MSM(t *testing.T) {
|
||||
for _, v := range []int{8} {
|
||||
count := 1 << v
|
||||
|
||||
points := GenerateG2Points(count)
|
||||
fmt.Print("Finished generating points\n")
|
||||
scalars := GenerateScalars(count, false)
|
||||
fmt.Print("Finished generating scalars\n")
|
||||
|
||||
var sizeCheckG2PointAffine G2PointAffine
|
||||
inputPointsBytes := count * int(unsafe.Sizeof(sizeCheckG2PointAffine))
|
||||
|
||||
var sizeCheckG2Point G2Point
|
||||
out_d, _ := goicicle.CudaMalloc(int(unsafe.Sizeof(sizeCheckG2Point)))
|
||||
|
||||
points_d, _ := goicicle.CudaMalloc(inputPointsBytes)
|
||||
goicicle.CudaMemCpyHtoD[G2PointAffine](points_d, points, inputPointsBytes)
|
||||
|
||||
scalarBytes := count * 32
|
||||
scalars_d, _ := goicicle.CudaMalloc(scalarBytes)
|
||||
goicicle.CudaMemCpyHtoD[G1ScalarField](scalars_d, scalars, scalarBytes)
|
||||
|
||||
startTime := time.Now()
|
||||
e := CommitG2(out_d, scalars_d, points_d, count, 10)
|
||||
fmt.Printf("icicle MSM took: %d ms\n", time.Since(startTime).Milliseconds())
|
||||
|
||||
outHost := make([]G2Point, 1)
|
||||
goicicle.CudaMemCpyDtoH[G2Point](outHost, out_d, int(unsafe.Sizeof(sizeCheckG2Point)))
|
||||
|
||||
assert.Equal(t, e, 0, "error should be 0")
|
||||
assert.Equal(t, len(outHost), 1)
|
||||
result := outHost[0]
|
||||
|
||||
assert.True(t, result.IsOnCurve())
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchG2MSM(t *testing.T) {
|
||||
for _, batchPow2 := range []int{2, 4} {
|
||||
for _, pow2 := range []int{4, 6} {
|
||||
msmSize := 1 << pow2
|
||||
batchSize := 1 << batchPow2
|
||||
count := msmSize * batchSize
|
||||
|
||||
points := GenerateG2Points(count)
|
||||
scalars := GenerateScalars(count, false)
|
||||
|
||||
pointsResults, e := MsmG2Batch(&points, &scalars, batchSize, 0)
|
||||
|
||||
if e != nil {
|
||||
t.Errorf("MsmBatchBW6761 returned an error: %v", e)
|
||||
}
|
||||
|
||||
if len(pointsResults) != batchSize {
|
||||
t.Errorf("Expected length %d, but got %d", batchSize, len(pointsResults))
|
||||
}
|
||||
|
||||
for _, s := range pointsResults {
|
||||
assert.True(t, s.IsOnCurve())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,222 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by Ingonyama DO NOT EDIT

package bw6761

import (
	"errors"
	"fmt"
	"unsafe"

	"github.com/ingonyama-zk/icicle/goicicle"
)

// #cgo CFLAGS: -I./include/
// #cgo CFLAGS: -I/usr/local/cuda/include
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbw6761
// #include "ntt.h"
import "C"

const (
	NONE = 0
	DIF  = 1
	DIT  = 2
)

func Ntt(scalars *[]G1ScalarField, isInverse bool, deviceId int) uint64 {
	scalarsC := (*C.BW6761_scalar_t)(unsafe.Pointer(&(*scalars)[0]))

	ret := C.ntt_cuda_bw6_761(scalarsC, C.uint32_t(len(*scalars)), C.bool(isInverse), C.size_t(deviceId))

	return uint64(ret)
}

func NttBatch(scalars *[]G1ScalarField, isInverse bool, batchSize, deviceId int) uint64 {
	scalarsC := (*C.BW6761_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
	isInverseC := C.bool(isInverse)
	batchSizeC := C.uint32_t(batchSize)
	deviceIdC := C.size_t(deviceId)

	ret := C.ntt_batch_cuda_bw6_761(scalarsC, C.uint32_t(len(*scalars)), batchSizeC, isInverseC, deviceIdC)

	return uint64(ret)
}

func EcNtt(values *[]G1ProjectivePoint, isInverse bool, deviceId int) uint64 {
	valuesC := (*C.BW6761_projective_t)(unsafe.Pointer(&(*values)[0]))
	deviceIdC := C.size_t(deviceId)
	isInverseC := C.bool(isInverse)
	n := C.uint32_t(len(*values))

	ret := C.ecntt_cuda_bw6_761(valuesC, n, isInverseC, deviceIdC)

	return uint64(ret)
}

func EcNttBatch(values *[]G1ProjectivePoint, isInverse bool, batchSize, deviceId int) uint64 {
	valuesC := (*C.BW6761_projective_t)(unsafe.Pointer(&(*values)[0]))
	deviceIdC := C.size_t(deviceId)
	isInverseC := C.bool(isInverse)
	n := C.uint32_t(len(*values))
	batchSizeC := C.uint32_t(batchSize)

	ret := C.ecntt_batch_cuda_bw6_761(valuesC, n, batchSizeC, isInverseC, deviceIdC)

	return uint64(ret)
}

func GenerateTwiddles(d_size int, log_d_size int, inverse bool) (up unsafe.Pointer, err error) {
	domain_size := C.uint32_t(d_size)
	logn := C.uint32_t(log_d_size)
	is_inverse := C.bool(inverse)

	dp := C.build_domain_cuda_bw6_761(domain_size, logn, is_inverse, 0, 0)

	if dp == nil {
		err = errors.New("nullptr returned from generating twiddles")
		return unsafe.Pointer(nil), err
	}

	return unsafe.Pointer(dp), nil
}

// Reverses d_scalars in-place
func ReverseScalars(d_scalars unsafe.Pointer, len int) (int, error) {
	scalarsC := (*C.BW6761_scalar_t)(d_scalars)
	lenC := C.int(len)
	if success := C.reverse_order_scalars_cuda_bw6_761(scalarsC, lenC, 0, 0); success != 0 {
		return -1, errors.New("reversing failed")
	}
	return 0, nil
}

func Interpolate(scalars, twiddles, cosetPowers unsafe.Pointer, size int, isCoset bool) unsafe.Pointer {
	size_d := size * 48
	dp, err := goicicle.CudaMalloc(size_d)

	if err != nil {
		return nil
	}

	d_out := (*C.BW6761_scalar_t)(dp)
	scalarsC := (*C.BW6761_scalar_t)(scalars)
	twiddlesC := (*C.BW6761_scalar_t)(twiddles)
	cosetPowersC := (*C.BW6761_scalar_t)(cosetPowers)
	sizeC := C.uint(size)

	var ret C.int
	if isCoset {
		ret = C.interpolate_scalars_on_coset_cuda_bw6_761(d_out, scalarsC, twiddlesC, sizeC, cosetPowersC, 0, 0)
	} else {
		ret = C.interpolate_scalars_cuda_bw6_761(d_out, scalarsC, twiddlesC, sizeC, 0, 0)
	}
	if ret != 0 {
		fmt.Print("error interpolating")
	}

	return unsafe.Pointer(d_out)
}

func Evaluate(scalars_out, scalars, twiddles, coset_powers unsafe.Pointer, scalars_size, twiddles_size int, isCoset bool) int {
	scalars_outC := (*C.BW6761_scalar_t)(scalars_out)
	scalarsC := (*C.BW6761_scalar_t)(scalars)
	twiddlesC := (*C.BW6761_scalar_t)(twiddles)
	coset_powersC := (*C.BW6761_scalar_t)(coset_powers)
	sizeC := C.uint(scalars_size)
	twiddlesC_size := C.uint(twiddles_size)

	var ret C.int
	if isCoset {
		ret = C.evaluate_scalars_on_coset_cuda_bw6_761(scalars_outC, scalarsC, twiddlesC, twiddlesC_size, sizeC, coset_powersC, 0, 0)
	} else {
		ret = C.evaluate_scalars_cuda_bw6_761(scalars_outC, scalarsC, twiddlesC, twiddlesC_size, sizeC, 0, 0)
	}

	if ret != 0 {
		fmt.Print("error evaluating")
		return -1
	}

	return 0
}

func VecScalarAdd(in1_d, in2_d unsafe.Pointer, size int) int {
	in1_dC := (*C.BW6761_scalar_t)(in1_d)
	in2_dC := (*C.BW6761_scalar_t)(in2_d)
	sizeC := C.uint(size)

	ret := C.add_scalars_cuda_bw6_761(in1_dC, in1_dC, in2_dC, sizeC, 0)

	if ret != 0 {
		fmt.Print("error adding scalar vectors")
		return -1
	}

	return 0
}

func VecScalarSub(in1_d, in2_d unsafe.Pointer, size int) int {
	in1_dC := (*C.BW6761_scalar_t)(in1_d)
	in2_dC := (*C.BW6761_scalar_t)(in2_d)
	sizeC := C.uint(size)

	ret := C.sub_scalars_cuda_bw6_761(in1_dC, in1_dC, in2_dC, sizeC, 0)

	if ret != 0 {
		fmt.Print("error subtracting scalar vectors")
		return -1
	}

	return 0
}

func ToMontgomery(d_scalars unsafe.Pointer, len int) (int, error) {
	scalarsC := (*C.BW6761_scalar_t)(d_scalars)
	lenC := C.uint(len)
	if success := C.to_montgomery_scalars_cuda_bw6_761(scalarsC, lenC, 0); success != 0 {
		return -1, errors.New("to montgomery conversion failed")
	}
	return 0, nil
}

func FromMontgomery(d_scalars unsafe.Pointer, len int) (int, error) {
	scalarsC := (*C.BW6761_scalar_t)(d_scalars)
	lenC := C.uint(len)
	if success := C.from_montgomery_scalars_cuda_bw6_761(scalarsC, lenC, 0); success != 0 {
		return -1, errors.New("from montgomery conversion failed")
	}
	return 0, nil
}

func AffinePointFromMontgomery(d_points unsafe.Pointer, len int) (int, error) {
	pointsC := (*C.BW6761_affine_t)(d_points)
	lenC := C.uint(len)

	if success := C.from_montgomery_aff_points_cuda_bw6_761(pointsC, lenC, 0); success != 0 {
		return -1, errors.New("from montgomery conversion of affine points failed")
	}
	return 0, nil
}

func G2AffinePointFromMontgomery(d_points unsafe.Pointer, len int) (int, error) {
	pointsC := (*C.BW6761_g2_affine_t)(d_points)
	lenC := C.uint(len)

	if success := C.from_montgomery_aff_points_g2_cuda_bw6_761(pointsC, lenC, 0); success != 0 {
		return -1, errors.New("from montgomery conversion of G2 affine points failed")
	}
	return 0, nil
}
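A minimal sketch of the forward/inverse round trip through the Ntt entry point above, not part of the commit. It assumes the GenerateScalars helper used by the tests that follow, and the size is illustrative; TestNttBW6761 below asserts the same property via testify.

// Illustrative sketch only: a forward NTT followed by an inverse NTT should
// return the original scalars, as TestNttBW6761 below asserts.
func exampleNttRoundTrip() bool {
	scalars := GenerateScalars(1<<10, false)

	values := make([]G1ScalarField, len(scalars))
	copy(values, scalars)

	Ntt(&values, false, 0) // forward NTT on device 0
	Ntt(&values, true, 0)  // inverse NTT restores the input

	for i := range values {
		if !values[i].Eq(&scalars[i]) {
			return false
		}
	}
	return true
}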
@@ -1,148 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by Ingonyama DO NOT EDIT

package bw6761

import (
	"fmt"
	"github.com/stretchr/testify/assert"
	"reflect"
	"testing"
)

func TestNttBW6761Batch(t *testing.T) {
	count := 1 << 20
	scalars := GenerateScalars(count, false)

	nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
	copy(nttResult, scalars)

	assert.Equal(t, nttResult, scalars)
	NttBatch(&nttResult, false, count, 0)
	assert.NotEqual(t, nttResult, scalars)

	assert.Equal(t, nttResult, nttResult)
}

func TestNttBW6761CompareToGnarkDIF(t *testing.T) {
	count := 1 << 2
	scalars := GenerateScalars(count, false)

	nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
	copy(nttResult, scalars)

	assert.Equal(t, nttResult, scalars)
	Ntt(&nttResult, false, 0)
	assert.NotEqual(t, nttResult, scalars)

	assert.Equal(t, nttResult, nttResult)
}

func TestINttBW6761CompareToGnarkDIT(t *testing.T) {
	count := 1 << 3
	scalars := GenerateScalars(count, false)

	nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
	copy(nttResult, scalars)

	assert.Equal(t, nttResult, scalars)
	Ntt(&nttResult, true, 0)
	assert.NotEqual(t, nttResult, scalars)

	assert.Equal(t, nttResult, nttResult)
}

func TestNttBW6761(t *testing.T) {
	count := 1 << 3

	scalars := GenerateScalars(count, false)

	nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
	copy(nttResult, scalars)

	assert.Equal(t, nttResult, scalars)
	Ntt(&nttResult, false, 0)
	assert.NotEqual(t, nttResult, scalars)

	inttResult := make([]G1ScalarField, len(nttResult))
	copy(inttResult, nttResult)

	assert.Equal(t, inttResult, nttResult)
	Ntt(&inttResult, true, 0)
	assert.Equal(t, inttResult, scalars)
}

func TestNttBatchBW6761(t *testing.T) {
	count := 1 << 5
	batches := 4

	scalars := GenerateScalars(count*batches, false)

	var scalarVecOfVec [][]G1ScalarField = make([][]G1ScalarField, 0)

	for i := 0; i < batches; i++ {
		start := i * count
		end := (i + 1) * count
		batch := make([]G1ScalarField, len(scalars[start:end]))
		copy(batch, scalars[start:end])
		scalarVecOfVec = append(scalarVecOfVec, batch)
	}

	nttBatchResult := make([]G1ScalarField, len(scalars))
	copy(nttBatchResult, scalars)

	NttBatch(&nttBatchResult, false, count, 0)

	var nttResultVecOfVec [][]G1ScalarField

	for i := 0; i < batches; i++ {
		// Clone the slice
		clone := make([]G1ScalarField, len(scalarVecOfVec[i]))
		copy(clone, scalarVecOfVec[i])

		// Add it to the result vector of vectors
		nttResultVecOfVec = append(nttResultVecOfVec, clone)

		// Call the ntt_bw6_761 function
		Ntt(&nttResultVecOfVec[i], false, 0)
	}

	assert.NotEqual(t, nttBatchResult, scalars)

	// Check that the ntt of each vec of scalars is equal to the intt of the specific batch
	for i := 0; i < batches; i++ {
		if !reflect.DeepEqual(nttResultVecOfVec[i], nttBatchResult[i*count:((i+1)*count)]) {
			t.Errorf("ntt of vec of scalars not equal to intt of specific batch")
		}
	}
}

func BenchmarkNTT(b *testing.B) {
	LOG_NTT_SIZES := []int{12, 15, 20, 21, 22, 23, 24, 25, 26}

	for _, logNTTSize := range LOG_NTT_SIZES {
		nttSize := 1 << logNTTSize
		b.Run(fmt.Sprintf("NTT %d", logNTTSize), func(b *testing.B) {
			scalars := GenerateScalars(nttSize, false)

			nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
			copy(nttResult, scalars)
			for n := 0; n < b.N; n++ {
				Ntt(&nttResult, false, 0)
			}
		})
	}
}
@@ -1,42 +0,0 @@
// Copyright 2023 Ingonyama
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by Ingonyama DO NOT EDIT

package bw6761

// #cgo CFLAGS: -I./include/
// #cgo CFLAGS: -I/usr/local/cuda/include
// #cgo LDFLAGS: -L${SRCDIR}/../../ -lbw6761
// #include "ve_mod_mult.h"
import "C"
import (
	"fmt"
	"unsafe"
)

func VecScalarMulMod(scalarVec1, scalarVec2 unsafe.Pointer, size int) int {
	scalarVec1C := (*C.BW6761_scalar_t)(scalarVec1)
	scalarVec2C := (*C.BW6761_scalar_t)(scalarVec2)
	sizeC := C.size_t(size)

	ret := C.vec_mod_mult_device_scalar_bw6_761(scalarVec1C, scalarVec2C, sizeC, 0)

	if ret != 0 {
		fmt.Print("error multiplying scalar vectors")
		return -1
	}

	return 0
}
@@ -1,49 +0,0 @@
package goicicle

// This file implements CUDA driver context management

// #cgo CFLAGS: -I /usr/local/cuda/include
// #cgo LDFLAGS: -L/usr/local/cuda/lib64 -lcudart
/*
#include <cuda.h>
#include <cuda_runtime.h>
*/
import "C"

import (
	"errors"
	"unsafe"
)

func CudaMalloc(size int) (dp unsafe.Pointer, err error) {
	var p C.void
	dp = unsafe.Pointer(&p)
	if err := C.cudaMalloc(&dp, C.size_t(size)); err != 0 {
		return nil, errors.New("could not create memory space")
	}
	return dp, nil
}

func CudaFree(dp unsafe.Pointer) int {
	if err := C.cudaFree(dp); err != 0 {
		return -1
	}
	return 0
}

func CudaMemCpyHtoD[T any](dst_d unsafe.Pointer, src []T, size int) int {
	src_c := unsafe.Pointer(&src[0])
	if err := C.cudaMemcpy(dst_d, src_c, C.size_t(size), 1); err != 0 {
		return -1
	}
	return 0
}

func CudaMemCpyDtoH[T any](dst []T, src_d unsafe.Pointer, size int) int {
	dst_c := unsafe.Pointer(&dst[0])

	if err := C.cudaMemcpy(dst_c, src_d, C.size_t(size), 2); err != 0 {
		return -1
	}
	return 0
}
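As a usage note, here is a minimal host-to-device round trip built only from the CudaMalloc, CudaMemCpyHtoD, CudaMemCpyDtoH and CudaFree helpers defined above. It is an illustrative sketch, not code from the commit; the buffer and its size are arbitrary.

// Illustrative sketch only: copy a host slice to the device and back using the
// helpers above, returning true if both copies report success.
func exampleRoundTrip() bool {
	host := []uint32{1, 2, 3, 4}
	size := len(host) * 4 // 4 bytes per uint32

	device, err := CudaMalloc(size)
	if err != nil {
		return false
	}
	defer CudaFree(device)

	if CudaMemCpyHtoD[uint32](device, host, size) != 0 {
		return false
	}

	back := make([]uint32, len(host))
	return CudaMemCpyDtoH[uint32](back, device, size) == 0
}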
@@ -1,48 +0,0 @@
#!/bin/bash

SUDO=''
if [ "$EUID" != 0 ]; then
  echo "Icicle setup script should be run with root privileges, please run this as root"
  SUDO='sudo'
fi

TARGET_BN254="libbn254.so"
TARGET_BLS12_381="libbls12_381.so"
TARGET_BLS12_377="libbls12_377.so"
TARGET_BW6_761="libbw6_761.so"

MAKE_FAIL=0

$SUDO make $1 || MAKE_FAIL=1

if [ $MAKE_FAIL != 0 ]; then
  echo "make failed, install dependencies and re-run setup script with root privileges"
  exit
fi

TARGET_BN254_PATH=$(dirname "$(find `pwd` -name $TARGET_BN254 -print -quit)")/
TARGET_BLS12_381_PATH=$(dirname "$(find `pwd` -name $TARGET_BLS12_381 -print -quit)")/
TARGET_BLS12_377_PATH=$(dirname "$(find `pwd` -name $TARGET_BLS12_377 -print -quit)")/
TARGET_BW6_761_PATH=$(dirname "$(find `pwd` -name $TARGET_BW6_761 -print -quit)")/

if [[ "$TARGET_BLS12_377_PATH" != "" ]]; then
  echo "BLS12_377 found @ $TARGET_BLS12_377_PATH"
  export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:$TARGET_BLS12_377_PATH
fi

if [[ "$TARGET_BN254_PATH" != "" ]]; then
  echo "BN254 found @ $TARGET_BN254_PATH"
  export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:$TARGET_BN254_PATH
fi

if [[ "$TARGET_BLS12_381_PATH" != "" ]]; then
  echo "BLS12_381 found @ $TARGET_BLS12_381_PATH"
  export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:$TARGET_BLS12_381_PATH
fi

if [[ "$TARGET_BW6_761_PATH" != "" ]]; then
  echo "BW6_761 found @ $TARGET_BW6_761_PATH"
  export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:$TARGET_BW6_761_PATH
fi
@@ -1,52 +0,0 @@
package config

// {{.SharedLib}}
type Curve struct {
	PackageName        string
	CurveNameUpperCase string
	CurveNameLowerCase string
	SharedLib          string
	ScalarSize         int
	BaseSize           int
	G2ElementSize      int
}

var BW6_761 = Curve{
	PackageName:        "bw6761",
	CurveNameUpperCase: "BW6761",
	CurveNameLowerCase: "bw6_761",
	SharedLib:          "-lbw6761",
	ScalarSize:         12,
	BaseSize:           24,
	G2ElementSize:      6,
}

var BN_254 = Curve{
	PackageName:        "bn254",
	CurveNameUpperCase: "BN254",
	CurveNameLowerCase: "bn254",
	SharedLib:          "-lbn254",
	ScalarSize:         8,
	BaseSize:           8,
	G2ElementSize:      4,
}

var BLS_12_377 = Curve{
	PackageName:        "bls12377",
	CurveNameUpperCase: "BLS12_377",
	CurveNameLowerCase: "bls12_377",
	SharedLib:          "-lbls12_377",
	ScalarSize:         8,
	BaseSize:           12,
	G2ElementSize:      6,
}

var BLS_12_381 = Curve{
	PackageName:        "bls12381",
	CurveNameUpperCase: "BLS12_381",
	CurveNameLowerCase: "bls12_381",
	SharedLib:          "-lbls12_381",
	ScalarSize:         8,
	BaseSize:           12,
	G2ElementSize:      6,
}
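To illustrate how the generator config above is meant to be extended, here is a purely hypothetical entry: it does not exist in the repository, the curve name is invented, and the limb counts are placeholders rather than parameters of any supported curve.

// Hypothetical example only, showing the shape of a new curve definition in this
// config package; none of these values correspond to a real curve in the repo.
var EXAMPLE_CURVE = Curve{
	PackageName:        "examplecurve",
	CurveNameUpperCase: "EXAMPLE",
	CurveNameLowerCase: "example",
	SharedLib:          "-lexample",
	ScalarSize:         8,
	BaseSize:           12,
	G2ElementSize:      6,
}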
@@ -1,310 +0,0 @@
import (
	"unsafe"

	"encoding/binary"
)

// #cgo CFLAGS: -I./include/
// #cgo CFLAGS: -I/usr/local/cuda/include
// #cgo LDFLAGS: -L${SRCDIR}/../../ {{.SharedLib}}
// #include "projective.h"
// #include "ve_mod_mult.h"
import "C"

const SCALAR_SIZE = {{.ScalarSize}}
const BASE_SIZE = {{.BaseSize}}

type G1ScalarField struct {
	S [SCALAR_SIZE]uint32
}

type G1BaseField struct {
	S [BASE_SIZE]uint32
}

/*
 * BaseField Constructors
 */

func (f *G1BaseField) SetZero() *G1BaseField {
	var S [BASE_SIZE]uint32
	f.S = S

	return f
}

func (f *G1BaseField) SetOne() *G1BaseField {
	var S [BASE_SIZE]uint32

	S[0] = 1

	f.S = S
	return f
}

func (p *G1ProjectivePoint) FromAffine(affine *G1PointAffine) *G1ProjectivePoint {
	out := (*C.{{.CurveNameUpperCase}}_projective_t)(unsafe.Pointer(p))
	in := (*C.{{.CurveNameUpperCase}}_affine_t)(unsafe.Pointer(affine))

	C.projective_from_affine_{{.CurveNameLowerCase}}(out, in)

	return p
}

func (f *G1BaseField) FromLimbs(limbs [BASE_SIZE]uint32) *G1BaseField {
	copy(f.S[:], limbs[:])

	return f
}

/*
 * BaseField methods
 */

func (f *G1BaseField) Limbs() [BASE_SIZE]uint32 {
	return f.S
}

func (f *G1BaseField) ToBytesLe() []byte {
	bytes := make([]byte, len(f.S)*4)
	for i, v := range f.S {
		binary.LittleEndian.PutUint32(bytes[i*4:], v)
	}

	return bytes
}

/*
 * ScalarField methods
 */

func (p *G1ScalarField) Random() *G1ScalarField {
	outC := (*C.{{.CurveNameUpperCase}}_scalar_t)(unsafe.Pointer(p))
	C.random_scalar_{{.CurveNameLowerCase}}(outC)

	return p
}

func (f *G1ScalarField) SetZero() *G1ScalarField {
	var S [SCALAR_SIZE]uint32
	f.S = S

	return f
}

func (f *G1ScalarField) SetOne() *G1ScalarField {
	var S [SCALAR_SIZE]uint32
	S[0] = 1
	f.S = S

	return f
}

func (a *G1ScalarField) Eq(b *G1ScalarField) bool {
	for i, v := range a.S {
		if b.S[i] != v {
			return false
		}
	}
	return true
}

/*
 * ScalarField methods
 */

func (f *G1ScalarField) Limbs() [SCALAR_SIZE]uint32 {
	return f.S
}

func (f *G1ScalarField) ToBytesLe() []byte {
	bytes := make([]byte, len(f.S)*4)
	for i, v := range f.S {
		binary.LittleEndian.PutUint32(bytes[i*4:], v)
	}

	return bytes
}

/*
 * Point{{.CurveNameUpperCase}}
 */

type G1ProjectivePoint struct {
	X, Y, Z G1BaseField
}

func (f *G1ProjectivePoint) SetZero() *G1ProjectivePoint {
	var yOne G1BaseField
	yOne.SetOne()

	var xZero G1BaseField
	xZero.SetZero()

	var zZero G1BaseField
	zZero.SetZero()

	f.X = xZero
	f.Y = yOne
	f.Z = zZero

	return f
}

func (p *G1ProjectivePoint) Eq(pCompare *G1ProjectivePoint) bool {
	// Cast *Point{{.CurveNameUpperCase}} to *C.{{.CurveNameUpperCase}}_projective_t
	// The unsafe.Pointer cast is necessary because Go doesn't allow direct casts
	// between different pointer types.
	// It's your responsibility to ensure that the types are compatible.
	pC := (*C.{{.CurveNameUpperCase}}_projective_t)(unsafe.Pointer(p))
	pCompareC := (*C.{{.CurveNameUpperCase}}_projective_t)(unsafe.Pointer(pCompare))

	// Call the C function
	// The C function doesn't keep any references to the data,
	// so it's fine if the Go garbage collector moves or deletes the data later.
	return bool(C.eq_{{.CurveNameLowerCase}}(pC, pCompareC))
}

func (p *G1ProjectivePoint) IsOnCurve() bool {
	point := (*C.{{.CurveNameUpperCase}}_projective_t)(unsafe.Pointer(p))
	res := C.projective_is_on_curve_{{.CurveNameLowerCase}}(point)

	return bool(res)
}

func (p *G1ProjectivePoint) Random() *G1ProjectivePoint {
	outC := (*C.{{.CurveNameUpperCase}}_projective_t)(unsafe.Pointer(p))
	C.random_projective_{{.CurveNameLowerCase}}(outC)

	return p
}

func (p *G1ProjectivePoint) StripZ() *G1PointAffine {
	return &G1PointAffine{
		X: p.X,
		Y: p.Y,
	}
}

func (p *G1ProjectivePoint) FromLimbs(x, y, z *[]uint32) *G1ProjectivePoint {
	var _x G1BaseField
	var _y G1BaseField
	var _z G1BaseField

	_x.FromLimbs(GetFixedLimbs(x))
	_y.FromLimbs(GetFixedLimbs(y))
	_z.FromLimbs(GetFixedLimbs(z))

	p.X = _x
	p.Y = _y
	p.Z = _z

	return p
}

/*
 * PointAffineNoInfinity{{.CurveNameUpperCase}}
 */

type G1PointAffine struct {
	X, Y G1BaseField
}

func (p *G1PointAffine) FromProjective(projective *G1ProjectivePoint) *G1PointAffine {
	in := (*C.{{.CurveNameUpperCase}}_projective_t)(unsafe.Pointer(projective))
	out := (*C.{{.CurveNameUpperCase}}_affine_t)(unsafe.Pointer(p))

	C.projective_to_affine_{{.CurveNameLowerCase}}(out, in)

	return p
}

func (p *G1PointAffine) ToProjective() *G1ProjectivePoint {
	var Z G1BaseField
	Z.SetOne()

	return &G1ProjectivePoint{
		X: p.X,
		Y: p.Y,
		Z: Z,
	}
}

func (p *G1PointAffine) FromLimbs(X, Y *[]uint32) *G1PointAffine {
	var _x G1BaseField
	var _y G1BaseField

	_x.FromLimbs(GetFixedLimbs(X))
	_y.FromLimbs(GetFixedLimbs(Y))

	p.X = _x
	p.Y = _y

	return p
}

/*
 * Multiplication
 */

func MultiplyVec(a []G1ProjectivePoint, b []G1ScalarField, deviceID int) {
	if len(a) != len(b) {
		panic("a and b have different lengths")
	}

	pointsC := (*C.{{.CurveNameUpperCase}}_projective_t)(unsafe.Pointer(&a[0]))
	scalarsC := (*C.{{.CurveNameUpperCase}}_scalar_t)(unsafe.Pointer(&b[0]))
	deviceIdC := C.size_t(deviceID)
	nElementsC := C.size_t(len(a))

	C.vec_mod_mult_point_{{.CurveNameLowerCase}}(pointsC, scalarsC, nElementsC, deviceIdC)
}

func MultiplyScalar(a []G1ScalarField, b []G1ScalarField, deviceID int) {
	if len(a) != len(b) {
		panic("a and b have different lengths")
	}

	aC := (*C.{{.CurveNameUpperCase}}_scalar_t)(unsafe.Pointer(&a[0]))
	bC := (*C.{{.CurveNameUpperCase}}_scalar_t)(unsafe.Pointer(&b[0]))
	deviceIdC := C.size_t(deviceID)
	nElementsC := C.size_t(len(a))

	C.vec_mod_mult_scalar_{{.CurveNameLowerCase}}(aC, bC, nElementsC, deviceIdC)
}

// Multiply a flattened matrix by a vector:
//
// `a` - flattened matrix;
// `b` - vector to multiply `a` by;
func MultiplyMatrix(a []G1ScalarField, b []G1ScalarField, deviceID int) {
	c := make([]G1ScalarField, len(b))
	for i := range c {
		var p G1ScalarField
		p.SetZero()

		c[i] = p
	}

	aC := (*C.{{.CurveNameUpperCase}}_scalar_t)(unsafe.Pointer(&a[0]))
	bC := (*C.{{.CurveNameUpperCase}}_scalar_t)(unsafe.Pointer(&b[0]))
	cC := (*C.{{.CurveNameUpperCase}}_scalar_t)(unsafe.Pointer(&c[0]))
	deviceIdC := C.size_t(deviceID)
	nElementsC := C.size_t(len(a))

	C.matrix_vec_mod_mult_{{.CurveNameLowerCase}}(aC, bC, cC, nElementsC, deviceIdC)
}

/*
 * Utils
 */

func GetFixedLimbs(slice *[]uint32) [BASE_SIZE]uint32 {
	if len(*slice) <= BASE_SIZE {
		limbs := [BASE_SIZE]uint32{}
		copy(limbs[:len(*slice)], *slice)
		return limbs
	}

	panic("slice has too many elements")
}
@@ -1,180 +0,0 @@
import (
	"encoding/binary"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNewField{{.CurveNameUpperCase}}One(t *testing.T) {
	var oneField G1BaseField
	oneField.SetOne()

	rawOneField := [8]uint32([8]uint32{0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0})

	assert.Equal(t, oneField.S, rawOneField)
}

func TestNewField{{.CurveNameUpperCase}}Zero(t *testing.T) {
	var zeroField G1BaseField
	zeroField.SetZero()

	rawZeroField := [8]uint32([8]uint32{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0})

	assert.Equal(t, zeroField.S, rawZeroField)
}

func TestField{{.CurveNameUpperCase}}ToBytesLe(t *testing.T) {
	var p G1ProjectivePoint
	p.Random()

	expected := make([]byte, len(p.X.S)*4) // each uint32 takes 4 bytes
	for i, v := range p.X.S {
		binary.LittleEndian.PutUint32(expected[i*4:], v)
	}

	assert.Equal(t, p.X.ToBytesLe(), expected)
	assert.Equal(t, len(p.X.ToBytesLe()), 32)
}

func TestNewPoint{{.CurveNameUpperCase}}Zero(t *testing.T) {
	var pointZero G1ProjectivePoint
	pointZero.SetZero()

	var baseOne G1BaseField
	baseOne.SetOne()

	var zeroSanity G1BaseField
	zeroSanity.SetZero()

	assert.Equal(t, pointZero.X, zeroSanity)
	assert.Equal(t, pointZero.Y, baseOne)
	assert.Equal(t, pointZero.Z, zeroSanity)
}

func TestFromProjectiveToAffine(t *testing.T) {
	var projective G1ProjectivePoint
	var affine G1PointAffine

	projective.Random()

	affine.FromProjective(&projective)
	var projective2 G1ProjectivePoint
	projective2.FromAffine(&affine)

	assert.True(t, projective.IsOnCurve())
	assert.True(t, projective2.IsOnCurve())
	assert.True(t, projective.Eq(&projective2))
}

func Test{{.CurveNameUpperCase}}Eq(t *testing.T) {
	var p1 G1ProjectivePoint
	p1.Random()
	var p2 G1ProjectivePoint
	p2.Random()

	assert.Equal(t, p1.Eq(&p1), true)
	assert.Equal(t, p1.Eq(&p2), false)
}

func Test{{.CurveNameUpperCase}}StripZ(t *testing.T) {
	var p1 G1ProjectivePoint
	p1.Random()

	p2ZLess := p1.StripZ()

	assert.IsType(t, G1PointAffine{}, *p2ZLess)
	assert.Equal(t, p1.X, p2ZLess.X)
	assert.Equal(t, p1.Y, p2ZLess.Y)
}

func TestPoint{{.CurveNameUpperCase}}fromLimbs(t *testing.T) {
	var p G1ProjectivePoint
	p.Random()

	x := p.X.Limbs()
	y := p.Y.Limbs()
	z := p.Z.Limbs()

	xSlice := x[:]
	ySlice := y[:]
	zSlice := z[:]

	var pFromLimbs G1ProjectivePoint
	pFromLimbs.FromLimbs(&xSlice, &ySlice, &zSlice)

	assert.Equal(t, pFromLimbs, p)
}

func TestNewPointAffineNoInfinity{{.CurveNameUpperCase}}Zero(t *testing.T) {
	var zeroP G1PointAffine

	var zeroSanity G1BaseField
	zeroSanity.SetZero()

	assert.Equal(t, zeroP.X, zeroSanity)
	assert.Equal(t, zeroP.Y, zeroSanity)
}

func TestPointAffineNoInfinity{{.CurveNameUpperCase}}FromLimbs(t *testing.T) {
	// Initialize your test values
	x := [8]uint32{1, 2, 3, 4, 5, 6, 7, 8}
	y := [8]uint32{9, 10, 11, 12, 13, 14, 15, 16}
	xSlice := x[:]
	ySlice := y[:]

	// Execute your function
	var result G1PointAffine
	result.FromLimbs(&xSlice, &ySlice)

	var xBase G1BaseField
	var yBase G1BaseField
	xBase.FromLimbs(x)
	yBase.FromLimbs(y)

	// Define your expected result
	expected := G1PointAffine{
		X: xBase,
		Y: yBase,
	}

	// Test if result is as expected
	assert.Equal(t, expected, result)
}

func TestGetFixedLimbs(t *testing.T) {
	t.Run("case of valid input of length less than 8", func(t *testing.T) {
		slice := []uint32{1, 2, 3, 4, 5, 6, 7}
		expected := [8]uint32{1, 2, 3, 4, 5, 6, 7, 0}

		result := GetFixedLimbs(&slice)
		assert.Equal(t, result, expected)
	})

	t.Run("case of valid input of length 8", func(t *testing.T) {
		slice := []uint32{1, 2, 3, 4, 5, 6, 7, 8}
		expected := [8]uint32{1, 2, 3, 4, 5, 6, 7, 8}

		result := GetFixedLimbs(&slice)
		assert.Equal(t, result, expected)
	})

	t.Run("case of empty input", func(t *testing.T) {
		slice := []uint32{}
		expected := [8]uint32{0, 0, 0, 0, 0, 0, 0, 0}

		result := GetFixedLimbs(&slice)
		assert.Equal(t, result, expected)
	})

	t.Run("case of input length greater than 8", func(t *testing.T) {
		slice := []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9}

		defer func() {
			if r := recover(); r == nil {
				t.Errorf("the code did not panic")
			}
		}()

		GetFixedLimbs(&slice)
	})
}
@@ -1,85 +0,0 @@
import (
	"encoding/binary"
	"unsafe"
)

// #cgo CFLAGS: -I./include/
// #cgo CFLAGS: -I/usr/local/cuda/include
// #cgo LDFLAGS: -L${SRCDIR}/../../ {{.SharedLib}}
// #include "projective.h"
// #include "ve_mod_mult.h"
import "C"

// G2 extension field

type G2Element [{{.G2ElementSize}}]uint64

type ExtentionField struct {
	A0, A1 G2Element
}

type G2PointAffine struct {
	X, Y ExtentionField
}

type G2Point struct {
	X, Y, Z ExtentionField
}

func (p *G2Point) Random() *G2Point {
	outC := (*C.{{.CurveNameUpperCase}}_g2_projective_t)(unsafe.Pointer(p))
	C.random_g2_projective_{{.CurveNameLowerCase}}(outC)

	return p
}

func (p *G2Point) Eq(pCompare *G2Point) bool {
	// Cast *Point{{.CurveNameUpperCase}} to *C.{{.CurveNameUpperCase}}_projective_t
	// The unsafe.Pointer cast is necessary because Go doesn't allow direct casts
	// between different pointer types.
	// It's your responsibility to ensure that the types are compatible.
	pC := (*C.{{.CurveNameUpperCase}}_g2_projective_t)(unsafe.Pointer(p))
	pCompareC := (*C.{{.CurveNameUpperCase}}_g2_projective_t)(unsafe.Pointer(pCompare))

	// Call the C function
	// The C function doesn't keep any references to the data,
	// so it's fine if the Go garbage collector moves or deletes the data later.
	return bool(C.eq_g2_{{.CurveNameLowerCase}}(pC, pCompareC))
}

func (f *G2Element) ToBytesLe() []byte {
	var bytes []byte
	for _, val := range f {
		buf := make([]byte, 8) // 8 bytes because uint64 is 64-bit
		binary.LittleEndian.PutUint64(buf, val)
		bytes = append(bytes, buf...)
	}
	return bytes
}

func (p *G2Point) FromAffine(affine *G2PointAffine) *G2Point {
	out := (*C.{{.CurveNameUpperCase}}_g2_projective_t)(unsafe.Pointer(p))
	in := (*C.{{.CurveNameUpperCase}}_g2_affine_t)(unsafe.Pointer(affine))

	C.g2_projective_from_affine_{{.CurveNameLowerCase}}(out, in)

	return p
}

func (p *G2PointAffine) FromProjective(projective *G2Point) *G2PointAffine {
	out := (*C.{{.CurveNameUpperCase}}_g2_affine_t)(unsafe.Pointer(p))
	in := (*C.{{.CurveNameUpperCase}}_g2_projective_t)(unsafe.Pointer(projective))

	C.g2_projective_to_affine_{{.CurveNameLowerCase}}(out, in)

	return p
}

func (p *G2Point) IsOnCurve() bool {
	// Directly copy memory from the C struct to the Go struct
	point := (*C.{{.CurveNameUpperCase}}_g2_projective_t)(unsafe.Pointer(p))
	res := C.g2_projective_is_on_curve_{{.CurveNameLowerCase}}(point)

	return bool(res)
}
@@ -1,61 +0,0 @@
import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestG2Eqg2(t *testing.T) {
	var point G2Point

	point.Random()

	assert.True(t, point.Eq(&point))
}

func TestG2FromProjectiveToAffine(t *testing.T) {
	var projective G2Point
	projective.Random()

	var affine G2PointAffine
	affine.FromProjective(&projective)

	var projective2 G2Point
	projective2.FromAffine(&affine)

	assert.True(t, projective.IsOnCurve())
	assert.True(t, projective2.IsOnCurve())
	assert.True(t, projective.Eq(&projective2))
}

func TestG2Eqg2NotEqual(t *testing.T) {
	var point G2Point
	point.Random()

	var point2 G2Point
	point2.Random()

	assert.False(t, point.Eq(&point2))
}

func TestG2ToBytes(t *testing.T) {
	element := G2Element{0x6546098ea84b6298, 0x4a384533d1f68aca, 0xaa0666972d771336, 0x1569e4a34321993}
	bytes := element.ToBytesLe()

	assert.Equal(t, bytes, []byte{0x98, 0x62, 0x4b, 0xa8, 0x8e, 0x9, 0x46, 0x65, 0xca, 0x8a, 0xf6, 0xd1, 0x33, 0x45, 0x38, 0x4a, 0x36, 0x13, 0x77, 0x2d, 0x97, 0x66, 0x6, 0xaa, 0x93, 0x19, 0x32, 0x34, 0x4a, 0x9e, 0x56, 0x1})
}

func TestG2ShouldConvertToProjective(t *testing.T) {
	fmt.Print() // this prevents the test from hanging. TODO: figure out why
	var pointProjective G2Point
	pointProjective.Random()

	var pointAffine G2PointAffine
	pointAffine.FromProjective(&pointProjective)

	var proj G2Point
	proj.FromAffine(&pointAffine)

	assert.True(t, proj.IsOnCurve())
	assert.True(t, pointProjective.Eq(&proj))
}
@@ -1,84 +0,0 @@
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdbool.h>
// msm.h

#ifndef _{{.CurveNameUpperCase}}_MSM_H
#define _{{.CurveNameUpperCase}}_MSM_H

#ifdef __cplusplus
extern "C" {
#endif

// Incomplete declaration of {{.CurveNameUpperCase}} projective and affine structs
typedef struct {{.CurveNameUpperCase}}_projective_t {{.CurveNameUpperCase}}_projective_t;
typedef struct {{.CurveNameUpperCase}}_g2_projective_t {{.CurveNameUpperCase}}_g2_projective_t;
typedef struct {{.CurveNameUpperCase}}_affine_t {{.CurveNameUpperCase}}_affine_t;
typedef struct {{.CurveNameUpperCase}}_g2_affine_t {{.CurveNameUpperCase}}_g2_affine_t;
typedef struct {{.CurveNameUpperCase}}_scalar_t {{.CurveNameUpperCase}}_scalar_t;
typedef cudaStream_t CudaStream_t;

int msm_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_projective_t* out, {{.CurveNameUpperCase}}_affine_t* points, {{.CurveNameUpperCase}}_scalar_t* scalars, size_t count, size_t device_id);

int msm_batch_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_projective_t* out,
  {{.CurveNameUpperCase}}_affine_t* points,
  {{.CurveNameUpperCase}}_scalar_t* scalars,
  size_t batch_size,
  size_t msm_size,
  size_t device_id);

int commit_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_projective_t* d_out,
  {{.CurveNameUpperCase}}_scalar_t* d_scalars,
  {{.CurveNameUpperCase}}_affine_t* d_points,
  size_t count,
  unsigned large_bucket_factor,
  size_t device_id);

int commit_batch_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_projective_t* d_out,
  {{.CurveNameUpperCase}}_scalar_t* d_scalars,
  {{.CurveNameUpperCase}}_affine_t* d_points,
  size_t count,
  size_t batch_size,
  size_t device_id);

int msm_g2_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_g2_projective_t* out,
  {{.CurveNameUpperCase}}_g2_affine_t* points,
  {{.CurveNameUpperCase}}_scalar_t* scalars,
  size_t count,
  size_t device_id);

int msm_batch_g2_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_g2_projective_t* out,
  {{.CurveNameUpperCase}}_g2_affine_t* points,
  {{.CurveNameUpperCase}}_scalar_t* scalars,
  size_t batch_size,
  size_t msm_size,
  size_t device_id);

int commit_g2_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_g2_projective_t* d_out,
  {{.CurveNameUpperCase}}_scalar_t* d_scalars,
  {{.CurveNameUpperCase}}_g2_affine_t* d_points,
  size_t count,
  unsigned large_bucket_factor,
  size_t device_id);

int commit_batch_g2_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_g2_projective_t* d_out,
  {{.CurveNameUpperCase}}_scalar_t* d_scalars,
  {{.CurveNameUpperCase}}_g2_affine_t* d_points,
  size_t count,
  size_t batch_size,
  size_t device_id,
  cudaStream_t stream);

#ifdef __cplusplus
}
#endif

#endif /* _{{.CurveNameUpperCase}}_MSM_H */
@@ -1,181 +0,0 @@
#include <cuda.h>
#include <stdbool.h>
// ntt.h

#ifndef _{{.CurveNameUpperCase}}_NTT_H
#define _{{.CurveNameUpperCase}}_NTT_H

#ifdef __cplusplus
extern "C" {
#endif

// Incomplete declaration of {{.CurveNameUpperCase}} projective and affine structs
typedef struct {{.CurveNameUpperCase}}_projective_t {{.CurveNameUpperCase}}_projective_t;
typedef struct {{.CurveNameUpperCase}}_affine_t {{.CurveNameUpperCase}}_affine_t;
typedef struct {{.CurveNameUpperCase}}_scalar_t {{.CurveNameUpperCase}}_scalar_t;

typedef struct {{.CurveNameUpperCase}}_g2_projective_t {{.CurveNameUpperCase}}_g2_projective_t;
typedef struct {{.CurveNameUpperCase}}_g2_affine_t {{.CurveNameUpperCase}}_g2_affine_t;

int ntt_cuda_{{.CurveNameLowerCase}}({{.CurveNameUpperCase}}_scalar_t* arr, uint32_t n, bool inverse, size_t device_id);
int ntt_batch_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_scalar_t* arr, uint32_t arr_size, uint32_t batch_size, bool inverse, size_t device_id);

int ecntt_cuda_{{.CurveNameLowerCase}}({{.CurveNameUpperCase}}_projective_t* arr, uint32_t n, bool inverse, size_t device_id);
int ecntt_batch_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_projective_t* arr, uint32_t arr_size, uint32_t batch_size, bool inverse, size_t device_id);

{{.CurveNameUpperCase}}_scalar_t*
build_domain_cuda_{{.CurveNameLowerCase}}(uint32_t domain_size, uint32_t logn, bool inverse, size_t device_id, size_t stream);

int interpolate_scalars_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_scalar_t* d_out,
  {{.CurveNameUpperCase}}_scalar_t* d_evaluations,
  {{.CurveNameUpperCase}}_scalar_t* d_domain,
  unsigned n,
  unsigned device_id,
  size_t stream);
int interpolate_scalars_batch_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_scalar_t* d_out,
  {{.CurveNameUpperCase}}_scalar_t* d_evaluations,
  {{.CurveNameUpperCase}}_scalar_t* d_domain,
  unsigned n,
  unsigned batch_size,
  size_t device_id,
  size_t stream);
int interpolate_points_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_projective_t* d_out,
  {{.CurveNameUpperCase}}_projective_t* d_evaluations,
  {{.CurveNameUpperCase}}_scalar_t* d_domain,
  unsigned n,
  size_t device_id,
  size_t stream);
int interpolate_points_batch_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_projective_t* d_out,
  {{.CurveNameUpperCase}}_projective_t* d_evaluations,
  {{.CurveNameUpperCase}}_scalar_t* d_domain,
  unsigned n,
  unsigned batch_size,
  size_t device_id,
  size_t stream);
int interpolate_scalars_on_coset_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_scalar_t* d_out,
  {{.CurveNameUpperCase}}_scalar_t* d_evaluations,
  {{.CurveNameUpperCase}}_scalar_t* d_domain,
  unsigned n,
  {{.CurveNameUpperCase}}_scalar_t* coset_powers,
  size_t device_id,
  size_t stream);
int interpolate_scalars_batch_on_coset_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_scalar_t* d_out,
  {{.CurveNameUpperCase}}_scalar_t* d_evaluations,
  {{.CurveNameUpperCase}}_scalar_t* d_domain,
  unsigned n,
  unsigned batch_size,
  {{.CurveNameUpperCase}}_scalar_t* coset_powers,
  size_t device_id,
  size_t stream);

int evaluate_scalars_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_scalar_t* d_out,
  {{.CurveNameUpperCase}}_scalar_t* d_coefficients,
  {{.CurveNameUpperCase}}_scalar_t* d_domain,
  unsigned domain_size,
  unsigned n,
  unsigned device_id,
  size_t stream);
int evaluate_scalars_batch_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_scalar_t* d_out,
  {{.CurveNameUpperCase}}_scalar_t* d_coefficients,
  {{.CurveNameUpperCase}}_scalar_t* d_domain,
  unsigned domain_size,
  unsigned n,
  unsigned batch_size,
  size_t device_id,
  size_t stream);
int evaluate_points_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_projective_t* d_out,
  {{.CurveNameUpperCase}}_projective_t* d_coefficients,
  {{.CurveNameUpperCase}}_scalar_t* d_domain,
  unsigned domain_size,
  unsigned n,
  size_t device_id,
  size_t stream);
int evaluate_points_batch_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_projective_t* d_out,
  {{.CurveNameUpperCase}}_projective_t* d_coefficients,
  {{.CurveNameUpperCase}}_scalar_t* d_domain,
  unsigned domain_size,
  unsigned n,
  unsigned batch_size,
  size_t device_id,
  size_t stream);
int evaluate_scalars_on_coset_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_scalar_t* d_out,
  {{.CurveNameUpperCase}}_scalar_t* d_coefficients,
  {{.CurveNameUpperCase}}_scalar_t* d_domain,
  unsigned domain_size,
  unsigned n,
  {{.CurveNameUpperCase}}_scalar_t* coset_powers,
  unsigned device_id,
  size_t stream);
int evaluate_scalars_on_coset_batch_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_scalar_t* d_out,
  {{.CurveNameUpperCase}}_scalar_t* d_coefficients,
  {{.CurveNameUpperCase}}_scalar_t* d_domain,
  unsigned domain_size,
  unsigned n,
  unsigned batch_size,
  {{.CurveNameUpperCase}}_scalar_t* coset_powers,
  size_t device_id,
  size_t stream);
int evaluate_points_on_coset_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_projective_t* d_out,
  {{.CurveNameUpperCase}}_projective_t* d_coefficients,
  {{.CurveNameUpperCase}}_scalar_t* d_domain,
  unsigned domain_size,
  unsigned n,
  {{.CurveNameUpperCase}}_scalar_t* coset_powers,
  size_t device_id,
  size_t stream);
int evaluate_points_on_coset_batch_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_projective_t* d_out,
  {{.CurveNameUpperCase}}_projective_t* d_coefficients,
  {{.CurveNameUpperCase}}_scalar_t* d_domain,
  unsigned domain_size,
  unsigned n,
  unsigned batch_size,
  {{.CurveNameUpperCase}}_scalar_t* coset_powers,
  size_t device_id,
  size_t stream);

int reverse_order_scalars_cuda_{{.CurveNameLowerCase}}({{.CurveNameUpperCase}}_scalar_t* arr, int n, size_t device_id, size_t stream);
int reverse_order_scalars_batch_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_scalar_t* arr, int n, int batch_size, size_t device_id, size_t stream);
int reverse_order_points_cuda_{{.CurveNameLowerCase}}({{.CurveNameUpperCase}}_projective_t* arr, int n, size_t device_id, size_t stream);
int reverse_order_points_batch_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_projective_t* arr, int n, int batch_size, size_t device_id, size_t stream);
int add_scalars_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_scalar_t* d_out, {{.CurveNameUpperCase}}_scalar_t* d_in1, {{.CurveNameUpperCase}}_scalar_t* d_in2, unsigned n, size_t stream);
int sub_scalars_cuda_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_scalar_t* d_out, {{.CurveNameUpperCase}}_scalar_t* d_in1, {{.CurveNameUpperCase}}_scalar_t* d_in2, unsigned n, size_t stream);
int to_montgomery_scalars_cuda_{{.CurveNameLowerCase}}({{.CurveNameUpperCase}}_scalar_t* d_inout, unsigned n, size_t stream);
int from_montgomery_scalars_cuda_{{.CurveNameLowerCase}}({{.CurveNameUpperCase}}_scalar_t* d_inout, unsigned n, size_t stream);

// points g1
int to_montgomery_proj_points_cuda_{{.CurveNameLowerCase}}({{.CurveNameUpperCase}}_projective_t* d_inout, unsigned n, size_t stream);
int from_montgomery_proj_points_cuda_{{.CurveNameLowerCase}}({{.CurveNameUpperCase}}_projective_t* d_inout, unsigned n, size_t stream);
int to_montgomery_aff_points_cuda_{{.CurveNameLowerCase}}({{.CurveNameUpperCase}}_affine_t* d_inout, unsigned n, size_t stream);
int from_montgomery_aff_points_cuda_{{.CurveNameLowerCase}}({{.CurveNameUpperCase}}_affine_t* d_inout, unsigned n, size_t stream);

// points g2
int to_montgomery_proj_points_g2_cuda_{{.CurveNameLowerCase}}({{.CurveNameUpperCase}}_g2_projective_t* d_inout, unsigned n, size_t stream);
int from_montgomery_proj_points_g2_cuda_{{.CurveNameLowerCase}}({{.CurveNameUpperCase}}_g2_projective_t* d_inout, unsigned n, size_t stream);
int to_montgomery_aff_points_g2_cuda_{{.CurveNameLowerCase}}({{.CurveNameUpperCase}}_g2_affine_t* d_inout, unsigned n, size_t stream);
int from_montgomery_aff_points_g2_cuda_{{.CurveNameLowerCase}}({{.CurveNameUpperCase}}_g2_affine_t* d_inout, unsigned n, size_t stream);

#ifdef __cplusplus
}
#endif

#endif /* _{{.CurveNameUpperCase}}_NTT_H */
@@ -1,33 +0,0 @@
#include <cuda.h>
#include <stdbool.h>
// projective.h

#ifdef __cplusplus
extern "C" {
#endif

typedef struct {{.CurveNameUpperCase}}_projective_t {{.CurveNameUpperCase}}_projective_t;
typedef struct {{.CurveNameUpperCase}}_g2_projective_t {{.CurveNameUpperCase}}_g2_projective_t;
typedef struct {{.CurveNameUpperCase}}_affine_t {{.CurveNameUpperCase}}_affine_t;
typedef struct {{.CurveNameUpperCase}}_g2_affine_t {{.CurveNameUpperCase}}_g2_affine_t;
typedef struct {{.CurveNameUpperCase}}_scalar_t {{.CurveNameUpperCase}}_scalar_t;

bool projective_is_on_curve_{{.CurveNameLowerCase}}({{.CurveNameUpperCase}}_projective_t* point1);

int random_scalar_{{.CurveNameLowerCase}}({{.CurveNameUpperCase}}_scalar_t* out);
int random_projective_{{.CurveNameLowerCase}}({{.CurveNameUpperCase}}_projective_t* out);
{{.CurveNameUpperCase}}_projective_t* projective_zero_{{.CurveNameLowerCase}}();
int projective_to_affine_{{.CurveNameLowerCase}}({{.CurveNameUpperCase}}_affine_t* out, {{.CurveNameUpperCase}}_projective_t* point1);
int projective_from_affine_{{.CurveNameLowerCase}}({{.CurveNameUpperCase}}_projective_t* out, {{.CurveNameUpperCase}}_affine_t* point1);

int random_g2_projective_{{.CurveNameLowerCase}}({{.CurveNameUpperCase}}_g2_projective_t* out);
int g2_projective_to_affine_{{.CurveNameLowerCase}}({{.CurveNameUpperCase}}_g2_affine_t* out, {{.CurveNameUpperCase}}_g2_projective_t* point1);
int g2_projective_from_affine_{{.CurveNameLowerCase}}({{.CurveNameUpperCase}}_g2_projective_t* out, {{.CurveNameUpperCase}}_g2_affine_t* point1);
bool g2_projective_is_on_curve_{{.CurveNameLowerCase}}({{.CurveNameUpperCase}}_g2_projective_t* point1);

bool eq_{{.CurveNameLowerCase}}({{.CurveNameUpperCase}}_projective_t* point1, {{.CurveNameUpperCase}}_projective_t* point2);
bool eq_g2_{{.CurveNameLowerCase}}({{.CurveNameUpperCase}}_g2_projective_t* point1, {{.CurveNameUpperCase}}_g2_projective_t* point2);

#ifdef __cplusplus
}
#endif
@@ -1,32 +0,0 @@
#include <stdbool.h>
#include <cuda.h>
// ve_mod_mult.h

#ifndef _{{.CurveNameUpperCase}}_VEC_MULT_H
#define _{{.CurveNameUpperCase}}_VEC_MULT_H

#ifdef __cplusplus
extern "C" {
#endif

typedef struct {{.CurveNameUpperCase}}_projective_t {{.CurveNameUpperCase}}_projective_t;
typedef struct {{.CurveNameUpperCase}}_scalar_t {{.CurveNameUpperCase}}_scalar_t;

int32_t vec_mod_mult_point_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_projective_t* inout, {{.CurveNameUpperCase}}_scalar_t* scalar_vec, size_t n_elments, size_t device_id);
int32_t vec_mod_mult_scalar_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_scalar_t* inout, {{.CurveNameUpperCase}}_scalar_t* scalar_vec, size_t n_elments, size_t device_id);
int32_t vec_mod_mult_device_scalar_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_scalar_t* inout, {{.CurveNameUpperCase}}_scalar_t* scalar_vec, size_t n_elements, size_t device_id);
int32_t matrix_vec_mod_mult_{{.CurveNameLowerCase}}(
  {{.CurveNameUpperCase}}_scalar_t* matrix_flattened,
  {{.CurveNameUpperCase}}_scalar_t* input,
  {{.CurveNameUpperCase}}_scalar_t* output,
  size_t n_elments,
  size_t device_id);

#ifdef __cplusplus
}
#endif

#endif /* _{{.CurveNameUpperCase}}_VEC_MULT_H */
@@ -1,312 +0,0 @@
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/consensys/bavard"
	config "github.com/ingonyama-zk/icicle/goicicle/templates/curves"
)

const (
	copyrightHolder = "Ingonyama"
	generatedBy     = "Ingonyama"
	copyrightYear   = 2023
	baseDir         = "../curves/"
)

var bgen = bavard.NewBatchGenerator(copyrightHolder, copyrightYear, generatedBy)

func genMainFiles() {
	bn254_entries := []bavard.Entry{
		{File: filepath.Join(baseDir, "bn254", "g1.go"), Templates: []string{"g1.go.tmpl"}},
	}

	bls12377_entries := []bavard.Entry{
		{File: filepath.Join(baseDir, "bls12377", "g1.go"), Templates: []string{"g1.go.tmpl"}},
	}

	bls12381_entries := []bavard.Entry{
		{File: filepath.Join(baseDir, "bls12381", "g1.go"), Templates: []string{"g1.go.tmpl"}},
	}

	bw6761_entries := []bavard.Entry{
		{File: filepath.Join(baseDir, "bw6761", "g1.go"), Templates: []string{"g1.go.tmpl"}},
	}

	assertNoError(bgen.Generate(config.BLS_12_377, config.BLS_12_377.PackageName, "./curves/", bls12377_entries...))
	assertNoError(bgen.Generate(config.BN_254, config.BN_254.PackageName, "./curves/", bn254_entries...))
	assertNoError(bgen.Generate(config.BLS_12_381, config.BLS_12_381.PackageName, "./curves/", bls12381_entries...))
	assertNoError(bgen.Generate(config.BW6_761, config.BW6_761.PackageName, "./curves/", bw6761_entries...))

	bn254_g2_entries := []bavard.Entry{
		{File: filepath.Join(baseDir, "bn254", "g2.go"), Templates: []string{"g2.go.tmpl"}},
	}

	bls12377_g2_entries := []bavard.Entry{
		{File: filepath.Join(baseDir, "bls12377", "g2.go"), Templates: []string{"g2.go.tmpl"}},
	}

	bls12381_g2_entries := []bavard.Entry{
		{File: filepath.Join(baseDir, "bls12381", "g2.go"), Templates: []string{"g2.go.tmpl"}},
	}

	bw6761_g2_entries := []bavard.Entry{
		{File: filepath.Join(baseDir, "bw6761", "g2.go"), Templates: []string{"g2.go.tmpl"}},
	}

	assertNoError(bgen.Generate(config.BLS_12_377, config.BLS_12_377.PackageName, "./curves/", bls12377_g2_entries...))
	assertNoError(bgen.Generate(config.BN_254, config.BN_254.PackageName, "./curves/", bn254_g2_entries...))
	assertNoError(bgen.Generate(config.BLS_12_381, config.BLS_12_381.PackageName, "./curves/", bls12381_g2_entries...))
	assertNoError(bgen.Generate(config.BW6_761, config.BW6_761.PackageName, "./curves/", bw6761_g2_entries...))

	bn254_msm_entries := []bavard.Entry{
		{File: filepath.Join(baseDir, "bn254", "msm.go"), Templates: []string{"msm.go.tmpl"}},
	}

	bls12377_msm_entries := []bavard.Entry{
		{File: filepath.Join(baseDir, "bls12377", "msm.go"), Templates: []string{"msm.go.tmpl"}},
	}

	bls12381_msm_entries := []bavard.Entry{
		{File: filepath.Join(baseDir, "bls12381", "msm.go"), Templates: []string{"msm.go.tmpl"}},
	}

	bw6761_msm_entries := []bavard.Entry{
		{File: filepath.Join(baseDir, "bw6761", "msm.go"), Templates: []string{"msm.go.tmpl"}},
	}

	assertNoError(bgen.Generate(config.BLS_12_377, config.BLS_12_377.PackageName, "./msm/", bls12377_msm_entries...))
	assertNoError(bgen.Generate(config.BN_254, config.BN_254.PackageName, "./msm/", bn254_msm_entries...))
	assertNoError(bgen.Generate(config.BLS_12_381, config.BLS_12_381.PackageName, "./msm/", bls12381_msm_entries...))
	assertNoError(bgen.Generate(config.BW6_761, config.BW6_761.PackageName, "./msm/", bw6761_msm_entries...))

	bn254_ntt_entries := []bavard.Entry{
		{File: filepath.Join(baseDir, "bn254", "ntt.go"), Templates: []string{"ntt.go.tmpl"}},
	}

	bls12377_ntt_entries := []bavard.Entry{
		{File: filepath.Join(baseDir, "bls12377", "ntt.go"), Templates: []string{"ntt.go.tmpl"}},
	}

	bls12381_ntt_entries := []bavard.Entry{
		{File: filepath.Join(baseDir, "bls12381", "ntt.go"), Templates: []string{"ntt.go.tmpl"}},
	}

	bw6761_ntt_entries := []bavard.Entry{
		{File: filepath.Join(baseDir, "bw6761", "ntt.go"), Templates: []string{"ntt.go.tmpl"}},
	}

	assertNoError(bgen.Generate(config.BLS_12_377, config.BLS_12_377.PackageName, "./ntt/", bls12377_ntt_entries...))
	assertNoError(bgen.Generate(config.BN_254, config.BN_254.PackageName, "./ntt/", bn254_ntt_entries...))
	assertNoError(bgen.Generate(config.BLS_12_381, config.BLS_12_381.PackageName, "./ntt/", bls12381_ntt_entries...))
	assertNoError(bgen.Generate(config.BW6_761, config.BW6_761.PackageName, "./ntt/", bw6761_ntt_entries...))

	bn254_vec_mod_entries := []bavard.Entry{
		{File: filepath.Join(baseDir, "bn254", "vec_mod.go"), Templates: []string{"vec_mod.go.tmpl"}},
	}

	bls12377_vec_mod_entries := []bavard.Entry{
		{File: filepath.Join(baseDir, "bls12377", "vec_mod.go"), Templates: []string{"vec_mod.go.tmpl"}},
	}

	bls12381_vec_mod_entries := []bavard.Entry{
		{File: filepath.Join(baseDir, "bls12381", "vec_mod.go"), Templates: []string{"vec_mod.go.tmpl"}},
	}

	bw6761_vec_mod_entries := []bavard.Entry{
		{File: filepath.Join(baseDir, "bw6761", "vec_mod.go"), Templates: []string{"vec_mod.go.tmpl"}},
	}

	assertNoError(bgen.Generate(config.BLS_12_377, config.BLS_12_377.PackageName, "./ops/", bls12377_vec_mod_entries...))
	assertNoError(bgen.Generate(config.BN_254, config.BN_254.PackageName, "./ops/", bn254_vec_mod_entries...))
	assertNoError(bgen.Generate(config.BLS_12_381, config.BLS_12_381.PackageName, "./ops/", bls12381_vec_mod_entries...))
	assertNoError(bgen.Generate(config.BW6_761, config.BW6_761.PackageName, "./ops/", bw6761_vec_mod_entries...))

	h_msm_bn254 := []bavard.Entry{
		{File: filepath.Join(baseDir, "bn254", "include", "msm.h"), Templates: []string{"msm.h.tmpl"}},
	}

	h_msm_bls12377 := []bavard.Entry{
		{File: filepath.Join(baseDir, "bls12377", "include", "msm.h"), Templates: []string{"msm.h.tmpl"}},
	}

	h_msm_bls12381 := []bavard.Entry{
		{File: filepath.Join(baseDir, "bls12381", "include", "msm.h"), Templates: []string{"msm.h.tmpl"}},
	}

	h_msm_bw6761 := []bavard.Entry{
		{File: filepath.Join(baseDir, "bw6761", "include", "msm.h"), Templates: []string{"msm.h.tmpl"}},
	}

	assertNoError(bgen.Generate(config.BLS_12_377, config.BLS_12_377.PackageName, "./hfiles/", h_msm_bls12377...))
	assertNoError(bgen.Generate(config.BN_254, config.BN_254.PackageName, "./hfiles/", h_msm_bn254...))
	assertNoError(bgen.Generate(config.BLS_12_381, config.BLS_12_381.PackageName, "./hfiles/", h_msm_bls12381...))
	assertNoError(bgen.Generate(config.BW6_761, config.BW6_761.PackageName, "./hfiles/", h_msm_bw6761...))

	h_ntt_bn254 := []bavard.Entry{
		{File: filepath.Join(baseDir, "bn254", "include", "ntt.h"), Templates: []string{"ntt.h.tmpl"}},
	}

	h_ntt_bls12377 := []bavard.Entry{
		{File: filepath.Join(baseDir, "bls12377", "include", "ntt.h"), Templates: []string{"ntt.h.tmpl"}},
	}

	h_ntt_bls12381 := []bavard.Entry{
		{File: filepath.Join(baseDir, "bls12381", "include", "ntt.h"), Templates: []string{"ntt.h.tmpl"}},
{File: filepath.Join(baseDir, "bls12381", "include", "ntt.h"), Templates: []string{"ntt.h.tmpl"}},
|
||||
}
|
||||
|
||||
h_ntt_bw6761 := []bavard.Entry{
|
||||
{File: filepath.Join(baseDir, "bw6761", "include", "ntt.h"), Templates: []string{"ntt.h.tmpl"}},
|
||||
}
|
||||
|
||||
assertNoError(bgen.Generate(config.BLS_12_377, config.BLS_12_377.PackageName, "./hfiles/", h_ntt_bls12377...))
|
||||
assertNoError(bgen.Generate(config.BN_254, config.BN_254.PackageName, "./hfiles/", h_ntt_bn254...))
|
||||
assertNoError(bgen.Generate(config.BLS_12_381, config.BLS_12_381.PackageName, "./hfiles/", h_ntt_bls12381...))
|
||||
assertNoError(bgen.Generate(config.BW6_761, config.BW6_761.PackageName, "./hfiles/", h_ntt_bw6761...))
|
||||
|
||||
ve_mod_mult_h_bn254 := []bavard.Entry{
|
||||
{File: filepath.Join(baseDir, "bn254", "include", "ve_mod_mult.h"), Templates: []string{"ve_mod_mult.h.tmpl"}},
|
||||
}
|
||||
|
||||
ve_mod_mult_h_bls12377 := []bavard.Entry{
|
||||
{File: filepath.Join(baseDir, "bls12377", "include", "ve_mod_mult.h"), Templates: []string{"ve_mod_mult.h.tmpl"}},
|
||||
}
|
||||
|
||||
ve_mod_mult_ht_bls12381 := []bavard.Entry{
|
||||
{File: filepath.Join(baseDir, "bls12381", "include", "ve_mod_mult.h"), Templates: []string{"ve_mod_mult.h.tmpl"}},
|
||||
}
|
||||
|
||||
ve_mod_mult_ht_bw6761 := []bavard.Entry{
|
||||
{File: filepath.Join(baseDir, "bw6761", "include", "ve_mod_mult.h"), Templates: []string{"ve_mod_mult.h.tmpl"}},
|
||||
}
|
||||
|
||||
assertNoError(bgen.Generate(config.BLS_12_377, config.BLS_12_377.PackageName, "./hfiles/", ve_mod_mult_h_bls12377...))
|
||||
assertNoError(bgen.Generate(config.BN_254, config.BN_254.PackageName, "./hfiles/", ve_mod_mult_h_bn254...))
|
||||
assertNoError(bgen.Generate(config.BLS_12_381, config.BLS_12_381.PackageName, "./hfiles/", ve_mod_mult_ht_bls12381...))
|
||||
assertNoError(bgen.Generate(config.BW6_761, config.BW6_761.PackageName, "./hfiles/", ve_mod_mult_ht_bw6761...))
|
||||
|
||||
projective_bn254 := []bavard.Entry{
|
||||
{File: filepath.Join(baseDir, "bn254", "include", "projective.h"), Templates: []string{"projective.h.tmpl"}},
|
||||
}
|
||||
|
||||
projective_bls12377 := []bavard.Entry{
|
||||
{File: filepath.Join(baseDir, "bls12377", "include", "projective.h"), Templates: []string{"projective.h.tmpl"}},
|
||||
}
|
||||
|
||||
projective_bls12381 := []bavard.Entry{
|
||||
{File: filepath.Join(baseDir, "bls12381", "include", "projective.h"), Templates: []string{"projective.h.tmpl"}},
|
||||
}
|
||||
|
||||
projective_bw6761 := []bavard.Entry{
|
||||
{File: filepath.Join(baseDir, "bw6761", "include", "projective.h"), Templates: []string{"projective.h.tmpl"}},
|
||||
}
|
||||
|
||||
assertNoError(bgen.Generate(config.BLS_12_377, config.BLS_12_377.PackageName, "./hfiles/", projective_bls12377...))
|
||||
assertNoError(bgen.Generate(config.BN_254, config.BN_254.PackageName, "./hfiles/", projective_bn254...))
|
||||
assertNoError(bgen.Generate(config.BLS_12_381, config.BLS_12_381.PackageName, "./hfiles/", projective_bls12381...))
|
||||
assertNoError(bgen.Generate(config.BW6_761, config.BW6_761.PackageName, "./hfiles/", projective_bw6761...))
|
||||
}
|
||||
|
||||
func genTestFiles() {
|
||||
// G1 TESTS
|
||||
bn254_entries := []bavard.Entry{
|
||||
{File: filepath.Join(baseDir, "bn254", "g1_test.go"), Templates: []string{"g1_test.go.tmpl"}},
|
||||
}
|
||||
|
||||
bls12377_entries := []bavard.Entry{
|
||||
{File: filepath.Join(baseDir, "bls12377", "g1_test.go"), Templates: []string{"g1_test.go.tmpl"}},
|
||||
}
|
||||
|
||||
bls12381_entries := []bavard.Entry{
|
||||
{File: filepath.Join(baseDir, "bls12381", "g1_test.go"), Templates: []string{"g1_test.go.tmpl"}},
|
||||
}
|
||||
|
||||
bw6761_entries := []bavard.Entry{
|
||||
{File: filepath.Join(baseDir, "bw6761", "g1_test.go"), Templates: []string{"g1_test.go.tmpl"}},
|
||||
}
|
||||
|
||||
assertNoError(bgen.Generate(config.BLS_12_377, config.BLS_12_377.PackageName, "./curves/", bls12377_entries...))
|
||||
assertNoError(bgen.Generate(config.BN_254, config.BN_254.PackageName, "./curves/", bn254_entries...))
|
||||
assertNoError(bgen.Generate(config.BLS_12_381, config.BLS_12_381.PackageName, "./curves/", bls12381_entries...))
|
||||
assertNoError(bgen.Generate(config.BW6_761, config.BW6_761.PackageName, "./curves/", bw6761_entries...))
|
||||
|
||||
// G2 TESTS
|
||||
bn254_entries_g2_test := []bavard.Entry{
|
||||
{File: filepath.Join(baseDir, "bn254", "g2_test.go"), Templates: []string{"g2_test.go.tmpl"}},
|
||||
}
|
||||
|
||||
bls12377_entries_g2_test := []bavard.Entry{
|
||||
{File: filepath.Join(baseDir, "bls12377", "g2_test.go"), Templates: []string{"g2_test.go.tmpl"}},
|
||||
}
|
||||
|
||||
bls12381_entries_g2_test := []bavard.Entry{
|
||||
{File: filepath.Join(baseDir, "bls12381", "g2_test.go"), Templates: []string{"g2_test.go.tmpl"}},
|
||||
}
|
||||
|
||||
bw6761_entries_g2_test := []bavard.Entry{
|
||||
{File: filepath.Join(baseDir, "bw6761", "g2_test.go"), Templates: []string{"g2_test.go.tmpl"}},
|
||||
}
|
||||
|
||||
assertNoError(bgen.Generate(config.BLS_12_377, config.BLS_12_377.PackageName, "./curves/", bls12377_entries_g2_test...))
|
||||
assertNoError(bgen.Generate(config.BN_254, config.BN_254.PackageName, "./curves/", bn254_entries_g2_test...))
|
||||
assertNoError(bgen.Generate(config.BLS_12_381, config.BLS_12_381.PackageName, "./curves/", bls12381_entries_g2_test...))
|
||||
assertNoError(bgen.Generate(config.BW6_761, config.BW6_761.PackageName, "./curves/", bw6761_entries_g2_test...))
|
||||
|
||||
// MSM TEST
|
||||
bn254_entries_msm_test := []bavard.Entry{
|
||||
{File: filepath.Join(baseDir, "bn254", "msm_test.go"), Templates: []string{"msm_test.go.tmpl"}},
|
||||
}
|
||||
|
||||
bls12377_entries_msm_test := []bavard.Entry{
|
||||
{File: filepath.Join(baseDir, "bls12377", "msm_test.go"), Templates: []string{"msm_test.go.tmpl"}},
|
||||
}
|
||||
|
||||
bls12381_entries_msm_test := []bavard.Entry{
|
||||
{File: filepath.Join(baseDir, "bls12381", "msm_test.go"), Templates: []string{"msm_test.go.tmpl"}},
|
||||
}
|
||||
|
||||
bw6761_entries_msm_test := []bavard.Entry{
|
||||
{File: filepath.Join(baseDir, "bw6761", "msm_test.go"), Templates: []string{"msm_test.go.tmpl"}},
|
||||
}
|
||||
|
||||
assertNoError(bgen.Generate(config.BLS_12_377, config.BLS_12_377.PackageName, "./msm/", bls12377_entries_msm_test...))
|
||||
assertNoError(bgen.Generate(config.BN_254, config.BN_254.PackageName, "./msm/", bn254_entries_msm_test...))
|
||||
assertNoError(bgen.Generate(config.BLS_12_381, config.BLS_12_381.PackageName, "./msm/", bls12381_entries_msm_test...))
|
||||
assertNoError(bgen.Generate(config.BW6_761, config.BW6_761.PackageName, "./msm/", bw6761_entries_msm_test...))
|
||||
|
||||
// FFT TEST
|
||||
bn254_entries_fft_test := []bavard.Entry{
|
||||
{File: filepath.Join(baseDir, "bn254", "ntt_test.go"), Templates: []string{"ntt_test.go.tmpl"}},
|
||||
}
|
||||
|
||||
bls12377_entries_fft_test := []bavard.Entry{
|
||||
{File: filepath.Join(baseDir, "bls12377", "ntt_test.go"), Templates: []string{"ntt_test.go.tmpl"}},
|
||||
}
|
||||
|
||||
bls12381_entries_fft_test := []bavard.Entry{
|
||||
{File: filepath.Join(baseDir, "bls12381", "ntt_test.go"), Templates: []string{"ntt_test.go.tmpl"}},
|
||||
}
|
||||
|
||||
bw6761_entries_msm_test_entries_fft_test := []bavard.Entry{
|
||||
{File: filepath.Join(baseDir, "bw6761", "ntt_test.go"), Templates: []string{"ntt_test.go.tmpl"}},
|
||||
}
|
||||
|
||||
assertNoError(bgen.Generate(config.BLS_12_377, config.BLS_12_377.PackageName, "./ntt/", bls12377_entries_fft_test...))
|
||||
assertNoError(bgen.Generate(config.BN_254, config.BN_254.PackageName, "./ntt/", bn254_entries_fft_test...))
|
||||
assertNoError(bgen.Generate(config.BLS_12_381, config.BLS_12_381.PackageName, "./ntt/", bls12381_entries_fft_test...))
|
||||
assertNoError(bgen.Generate(config.BW6_761, config.BW6_761.PackageName, "./ntt/", bw6761_entries_msm_test_entries_fft_test...))
|
||||
}
|
||||
|
||||
func main() {
|
||||
genMainFiles()
|
||||
genTestFiles()
|
||||
}
|
||||
|
||||
func assertNoError(err error) {
|
||||
if err != nil {
|
||||
fmt.Printf("\n%s\n", err.Error())
|
||||
os.Exit(-1)
|
||||
}
|
||||
}
|
||||
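The generator follows the same pattern for every supported curve: one `bavard.Entry` slice per generated file, then one `Generate` call per curve pointing at the target output directory. As an illustration only, a new curve would be wired into `genMainFiles` like the sketch below; the curve name and the `config.GRUMPKIN` constant are hypothetical placeholders, not part of this change.

```go
// Hypothetical sketch: adding one more curve to genMainFiles.
grumpkin_entries := []bavard.Entry{
  {File: filepath.Join(baseDir, "grumpkin", "g1.go"), Templates: []string{"g1.go.tmpl"}},
}
assertNoError(bgen.Generate(config.GRUMPKIN, config.GRUMPKIN.PackageName, "./curves/", grumpkin_entries...))
```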
@@ -1,191 +0,0 @@
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// #cgo CFLAGS: -I./include/
|
||||
// #cgo CFLAGS: -I/usr/local/cuda/include
|
||||
// #cgo LDFLAGS: -L${SRCDIR}/../../ {{.SharedLib}}
|
||||
// #include "msm.h"
|
||||
import "C"
|
||||
|
||||
func Msm(out *G1ProjectivePoint, points []G1PointAffine, scalars []G1ScalarField, device_id int) (*G1ProjectivePoint, error) {
|
||||
if len(points) != len(scalars) {
|
||||
return nil, errors.New("error on: len(points) != len(scalars)")
|
||||
}
|
||||
|
||||
pointsC := (*C.{{.CurveNameUpperCase}}_affine_t)(unsafe.Pointer(&points[0]))
|
||||
scalarsC := (*C.{{.CurveNameUpperCase}}_scalar_t)(unsafe.Pointer(&scalars[0]))
|
||||
outC := (*C.{{.CurveNameUpperCase}}_projective_t)(unsafe.Pointer(out))
|
||||
ret := C.msm_cuda_{{.CurveNameLowerCase}}(outC, pointsC, scalarsC, C.size_t(len(points)), C.size_t(device_id))
|
||||
|
||||
if ret != 0 {
|
||||
return nil, fmt.Errorf("msm_cuda_{{.CurveNameLowerCase}} returned error code: %d", ret)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func MsmG2(out *G2Point, points []G2PointAffine, scalars []G1ScalarField, device_id int) (*G2Point, error) {
|
||||
if len(points) != len(scalars) {
|
||||
return nil, errors.New("error on: len(points) != len(scalars)")
|
||||
}
|
||||
|
||||
pointsC := (*C.{{.CurveNameUpperCase}}_g2_affine_t)(unsafe.Pointer(&points[0]))
|
||||
scalarsC := (*C.{{.CurveNameUpperCase}}_scalar_t)(unsafe.Pointer(&scalars[0]))
|
||||
outC := (*C.{{.CurveNameUpperCase}}_g2_projective_t)(unsafe.Pointer(out))
|
||||
|
||||
ret := C.msm_g2_cuda_{{.CurveNameLowerCase}}(outC, pointsC, scalarsC, C.size_t(len(points)), C.size_t(device_id))
|
||||
|
||||
if ret != 0 {
|
||||
return nil, fmt.Errorf("msm_g2_cuda_{{.CurveNameLowerCase}} returned error code: %d", ret)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func MsmBatch(points *[]G1PointAffine, scalars *[]G1ScalarField, batchSize, deviceId int) ([]G1ProjectivePoint, error) {
|
||||
// Check for nil pointers
|
||||
if points == nil || scalars == nil {
|
||||
return nil, errors.New("points or scalars is nil")
|
||||
}
|
||||
|
||||
if len(*points) != len(*scalars) {
|
||||
return nil, errors.New("error on: len(points) != len(scalars)")
|
||||
}
|
||||
|
||||
// Check for empty slices
|
||||
if len(*points) == 0 || len(*scalars) == 0 {
|
||||
return nil, errors.New("points or scalars is empty")
|
||||
}
|
||||
|
||||
// Check for zero batchSize
|
||||
if batchSize <= 0 {
|
||||
return nil, errors.New("error on: batchSize must be greater than zero")
|
||||
}
|
||||
|
||||
out := make([]G1ProjectivePoint, batchSize)
|
||||
|
||||
for i := 0; i < len(out); i++ {
|
||||
var p G1ProjectivePoint
|
||||
p.SetZero()
|
||||
|
||||
out[i] = p
|
||||
}
|
||||
|
||||
outC := (*C.{{.CurveNameUpperCase}}_projective_t)(unsafe.Pointer(&out[0]))
|
||||
pointsC := (*C.{{.CurveNameUpperCase}}_affine_t)(unsafe.Pointer(&(*points)[0]))
|
||||
scalarsC := (*C.{{.CurveNameUpperCase}}_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
|
||||
msmSizeC := C.size_t(len(*points) / batchSize)
|
||||
deviceIdC := C.size_t(deviceId)
|
||||
batchSizeC := C.size_t(batchSize)
|
||||
|
||||
ret := C.msm_batch_cuda_{{.CurveNameLowerCase}}(outC, pointsC, scalarsC, batchSizeC, msmSizeC, deviceIdC)
|
||||
if ret != 0 {
|
||||
return nil, fmt.Errorf("msm_batch_cuda_{{.CurveNameLowerCase}} returned error code: %d", ret)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func MsmG2Batch(points *[]G2PointAffine, scalars *[]G1ScalarField, batchSize, deviceId int) ([]G2Point, error) {
|
||||
// Check for nil pointers
|
||||
if points == nil || scalars == nil {
|
||||
return nil, errors.New("points or scalars is nil")
|
||||
}
|
||||
|
||||
if len(*points) != len(*scalars) {
|
||||
return nil, errors.New("error on: len(points) != len(scalars)")
|
||||
}
|
||||
|
||||
// Check for empty slices
|
||||
if len(*points) == 0 || len(*scalars) == 0 {
|
||||
return nil, errors.New("points or scalars is empty")
|
||||
}
|
||||
|
||||
// Check for zero batchSize
|
||||
if batchSize <= 0 {
|
||||
return nil, errors.New("error on: batchSize must be greater than zero")
|
||||
}
|
||||
|
||||
out := make([]G2Point, batchSize)
|
||||
|
||||
outC := (*C.{{.CurveNameUpperCase}}_g2_projective_t)(unsafe.Pointer(&out[0]))
|
||||
pointsC := (*C.{{.CurveNameUpperCase}}_g2_affine_t)(unsafe.Pointer(&(*points)[0]))
|
||||
scalarsC := (*C.{{.CurveNameUpperCase}}_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
|
||||
msmSizeC := C.size_t(len(*points) / batchSize)
|
||||
deviceIdC := C.size_t(deviceId)
|
||||
batchSizeC := C.size_t(batchSize)
|
||||
|
||||
ret := C.msm_batch_g2_cuda_{{.CurveNameLowerCase}}(outC, pointsC, scalarsC, batchSizeC, msmSizeC, deviceIdC)
|
||||
if ret != 0 {
|
||||
return nil, fmt.Errorf("msm_batch_cuda_{{.CurveNameLowerCase}} returned error code: %d", ret)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func Commit(d_out, d_scalars, d_points unsafe.Pointer, count, bucketFactor int) int {
|
||||
d_outC := (*C.{{.CurveNameUpperCase}}_projective_t)(d_out)
|
||||
scalarsC := (*C.{{.CurveNameUpperCase}}_scalar_t)(d_scalars)
|
||||
pointsC := (*C.{{.CurveNameUpperCase}}_affine_t)(d_points)
|
||||
countC := (C.size_t)(count)
|
||||
largeBucketFactorC := C.uint(bucketFactor)
|
||||
|
||||
ret := C.commit_cuda_{{.CurveNameLowerCase}}(d_outC, scalarsC, pointsC, countC, largeBucketFactorC, 0)
|
||||
|
||||
if ret != 0 {
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func CommitG2(d_out, d_scalars, d_points unsafe.Pointer, count, bucketFactor int) int {
|
||||
d_outC := (*C.{{.CurveNameUpperCase}}_g2_projective_t)(d_out)
|
||||
scalarsC := (*C.{{.CurveNameUpperCase}}_scalar_t)(d_scalars)
|
||||
pointsC := (*C.{{.CurveNameUpperCase}}_g2_affine_t)(d_points)
|
||||
countC := (C.size_t)(count)
|
||||
largeBucketFactorC := C.uint(bucketFactor)
|
||||
|
||||
ret := C.commit_g2_cuda_{{.CurveNameLowerCase}}(d_outC, scalarsC, pointsC, countC, largeBucketFactorC, 0)
|
||||
|
||||
if ret != 0 {
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func CommitBatch(d_out, d_scalars, d_points unsafe.Pointer, count, batch_size int) int {
|
||||
d_outC := (*C.{{.CurveNameUpperCase}}_projective_t)(d_out)
|
||||
scalarsC := (*C.{{.CurveNameUpperCase}}_scalar_t)(d_scalars)
|
||||
pointsC := (*C.{{.CurveNameUpperCase}}_affine_t)(d_points)
|
||||
countC := (C.size_t)(count)
|
||||
batch_sizeC := (C.size_t)(batch_size)
|
||||
|
||||
ret := C.commit_batch_cuda_{{.CurveNameLowerCase}}(d_outC, scalarsC, pointsC, countC, batch_sizeC, 0)
|
||||
|
||||
if ret != 0 {
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func CommitG2Batch(d_out, d_scalars, d_points unsafe.Pointer, count, batch_size int) int {
|
||||
d_outC := (*C.{{.CurveNameUpperCase}}_g2_projective_t)(d_out)
|
||||
scalarsC := (*C.{{.CurveNameUpperCase}}_scalar_t)(d_scalars)
|
||||
pointsC := (*C.{{.CurveNameUpperCase}}_g2_affine_t)(d_points)
|
||||
countC := (C.size_t)(count)
|
||||
batch_sizeC := (C.size_t)(batch_size)
|
||||
|
||||
ret := C.msm_batch_g2_cuda_{{.CurveNameLowerCase}}(d_outC, pointsC, scalarsC, countC, batch_sizeC, 0)
|
||||
|
||||
if ret != 0 {
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
@@ -1,342 +0,0 @@
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"testing"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/ingonyama-zk/icicle/goicicle"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func GeneratePoints(count int) []G1PointAffine {
|
||||
// Declare a slice of integers
|
||||
var points []G1PointAffine
|
||||
|
||||
// populate the slice
|
||||
for i := 0; i < 10; i++ {
|
||||
var pointProjective G1ProjectivePoint
|
||||
pointProjective.Random()
|
||||
|
||||
var pointAffine G1PointAffine
|
||||
pointAffine.FromProjective(&pointProjective)
|
||||
|
||||
points = append(points, pointAffine)
|
||||
}
|
||||
|
||||
log2_10 := math.Log2(10)
|
||||
log2Count := math.Log2(float64(count))
|
||||
log2Size := int(math.Ceil(log2Count - log2_10))
|
||||
|
||||
for i := 0; i < log2Size; i++ {
|
||||
points = append(points, points...)
|
||||
}
|
||||
|
||||
return points[:count]
|
||||
}
|
||||
|
||||
func GeneratePointsProj(count int) []G1ProjectivePoint {
|
||||
// Declare a slice of integers
|
||||
var points []G1ProjectivePoint
|
||||
// Use a loop to populate the slice
|
||||
for i := 0; i < count; i++ {
|
||||
var p G1ProjectivePoint
|
||||
p.Random()
|
||||
|
||||
points = append(points, p)
|
||||
}
|
||||
|
||||
return points
|
||||
}
|
||||
|
||||
func GenerateScalars(count int, skewed bool) []G1ScalarField {
|
||||
// Declare a slice of integers
|
||||
var scalars []G1ScalarField
|
||||
|
||||
var rand G1ScalarField
|
||||
var zero G1ScalarField
|
||||
var one G1ScalarField
|
||||
var randLarge G1ScalarField
|
||||
|
||||
zero.SetZero()
|
||||
one.SetOne()
|
||||
randLarge.Random()
|
||||
|
||||
if skewed && count > 1_200_000 {
|
||||
for i := 0; i < count-1_200_000; i++ {
|
||||
rand.Random()
|
||||
scalars = append(scalars, rand)
|
||||
}
|
||||
|
||||
for i := 0; i < 600_000; i++ {
|
||||
scalars = append(scalars, randLarge)
|
||||
}
|
||||
for i := 0; i < 400_000; i++ {
|
||||
scalars = append(scalars, zero)
|
||||
}
|
||||
for i := 0; i < 200_000; i++ {
|
||||
scalars = append(scalars, one)
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < count; i++ {
|
||||
rand.Random()
|
||||
scalars = append(scalars, rand)
|
||||
}
|
||||
}
|
||||
|
||||
return scalars[:count]
|
||||
}
|
||||
|
||||
func TestMSM(t *testing.T) {
|
||||
for _, v := range []int{8} {
|
||||
count := 1 << v
|
||||
|
||||
points := GeneratePoints(count)
|
||||
fmt.Print("Finished generating points\n")
|
||||
scalars := GenerateScalars(count, false)
|
||||
fmt.Print("Finished generating scalars\n")
|
||||
|
||||
out := new(G1ProjectivePoint)
|
||||
startTime := time.Now()
|
||||
_, e := Msm(out, points, scalars, 0) // non mont
|
||||
fmt.Printf("icicle MSM took: %d ms\n", time.Since(startTime).Milliseconds())
|
||||
|
||||
assert.Equal(t, e, nil, "error should be nil")
|
||||
|
||||
assert.True(t, out.IsOnCurve())
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitMSM(t *testing.T) {
|
||||
for _, v := range []int{8} {
|
||||
count := 1<<v - 1
|
||||
|
||||
points := GeneratePoints(count)
|
||||
fmt.Print("Finished generating points\n")
|
||||
scalars := GenerateScalars(count, false)
|
||||
fmt.Print("Finished generating scalars\n")
|
||||
|
||||
out_d, _ := goicicle.CudaMalloc(96)
|
||||
|
||||
pointsBytes := count * 64
|
||||
points_d, _ := goicicle.CudaMalloc(pointsBytes)
|
||||
goicicle.CudaMemCpyHtoD[G1PointAffine](points_d, points, pointsBytes)
|
||||
|
||||
scalarBytes := count * 32
|
||||
scalars_d, _ := goicicle.CudaMalloc(scalarBytes)
|
||||
goicicle.CudaMemCpyHtoD[G1ScalarField](scalars_d, scalars, scalarBytes)
|
||||
|
||||
startTime := time.Now()
|
||||
e := Commit(out_d, scalars_d, points_d, count, 10)
|
||||
fmt.Printf("icicle MSM took: %d ms\n", time.Since(startTime).Milliseconds())
|
||||
|
||||
outHost := make([]G1ProjectivePoint, 1)
|
||||
goicicle.CudaMemCpyDtoH[G1ProjectivePoint](outHost, out_d, 96)
|
||||
|
||||
assert.Equal(t, e, 0, "error should be 0")
|
||||
assert.True(t, outHost[0].IsOnCurve())
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCommit(b *testing.B) {
|
||||
LOG_MSM_SIZES := []int{20, 21, 22, 23, 24, 25, 26}
|
||||
|
||||
for _, logMsmSize := range LOG_MSM_SIZES {
|
||||
msmSize := 1 << logMsmSize
|
||||
points := GeneratePoints(msmSize)
|
||||
scalars := GenerateScalars(msmSize, false)
|
||||
|
||||
out_d, _ := goicicle.CudaMalloc(96)
|
||||
|
||||
pointsBytes := msmSize * 64
|
||||
points_d, _ := goicicle.CudaMalloc(pointsBytes)
|
||||
goicicle.CudaMemCpyHtoD[G1PointAffine](points_d, points, pointsBytes)
|
||||
|
||||
scalarBytes := msmSize * 32
|
||||
scalars_d, _ := goicicle.CudaMalloc(scalarBytes)
|
||||
goicicle.CudaMemCpyHtoD[G1ScalarField](scalars_d, scalars, scalarBytes)
|
||||
|
||||
b.Run(fmt.Sprintf("MSM %d", logMsmSize), func(b *testing.B) {
|
||||
for n := 0; n < b.N; n++ {
|
||||
e := Commit(out_d, scalars_d, points_d, msmSize, 10)
|
||||
|
||||
if e != 0 {
|
||||
panic("Error occurred")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchMSM(t *testing.T) {
|
||||
for _, batchPow2 := range []int{2, 4} {
|
||||
for _, pow2 := range []int{4, 6} {
|
||||
msmSize := 1 << pow2
|
||||
batchSize := 1 << batchPow2
|
||||
count := msmSize * batchSize
|
||||
|
||||
points := GeneratePoints(count)
|
||||
scalars := GenerateScalars(count, false)
|
||||
|
||||
pointsResults, e := MsmBatch(&points, &scalars, batchSize, 0)
|
||||
|
||||
if e != nil {
|
||||
t.Errorf("MsmBatch{{.CurveNameUpperCase}} returned an error: %v", e)
|
||||
}
|
||||
|
||||
if len(pointsResults) != batchSize {
|
||||
t.Errorf("Expected length %d, but got %d", batchSize, len(pointsResults))
|
||||
}
|
||||
|
||||
for _, s := range pointsResults {
|
||||
assert.True(t, s.IsOnCurve())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMSM(b *testing.B) {
|
||||
LOG_MSM_SIZES := []int{20, 21, 22, 23, 24, 25, 26}
|
||||
|
||||
for _, logMsmSize := range LOG_MSM_SIZES {
|
||||
msmSize := 1 << logMsmSize
|
||||
points := GeneratePoints(msmSize)
|
||||
scalars := GenerateScalars(msmSize, false)
|
||||
b.Run(fmt.Sprintf("MSM %d", logMsmSize), func(b *testing.B) {
|
||||
for n := 0; n < b.N; n++ {
|
||||
out := new(G1ProjectivePoint)
|
||||
_, e := Msm(out, points, scalars, 0)
|
||||
|
||||
if e != nil {
|
||||
panic("Error occurred")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// G2
|
||||
func GenerateG2Points(count int) []G2PointAffine {
|
||||
// Declare a slice of integers
|
||||
var points []G2PointAffine
|
||||
|
||||
// populate the slice
|
||||
for i := 0; i < 10; i++ {
|
||||
fmt.Print() // this prevents the test from hanging. TODO: figure out why
|
||||
var p G2Point
|
||||
p.Random()
|
||||
var affine G2PointAffine
|
||||
affine.FromProjective(&p)
|
||||
|
||||
points = append(points, affine)
|
||||
}
|
||||
|
||||
log2_10 := math.Log2(10)
|
||||
log2Count := math.Log2(float64(count))
|
||||
log2Size := int(math.Ceil(log2Count - log2_10))
|
||||
|
||||
for i := 0; i < log2Size; i++ {
|
||||
points = append(points, points...)
|
||||
}
|
||||
|
||||
return points[:count]
|
||||
}
|
||||
|
||||
func TestMsmG2{{.CurveNameUpperCase}}(t *testing.T) {
|
||||
for _, v := range []int{8} {
|
||||
count := 1 << v
|
||||
points := GenerateG2Points(count)
|
||||
fmt.Print("Finished generating points\n")
|
||||
scalars := GenerateScalars(count, false)
|
||||
fmt.Print("Finished generating scalars\n")
|
||||
|
||||
out := new(G2Point)
|
||||
_, e := MsmG2(out, points, scalars, 0)
|
||||
assert.Equal(t, e, nil, "error should be nil")
|
||||
assert.True(t, out.IsOnCurve())
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMsmG2{{.CurveNameUpperCase}}(b *testing.B) {
|
||||
LOG_MSM_SIZES := []int{20, 21, 22, 23, 24, 25, 26}
|
||||
|
||||
for _, logMsmSize := range LOG_MSM_SIZES {
|
||||
msmSize := 1 << logMsmSize
|
||||
points := GenerateG2Points(msmSize)
|
||||
scalars := GenerateScalars(msmSize, false)
|
||||
b.Run(fmt.Sprintf("MSM G2 %d", logMsmSize), func(b *testing.B) {
|
||||
for n := 0; n < b.N; n++ {
|
||||
out := new(G2Point)
|
||||
_, e := MsmG2(out, points, scalars, 0)
|
||||
|
||||
if e != nil {
|
||||
panic("Error occurred")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitG2MSM(t *testing.T) {
|
||||
for _, v := range []int{8} {
|
||||
count := 1 << v
|
||||
|
||||
points := GenerateG2Points(count)
|
||||
fmt.Print("Finished generating points\n")
|
||||
scalars := GenerateScalars(count, false)
|
||||
fmt.Print("Finished generating scalars\n")
|
||||
|
||||
var sizeCheckG2PointAffine G2PointAffine
|
||||
inputPointsBytes := count * int(unsafe.Sizeof(sizeCheckG2PointAffine))
|
||||
|
||||
var sizeCheckG2Point G2Point
|
||||
out_d, _ := goicicle.CudaMalloc(int(unsafe.Sizeof(sizeCheckG2Point)))
|
||||
|
||||
points_d, _ := goicicle.CudaMalloc(inputPointsBytes)
|
||||
goicicle.CudaMemCpyHtoD[G2PointAffine](points_d, points, inputPointsBytes)
|
||||
|
||||
scalarBytes := count * 32
|
||||
scalars_d, _ := goicicle.CudaMalloc(scalarBytes)
|
||||
goicicle.CudaMemCpyHtoD[G1ScalarField](scalars_d, scalars, scalarBytes)
|
||||
|
||||
startTime := time.Now()
|
||||
e := CommitG2(out_d, scalars_d, points_d, count, 10)
|
||||
fmt.Printf("icicle MSM took: %d ms\n", time.Since(startTime).Milliseconds())
|
||||
|
||||
outHost := make([]G2Point, 1)
|
||||
goicicle.CudaMemCpyDtoH[G2Point](outHost, out_d, int(unsafe.Sizeof(sizeCheckG2Point)))
|
||||
|
||||
assert.Equal(t, e, 0, "error should be 0")
|
||||
assert.Equal(t, len(outHost), 1)
|
||||
result := outHost[0]
|
||||
|
||||
assert.True(t, result.IsOnCurve())
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchG2MSM(t *testing.T) {
|
||||
for _, batchPow2 := range []int{2, 4} {
|
||||
for _, pow2 := range []int{4, 6} {
|
||||
msmSize := 1 << pow2
|
||||
batchSize := 1 << batchPow2
|
||||
count := msmSize * batchSize
|
||||
|
||||
points := GenerateG2Points(count)
|
||||
scalars := GenerateScalars(count, false)
|
||||
|
||||
pointsResults, e := MsmG2Batch(&points, &scalars, batchSize, 0)
|
||||
|
||||
if e != nil {
|
||||
t.Errorf("MsmBatch{{.CurveNameUpperCase}} returned an error: %v", e)
|
||||
}
|
||||
|
||||
if len(pointsResults) != batchSize {
|
||||
t.Errorf("Expected length %d, but got %d", batchSize, len(pointsResults))
|
||||
}
|
||||
|
||||
for _, s := range pointsResults {
|
||||
assert.True(t, s.IsOnCurve())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,204 +0,0 @@
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"unsafe"
|
||||
|
||||
"github.com/ingonyama-zk/icicle/goicicle"
|
||||
)
|
||||
|
||||
// #cgo CFLAGS: -I./include/
|
||||
// #cgo CFLAGS: -I/usr/local/cuda/include
|
||||
// #cgo LDFLAGS: -L${SRCDIR}/../../ {{.SharedLib}}
|
||||
// #include "ntt.h"
|
||||
import "C"
|
||||
|
||||
const (
|
||||
NONE = 0
|
||||
DIF = 1
|
||||
DIT = 2
|
||||
)
|
||||
|
||||
func Ntt(scalars *[]G1ScalarField, isInverse bool, deviceId int) uint64 {
|
||||
scalarsC := (*C.{{.CurveNameUpperCase}}_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
|
||||
|
||||
ret := C.ntt_cuda_{{.CurveNameLowerCase}}(scalarsC, C.uint32_t(len(*scalars)), C.bool(isInverse), C.size_t(deviceId))
|
||||
|
||||
return uint64(ret)
|
||||
}
|
||||
|
||||
func NttBatch(scalars *[]G1ScalarField, isInverse bool, batchSize, deviceId int) uint64 {
|
||||
scalarsC := (*C.{{.CurveNameUpperCase}}_scalar_t)(unsafe.Pointer(&(*scalars)[0]))
|
||||
isInverseC := C.bool(isInverse)
|
||||
batchSizeC := C.uint32_t(batchSize)
|
||||
deviceIdC := C.size_t(deviceId)
|
||||
|
||||
ret := C.ntt_batch_cuda_{{.CurveNameLowerCase}}(scalarsC, C.uint32_t(len(*scalars)), batchSizeC, isInverseC, deviceIdC)
|
||||
|
||||
return uint64(ret)
|
||||
}
|
||||
|
||||
func EcNtt(values *[]G1ProjectivePoint, isInverse bool, deviceId int) uint64 {
|
||||
valuesC := (*C.{{.CurveNameUpperCase}}_projective_t)(unsafe.Pointer(&(*values)[0]))
|
||||
deviceIdC := C.size_t(deviceId)
|
||||
isInverseC := C.bool(isInverse)
|
||||
n := C.uint32_t(len(*values))
|
||||
|
||||
ret := C.ecntt_cuda_{{.CurveNameLowerCase}}(valuesC, n, isInverseC, deviceIdC)
|
||||
|
||||
return uint64(ret)
|
||||
}
|
||||
|
||||
func EcNttBatch(values *[]G1ProjectivePoint, isInverse bool, batchSize, deviceId int) uint64 {
|
||||
valuesC := (*C.{{.CurveNameUpperCase}}_projective_t)(unsafe.Pointer(&(*values)[0]))
|
||||
deviceIdC := C.size_t(deviceId)
|
||||
isInverseC := C.bool(isInverse)
|
||||
n := C.uint32_t(len(*values))
|
||||
batchSizeC := C.uint32_t(batchSize)
|
||||
|
||||
ret := C.ecntt_batch_cuda_{{.CurveNameLowerCase}}(valuesC, n, batchSizeC, isInverseC, deviceIdC)
|
||||
|
||||
return uint64(ret)
|
||||
}
|
||||
|
||||
func GenerateTwiddles(d_size int, log_d_size int, inverse bool) (up unsafe.Pointer, err error) {
|
||||
domain_size := C.uint32_t(d_size)
|
||||
logn := C.uint32_t(log_d_size)
|
||||
is_inverse := C.bool(inverse)
|
||||
|
||||
dp := C.build_domain_cuda_{{.CurveNameLowerCase}}(domain_size, logn, is_inverse, 0, 0)
|
||||
|
||||
if dp == nil {
|
||||
err = errors.New("nullptr returned from generating twiddles")
|
||||
return unsafe.Pointer(nil), err
|
||||
}
|
||||
|
||||
return unsafe.Pointer(dp), nil
|
||||
}
|
||||
|
||||
// Reverses d_scalars in-place
|
||||
func ReverseScalars(d_scalars unsafe.Pointer, len int) (int, error) {
|
||||
scalarsC := (*C.{{.CurveNameUpperCase}}_scalar_t)(d_scalars)
|
||||
lenC := C.int(len)
|
||||
if success := C.reverse_order_scalars_cuda_{{.CurveNameLowerCase}}(scalarsC, lenC, 0, 0); success != 0 {
|
||||
return -1, errors.New("reversing failed")
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func Interpolate(scalars, twiddles, cosetPowers unsafe.Pointer, size int, isCoset bool) unsafe.Pointer {
|
||||
size_d := size * 32
|
||||
dp, err := goicicle.CudaMalloc(size_d)
|
||||
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
d_out := (*C.{{.CurveNameUpperCase}}_scalar_t)(dp)
|
||||
scalarsC := (*C.{{.CurveNameUpperCase}}_scalar_t)(scalars)
|
||||
twiddlesC := (*C.{{.CurveNameUpperCase}}_scalar_t)(twiddles)
|
||||
cosetPowersC := (*C.{{.CurveNameUpperCase}}_scalar_t)(cosetPowers)
|
||||
sizeC := C.uint(size)
|
||||
|
||||
var ret C.int
|
||||
if isCoset {
|
||||
ret = C.interpolate_scalars_on_coset_cuda_{{.CurveNameLowerCase}}(d_out, scalarsC, twiddlesC, sizeC, cosetPowersC, 0, 0)
|
||||
} else {
|
||||
ret = C.interpolate_scalars_cuda_{{.CurveNameLowerCase}}(d_out, scalarsC, twiddlesC, sizeC, 0, 0)
|
||||
}
|
||||
if ret != 0 {
|
||||
fmt.Print("error interpolating")
|
||||
}
|
||||
|
||||
return unsafe.Pointer(d_out)
|
||||
}
|
||||
|
||||
func Evaluate(scalars_out, scalars, twiddles, coset_powers unsafe.Pointer, scalars_size, twiddles_size int, isCoset bool) int {
|
||||
scalars_outC := (*C.{{.CurveNameUpperCase}}_scalar_t)(scalars_out)
|
||||
scalarsC := (*C.{{.CurveNameUpperCase}}_scalar_t)(scalars)
|
||||
twiddlesC := (*C.{{.CurveNameUpperCase}}_scalar_t)(twiddles)
|
||||
coset_powersC := (*C.{{.CurveNameUpperCase}}_scalar_t)(coset_powers)
|
||||
sizeC := C.uint(scalars_size)
|
||||
twiddlesC_size := C.uint(twiddles_size)
|
||||
|
||||
var ret C.int
|
||||
if isCoset {
|
||||
ret = C.evaluate_scalars_on_coset_cuda_{{.CurveNameLowerCase}}(scalars_outC, scalarsC, twiddlesC, twiddlesC_size, sizeC, coset_powersC, 0, 0)
|
||||
} else {
|
||||
ret = C.evaluate_scalars_cuda_{{.CurveNameLowerCase}}(scalars_outC, scalarsC, twiddlesC, twiddlesC_size, sizeC, 0, 0)
|
||||
}
|
||||
|
||||
if ret != 0 {
|
||||
fmt.Print("error interpolating")
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func VecScalarAdd(in1_d, in2_d unsafe.Pointer, size int) int {
|
||||
in1_dC := (*C.{{.CurveNameUpperCase}}_scalar_t)(in1_d)
|
||||
in2_dC := (*C.{{.CurveNameUpperCase}}_scalar_t)(in2_d)
|
||||
sizeC := C.uint(size)
|
||||
|
||||
ret := C.add_scalars_cuda_{{.CurveNameLowerCase}}(in1_dC, in1_dC, in2_dC, sizeC, 0)
|
||||
|
||||
if ret != 0 {
|
||||
fmt.Print("error adding scalar vectors")
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func VecScalarSub(in1_d, in2_d unsafe.Pointer, size int) int {
|
||||
in1_dC := (*C.{{.CurveNameUpperCase}}_scalar_t)(in1_d)
|
||||
in2_dC := (*C.{{.CurveNameUpperCase}}_scalar_t)(in2_d)
|
||||
sizeC := C.uint(size)
|
||||
|
||||
ret := C.sub_scalars_cuda_{{.CurveNameLowerCase}}(in1_dC, in1_dC, in2_dC, sizeC, 0)
|
||||
|
||||
if ret != 0 {
|
||||
fmt.Print("error subtracting scalar vectors")
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func ToMontgomery(d_scalars unsafe.Pointer, len int) (int, error) {
|
||||
scalarsC := (*C.{{.CurveNameUpperCase}}_scalar_t)(d_scalars)
|
||||
lenC := C.uint(len)
|
||||
if success := C.to_montgomery_scalars_cuda_{{.CurveNameLowerCase}}(scalarsC, lenC, 0); success != 0 {
|
||||
return -1, errors.New("reversing failed")
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func FromMontgomery(d_scalars unsafe.Pointer, len int) (int, error) {
|
||||
scalarsC := (*C.{{.CurveNameUpperCase}}_scalar_t)(d_scalars)
|
||||
lenC := C.uint(len)
|
||||
if success := C.from_montgomery_scalars_cuda_{{.CurveNameLowerCase}}(scalarsC, lenC, 0); success != 0 {
|
||||
return -1, errors.New("reversing failed")
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func AffinePointFromMontgomery(d_points unsafe.Pointer, len int) (int, error) {
|
||||
pointsC := (*C.{{.CurveNameUpperCase}}_affine_t)(d_points)
|
||||
lenC := C.uint(len)
|
||||
|
||||
if success := C.from_montgomery_aff_points_cuda_{{.CurveNameLowerCase}}(pointsC, lenC, 0); success != 0 {
|
||||
return -1, errors.New("reversing failed")
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func G2AffinePointFromMontgomery(d_points unsafe.Pointer, len int) (int, error) {
|
||||
pointsC := (*C.{{.CurveNameUpperCase}}_g2_affine_t)(d_points)
|
||||
lenC := C.uint(len)
|
||||
|
||||
if success := C.from_montgomery_aff_points_g2_cuda_{{.CurveNameLowerCase}}(pointsC, lenC, 0); success != 0 {
|
||||
return -1, errors.New("reversing failed")
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
@@ -1,130 +0,0 @@
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNtt{{.CurveNameUpperCase}}Batch(t *testing.T) {
|
||||
count := 1 << 20
|
||||
scalars := GenerateScalars(count, false)
|
||||
|
||||
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
|
||||
copy(nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, scalars)
|
||||
NttBatch(&nttResult, false, count, 0)
|
||||
assert.NotEqual(t, nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, nttResult)
|
||||
}
|
||||
|
||||
func TestNtt{{.CurveNameUpperCase}}CompareToGnarkDIF(t *testing.T) {
|
||||
count := 1 << 2
|
||||
scalars := GenerateScalars(count, false)
|
||||
|
||||
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
|
||||
copy(nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, scalars)
|
||||
Ntt(&nttResult, false, 0)
|
||||
assert.NotEqual(t, nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, nttResult)
|
||||
}
|
||||
|
||||
func TestINtt{{.CurveNameUpperCase}}CompareToGnarkDIT(t *testing.T) {
|
||||
count := 1 << 3
|
||||
scalars := GenerateScalars(count, false)
|
||||
|
||||
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
|
||||
copy(nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, scalars)
|
||||
Ntt(&nttResult, true, 0)
|
||||
assert.NotEqual(t, nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, nttResult)
|
||||
}
|
||||
|
||||
func TestNtt{{.CurveNameUpperCase}}(t *testing.T) {
|
||||
count := 1 << 3
|
||||
|
||||
scalars := GenerateScalars(count, false)
|
||||
|
||||
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
|
||||
copy(nttResult, scalars)
|
||||
|
||||
assert.Equal(t, nttResult, scalars)
|
||||
Ntt(&nttResult, false, 0)
|
||||
assert.NotEqual(t, nttResult, scalars)
|
||||
|
||||
inttResult := make([]G1ScalarField, len(nttResult))
|
||||
copy(inttResult, nttResult)
|
||||
|
||||
assert.Equal(t, inttResult, nttResult)
|
||||
Ntt(&inttResult, true, 0)
|
||||
assert.Equal(t, inttResult, scalars)
|
||||
}
|
||||
|
||||
func TestNttBatch{{.CurveNameUpperCase}}(t *testing.T) {
|
||||
count := 1 << 5
|
||||
batches := 4
|
||||
|
||||
scalars := GenerateScalars(count*batches, false)
|
||||
|
||||
var scalarVecOfVec [][]G1ScalarField = make([][]G1ScalarField, 0)
|
||||
|
||||
for i := 0; i < batches; i++ {
|
||||
start := i * count
|
||||
end := (i + 1) * count
|
||||
batch := make([]G1ScalarField, len(scalars[start:end]))
|
||||
copy(batch, scalars[start:end])
|
||||
scalarVecOfVec = append(scalarVecOfVec, batch)
|
||||
}
|
||||
|
||||
nttBatchResult := make([]G1ScalarField, len(scalars))
|
||||
copy(nttBatchResult, scalars)
|
||||
|
||||
NttBatch(&nttBatchResult, false, count, 0)
|
||||
|
||||
var nttResultVecOfVec [][]G1ScalarField
|
||||
|
||||
for i := 0; i < batches; i++ {
|
||||
// Clone the slice
|
||||
clone := make([]G1ScalarField, len(scalarVecOfVec[i]))
|
||||
copy(clone, scalarVecOfVec[i])
|
||||
|
||||
// Add it to the result vector of vectors
|
||||
nttResultVecOfVec = append(nttResultVecOfVec, clone)
|
||||
|
||||
// Call the ntt_{{.CurveNameLowerCase}} function
|
||||
Ntt(&nttResultVecOfVec[i], false, 0)
|
||||
}
|
||||
|
||||
assert.NotEqual(t, nttBatchResult, scalars)
|
||||
|
||||
// Check that the ntt of each vec of scalars is equal to the intt of the specific batch
|
||||
for i := 0; i < batches; i++ {
|
||||
if !reflect.DeepEqual(nttResultVecOfVec[i], nttBatchResult[i*count:((i+1)*count)]) {
|
||||
t.Errorf("ntt of vec of scalars not equal to intt of specific batch")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkNTT(b *testing.B) {
|
||||
LOG_NTT_SIZES := []int{12, 15, 20, 21, 22, 23, 24, 25, 26}
|
||||
|
||||
for _, logNTTSize := range LOG_NTT_SIZES {
|
||||
nttSize := 1 << logNTTSize
|
||||
b.Run(fmt.Sprintf("NTT %d", logNTTSize), func(b *testing.B) {
|
||||
scalars := GenerateScalars(nttSize, false)
|
||||
|
||||
nttResult := make([]G1ScalarField, len(scalars)) // Make a new slice with the same length
|
||||
copy(nttResult, scalars)
|
||||
for n := 0; n < b.N; n++ {
|
||||
Ntt(&nttResult, false, 0)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,24 +0,0 @@
|
||||
// #cgo CFLAGS: -I./include/
|
||||
// #cgo CFLAGS: -I/usr/local/cuda/include
|
||||
// #cgo LDFLAGS: -L${SRCDIR}/../../ {{.SharedLib}}
|
||||
// #include "ve_mod_mult.h"
|
||||
import "C"
|
||||
import (
|
||||
"fmt"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func VecScalarMulMod(scalarVec1, scalarVec2 unsafe.Pointer, size int) int {
|
||||
scalarVec1C := (*C.{{.CurveNameUpperCase}}_scalar_t)(scalarVec1)
|
||||
scalarVec2C := (*C.{{.CurveNameUpperCase}}_scalar_t)(scalarVec2)
|
||||
sizeC := C.size_t(size)
|
||||
|
||||
ret := C.vec_mod_mult_device_scalar_{{.CurveNameLowerCase}}(scalarVec1C, scalarVec2C, sizeC, 0)
|
||||
|
||||
if ret != 0 {
|
||||
fmt.Print("error multiplying scalar vectors")
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
@@ -700,9 +700,9 @@ namespace ntt {
   * - `S` is the [scalar field](@ref scalar_t) of the curve;
   */
  extern "C" cudaError_t CONCAT_EXPAND(CURVE, InitializeDomain)(
    curve_config::scalar_t primitive_root, device_context::DeviceContext& ctx, bool fast_twiddles_mode)
    curve_config::scalar_t* primitive_root, device_context::DeviceContext& ctx, bool fast_twiddles_mode)
  {
    return InitDomain(primitive_root, ctx, fast_twiddles_mode);
    return InitDomain(*primitive_root, ctx, fast_twiddles_mode);
  }

  /**

@@ -926,9 +926,9 @@ public:

  static constexpr HOST_DEVICE_INLINE bool is_even(const Field& xs) { return ~xs.limbs_storage.limbs[0] & 1; }

  // inverse assumes that xs is nonzero
  static constexpr HOST_DEVICE_INLINE Field inverse(const Field& xs)
  {
    if (xs == zero()) return zero();
    constexpr Field one = Field{CONFIG::one};
    constexpr ff_storage modulus = CONFIG::modulus;
    Field u = xs;
103
wrappers/golang/README.md
Normal file
@@ -0,0 +1,103 @@

# Golang Bindings

In order to build the underlying ICICLE libraries you should run the build script `build.sh` found in the `wrappers/golang` directory.

Build script usage:

```
./build.sh <curve> [G2_enabled]

curve - The name of the curve to build, or "all" to build all curves
G2_enabled - Optional - To build with G2 enabled
```

To build ICICLE libraries for all supported curves with G2 enabled:

```
./build.sh all ON
```

If you wish to build for a specific curve, for example bn254, without G2 enabled:

```
./build.sh bn254
```

>[!NOTE]
>Currently supported curves are `bn254`, `bls12_381`, `bls12_377` and `bw6_761`

>[!NOTE]
>G2 is enabled by building your golang project with the build tag `g2`.
>Make sure to add it to your build tags if you want it enabled.

## Running golang tests

To run the tests for curve bn254:

```
go test ./wrappers/golang/curves/bn254 -count=1
```

To run all the tests in the golang bindings:

```
go test --tags=g2 ./... -count=1
```

## How do Golang bindings work?

The libraries produced by compiling the CUDA code are used to bind Golang to ICICLE's CUDA code.

1. These libraries (named `libingo_<curve>.a`) can be imported into your Go project to leverage the GPU-accelerated functionality provided by ICICLE.

2. In your Go project, you can use `cgo` to link these libraries. Here's a basic example of how to do so:

```go
/*
#cgo LDFLAGS: -L/path/to/shared/libs -lingo_bn254
#include "icicle.h" // make sure you use the correct header file(s)
*/
import "C"

func main() {
  // Now you can call the C functions from the ICICLE libraries.
  // Note that C function calls are prefixed with 'C.' in Go code.
}
```

Replace `/path/to/shared/libs` with the actual path where the shared libraries are located on your system.

## Common issues

### Cannot find shared library

In some cases you may encounter the following error, despite exporting the correct `LD_LIBRARY_PATH`:

```
/usr/local/go/pkg/tool/linux_amd64/link: running gcc failed: exit status 1
/usr/bin/ld: cannot find -lbn254: No such file or directory
/usr/bin/ld: cannot find -lbn254: No such file or directory
/usr/bin/ld: cannot find -lbn254: No such file or directory
/usr/bin/ld: cannot find -lbn254: No such file or directory
/usr/bin/ld: cannot find -lbn254: No such file or directory
collect2: error: ld returned 1 exit status
```

This is normally fixed by exporting the path to the shared library location in the following way: `export CGO_LDFLAGS="-L/<path_to_shared_lib>/"`

### cuda_runtime.h: No such file or directory

```
# github.com/ingonyama-zk/icicle/wrappers/golang/curves/bls12381
In file included from wrappers/golang/curves/bls12381/curve.go:5:
wrappers/golang/curves/bls12381/include/curve.h:1:10: fatal error: cuda_runtime.h: No such file or directory
    1 | #include <cuda_runtime.h>
      | ^~~~~~~~~~~~~~~~
compilation terminated.
```

Our golang bindings rely on CUDA headers and require that they can be found as system headers. Make sure to add the `cuda/include` directory of your CUDA installation to your `CPATH`:

```
export CPATH=$CPATH:<path/to/cuda/include>
```
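As a small illustration of the build-tag note above (a sketch, not code taken from the bindings themselves), G2-specific files can be kept behind a build constraint so that they are only compiled when `-tags=g2` is passed:

```go
//go:build g2

package bn254

// This file only compiles when the project is built or tested with the g2 tag,
// e.g. `go test --tags=g2 ./... -count=1`.
```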
27
wrappers/golang/build.sh
Executable file
@@ -0,0 +1,27 @@

#!/bin/bash

G2_DEFINED=OFF

if [[ $2 ]]
then
  G2_DEFINED=ON
fi

BUILD_DIR=$(realpath "$PWD/../../icicle/build")
SUPPORTED_CURVES=("bn254" "bls12_377" "bls12_381" "bw6_761")

if [[ $1 == "all" ]]
then
  BUILD_CURVES=("${SUPPORTED_CURVES[@]}")
else
  BUILD_CURVES=( $1 )
fi

cd ../../icicle
mkdir -p build

for CURVE in "${BUILD_CURVES[@]}"
do
  cmake -DCURVE=$CURVE -DG2_DEFINED=$G2_DEFINED -DCMAKE_BUILD_TYPE=Release -S . -B build
  cmake --build build
done
43
wrappers/golang/core/error.go
Normal file
@@ -0,0 +1,43 @@

package core

import (
  "github.com/ingonyama-zk/icicle/wrappers/golang/cuda_runtime"
)

type IcicleErrorCode int

const (
  IcicleSuccess         IcicleErrorCode = 0
  InvalidArgument       IcicleErrorCode = 1
  MemoryAllocationError IcicleErrorCode = 2
  InternalCudaError     IcicleErrorCode = 199999999
  UndefinedError        IcicleErrorCode = 999999999
)

type IcicleError struct {
  IcicleErrorCode IcicleErrorCode
  CudaErrorCode   cuda_runtime.CudaError
  reason          string
}

func FromCudaError(error cuda_runtime.CudaError) (err IcicleError) {
  switch error {
  case cuda_runtime.CudaSuccess:
    err.IcicleErrorCode = IcicleSuccess
  default:
    err.IcicleErrorCode = InternalCudaError
  }

  err.CudaErrorCode = error
  err.reason = "Runtime CUDA error."

  return err
}

func FromCodeAndReason(code IcicleErrorCode, reason string) IcicleError {
  return IcicleError{
    IcicleErrorCode: code,
    reason:          reason,
    CudaErrorCode:   cuda_runtime.CudaErrorUnknown,
  }
}
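A minimal usage sketch of the error type above. The `core` import path is assumed from the module layout (it mirrors the `cuda_runtime` path shown in the file); only exported identifiers that appear in `error.go` are used.

```go
package example

import (
  "github.com/ingonyama-zk/icicle/wrappers/golang/core"
  "github.com/ingonyama-zk/icicle/wrappers/golang/cuda_runtime"
)

// mustSucceed wraps a raw CUDA error into an IcicleError and panics on failure.
func mustSucceed(cudaErr cuda_runtime.CudaError) {
  icicleErr := core.FromCudaError(cudaErr)
  if icicleErr.IcicleErrorCode != core.IcicleSuccess {
    panic(icicleErr) // CudaErrorCode still carries the underlying CUDA error code
  }
}
```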
76
wrappers/golang/core/internal/curve.go
Normal file
@@ -0,0 +1,76 @@

package internal

type MockProjective struct {
  X, Y, Z MockField
}

func (p MockProjective) Size() int {
  return p.X.Size() * 3
}

func (p MockProjective) AsPointer() *uint32 {
  return p.X.AsPointer()
}

func (p *MockProjective) Zero() MockProjective {
  p.X.Zero()
  p.Y.One()
  p.Z.Zero()

  return *p
}

func (p *MockProjective) FromLimbs(x, y, z []uint32) MockProjective {
  p.X.FromLimbs(x)
  p.Y.FromLimbs(y)
  p.Z.FromLimbs(z)

  return *p
}

func (p *MockProjective) FromAffine(a MockAffine) MockProjective {
  z := MockField{}
  z.One()

  p.X = a.X
  p.Y = a.Y
  p.Z = z

  return *p
}

type MockAffine struct {
  X, Y MockField
}

func (a MockAffine) Size() int {
  return a.X.Size() * 2
}

func (a MockAffine) AsPointer() *uint32 {
  return a.X.AsPointer()
}

func (a *MockAffine) Zero() MockAffine {
  a.X.Zero()
  a.Y.Zero()

  return *a
}

func (a *MockAffine) FromLimbs(x, y []uint32) MockAffine {
  a.X.FromLimbs(x)
  a.Y.FromLimbs(y)

  return *a
}

func (a MockAffine) ToProjective() MockProjective {
  var z MockField

  return MockProjective{
    X: a.X,
    Y: a.Y,
    Z: z.One(),
  }
}
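These mock types exist so that curve-agnostic logic can be exercised without a real curve backend. A small illustrative sketch (placed in the same `internal` package, not part of the PR) of the two equivalent ways to lift an affine mock point to projective form:

```go
package internal

import "fmt"

// affineRoundTrip shows MockAffine.ToProjective and MockProjective.FromAffine
// producing the same projective point, with Z set to one in both cases.
func affineRoundTrip() {
  var a MockAffine
  a.FromLimbs(
    []uint32{1, 2, 3, 4, 5, 6, 7, 8}, // X limbs (BASE_LIMBS = 8)
    []uint32{8, 7, 6, 5, 4, 3, 2, 1}, // Y limbs
  )

  p1 := a.ToProjective()
  var p2 MockProjective
  p2.FromAffine(a)

  fmt.Println(p1 == p2) // true
}
```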
101
wrappers/golang/core/internal/curve_test.go
Normal file
@@ -0,0 +1,101 @@

package internal

import (
  "github.com/stretchr/testify/assert"
  "testing"
)

func TestMockAffineZero(t *testing.T) {
  var fieldZero = MockField{}

  var affineZero MockAffine
  assert.Equal(t, affineZero.X, fieldZero)
  assert.Equal(t, affineZero.Y, fieldZero)

  x := generateRandomLimb(int(BASE_LIMBS))
  y := generateRandomLimb(int(BASE_LIMBS))
  var affine MockAffine
  affine.FromLimbs(x, y)

  affine.Zero()
  assert.Equal(t, affine.X, fieldZero)
  assert.Equal(t, affine.Y, fieldZero)
}

func TestMockAffineFromLimbs(t *testing.T) {
  randLimbs := generateRandomLimb(int(BASE_LIMBS))
  randLimbs2 := generateRandomLimb(int(BASE_LIMBS))

  var affine MockAffine
  affine.FromLimbs(randLimbs, randLimbs2)

  assert.ElementsMatch(t, randLimbs, affine.X.GetLimbs())
  assert.ElementsMatch(t, randLimbs2, affine.Y.GetLimbs())
}

func TestMockAffineToProjective(t *testing.T) {
  randLimbs := generateRandomLimb(int(BASE_LIMBS))
  randLimbs2 := generateRandomLimb(int(BASE_LIMBS))
  var fieldOne MockField
  fieldOne.One()

  var expected MockProjective
  expected.FromLimbs(randLimbs, randLimbs2, fieldOne.limbs[:])

  var affine MockAffine
  affine.FromLimbs(randLimbs, randLimbs2)

  projectivePoint := affine.ToProjective()
  assert.Equal(t, expected, projectivePoint)
}

func TestMockProjectiveZero(t *testing.T) {
  var projectiveZero MockProjective
  projectiveZero.Zero()
  var fieldZero = MockField{}
  var fieldOne MockField
  fieldOne.One()

  assert.Equal(t, projectiveZero.X, fieldZero)
  assert.Equal(t, projectiveZero.Y, fieldOne)
  assert.Equal(t, projectiveZero.Z, fieldZero)

  randLimbs := generateRandomLimb(int(BASE_LIMBS))
  var projective MockProjective
  projective.FromLimbs(randLimbs, randLimbs, randLimbs)

  projective.Zero()
  assert.Equal(t, projective.X, fieldZero)
  assert.Equal(t, projective.Y, fieldOne)
  assert.Equal(t, projective.Z, fieldZero)
}

func TestMockProjectiveFromLimbs(t *testing.T) {
  randLimbs := generateRandomLimb(int(BASE_LIMBS))
  randLimbs2 := generateRandomLimb(int(BASE_LIMBS))
  randLimbs3 := generateRandomLimb(int(BASE_LIMBS))

  var projective MockProjective
  projective.FromLimbs(randLimbs, randLimbs2, randLimbs3)

  assert.ElementsMatch(t, randLimbs, projective.X.GetLimbs())
  assert.ElementsMatch(t, randLimbs2, projective.Y.GetLimbs())
  assert.ElementsMatch(t, randLimbs3, projective.Z.GetLimbs())
}

func TestMockProjectiveFromAffine(t *testing.T) {
  randLimbs := generateRandomLimb(int(BASE_LIMBS))
  randLimbs2 := generateRandomLimb(int(BASE_LIMBS))
  var fieldOne MockField
  fieldOne.One()

  var expected MockProjective
  expected.FromLimbs(randLimbs, randLimbs2, fieldOne.limbs[:])

  var affine MockAffine
  affine.FromLimbs(randLimbs, randLimbs2)

  var projectivePoint MockProjective
  projectivePoint.FromAffine(affine)
  assert.Equal(t, expected, projectivePoint)
}
79
wrappers/golang/core/internal/field.go
Normal file
@@ -0,0 +1,79 @@

package internal

import (
  "encoding/binary"
  "fmt"
)

const (
  BASE_LIMBS int8 = 8
)

type MockField struct {
  limbs [BASE_LIMBS]uint32
}

func (f MockField) Len() int {
  return int(BASE_LIMBS)
}

func (f MockField) Size() int {
  return int(BASE_LIMBS * 4)
}

func (f MockField) GetLimbs() []uint32 {
  return f.limbs[:]
}

func (f MockField) AsPointer() *uint32 {
  return &f.limbs[0]
}

func (f *MockField) FromLimbs(limbs []uint32) MockField {
  if len(limbs) != f.Len() {
    panic("Called FromLimbs with limbs of different length than field")
  }
  for i := range f.limbs {
    f.limbs[i] = limbs[i]
  }

  return *f
}

func (f *MockField) Zero() MockField {
  for i := range f.limbs {
    f.limbs[i] = 0
  }

  return *f
}

func (f *MockField) One() MockField {
  for i := range f.limbs {
    f.limbs[i] = 0
  }
  f.limbs[0] = 1

  return *f
}

func (f *MockField) FromBytesLittleEndian(bytes []byte) MockField {
  if len(bytes)/4 != f.Len() {
    panic(fmt.Sprintf("Called FromBytesLittleEndian with incorrect bytes length; expected %d - got %d", f.Len()*4, len(bytes)))
  }

  for i := range f.limbs {
    f.limbs[i] = binary.LittleEndian.Uint32(bytes[i*4 : i*4+4])
  }

  return *f
}

func (f MockField) ToBytesLittleEndian() []byte {
  bytes := make([]byte, f.Len()*4)
  for i, v := range f.limbs {
    binary.LittleEndian.PutUint32(bytes[i*4:], v)
  }

  return bytes
}
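`MockField` stores its element as 8 little-endian uint32 limbs and serializes 4 bytes per limb. A short round-trip sketch (illustrative only, written as if it lived in the same `internal` package):

```go
package internal

import "fmt"

// mockFieldRoundTrip converts 8 limbs to 32 little-endian bytes and back.
func mockFieldRoundTrip() {
  var f MockField
  f.FromLimbs([]uint32{1, 2, 3, 4, 5, 6, 7, 8})

  bytes := f.ToBytesLittleEndian() // 32 bytes, 4 per limb

  var g MockField
  g.FromBytesLittleEndian(bytes)
  fmt.Println(g.GetLimbs()) // [1 2 3 4 5 6 7 8]
}
```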
82
wrappers/golang/core/internal/field_test.go
Normal file
@@ -0,0 +1,82 @@

package internal

import (
  "github.com/stretchr/testify/assert"
  "testing"
)

func TestMockFieldFromLimbs(t *testing.T) {
  emptyField := MockField{}
  randLimbs := generateRandomLimb(int(BASE_LIMBS))
  emptyField.FromLimbs(randLimbs[:])
  assert.ElementsMatch(t, randLimbs, emptyField.limbs, "Limbs do not match; there was an issue with setting the MockField's limbs")
  randLimbs[0] = 100
  assert.NotEqual(t, randLimbs, emptyField.limbs)
}

func TestMockFieldGetLimbs(t *testing.T) {
  emptyField := MockField{}
  randLimbs := generateRandomLimb(int(BASE_LIMBS))
  emptyField.FromLimbs(randLimbs[:])

  assert.ElementsMatch(t, randLimbs, emptyField.GetLimbs(), "Limbs do not match; there was an issue with setting the MockField's limbs")
}

func TestMockFieldOne(t *testing.T) {
  var emptyField MockField
  emptyField.One()
  limbOne := generateLimbOne(int(BASE_LIMBS))
  assert.ElementsMatch(t, emptyField.GetLimbs(), limbOne, "Empty field to field one did not work")

  randLimbs := generateRandomLimb(int(BASE_LIMBS))
  emptyField.FromLimbs(randLimbs[:])

  emptyField.One()
  assert.ElementsMatch(t, emptyField.GetLimbs(), limbOne, "MockField with limbs to field one did not work")
}

func TestMockFieldZero(t *testing.T) {
  var emptyField MockField
  emptyField.Zero()
  limbsZero := make([]uint32, BASE_LIMBS)
  assert.ElementsMatch(t, emptyField.GetLimbs(), limbsZero, "Empty field to field zero failed")

  randLimbs := generateRandomLimb(int(BASE_LIMBS))
  emptyField.FromLimbs(randLimbs[:])

  emptyField.Zero()
  assert.ElementsMatch(t, emptyField.GetLimbs(), limbsZero, "MockField with limbs to field zero failed")
}

func TestMockFieldSize(t *testing.T) {
  var emptyField MockField
  randLimbs := generateRandomLimb(int(BASE_LIMBS))
  emptyField.FromLimbs(randLimbs[:])

  assert.Equal(t, len(randLimbs)*4, emptyField.Size(), "Size returned an incorrect value of bytes")
}

func TestMockFieldAsPointer(t *testing.T) {
  var emptyField MockField
  randLimbs := generateRandomLimb(int(BASE_LIMBS))
  emptyField.FromLimbs(randLimbs[:])

  assert.Equal(t, randLimbs[0], *emptyField.AsPointer(), "AsPointer returned pointer to incorrect value")
}

func TestMockFieldFromBytes(t *testing.T) {
  var emptyField MockField
  bytes, expected := generateBytesArray(int(BASE_LIMBS))

  emptyField.FromBytesLittleEndian(bytes)

  assert.ElementsMatch(t, emptyField.GetLimbs(), expected, "FromBytes returned incorrect values")
}

func TestMockFieldToBytes(t *testing.T) {
  var emptyField MockField
  expected, limbs := generateBytesArray(int(BASE_LIMBS))
  emptyField.FromLimbs(limbs)

  assert.ElementsMatch(t, emptyField.ToBytesLittleEndian(), expected, "ToBytes returned incorrect values")
}
31 wrappers/golang/core/internal/helpers_test.go Normal file
@@ -0,0 +1,31 @@
package internal

import (
    "math/rand"
)

func generateRandomLimb(size int) []uint32 {
    limbs := make([]uint32, size)
    for i := range limbs {
        limbs[i] = rand.Uint32()
    }
    return limbs
}

func generateLimbOne(size int) []uint32 {
    limbs := make([]uint32, size)
    limbs[0] = 1
    return limbs
}

func generateBytesArray(size int) ([]byte, []uint32) {
    baseBytes := []byte{1, 2, 3, 4}
    var bytes []byte
    var limbs []uint32
    for i := 0; i < size; i++ {
        bytes = append(bytes, baseBytes...)
        limbs = append(limbs, 67305985) // 67305985 == 0x04030201, the little-endian uint32 of bytes {1, 2, 3, 4}
    }

    return bytes, limbs
}
106 wrappers/golang/core/msm.go Normal file
@@ -0,0 +1,106 @@
package core

import (
    "fmt"

    "github.com/ingonyama-zk/icicle/wrappers/golang/cuda_runtime"
)

type MSMConfig struct {
    /// Details related to the device such as its id and stream.
    Ctx cuda_runtime.DeviceContext

    pointsSize int32

    /// The number of extra points to pre-compute for each point. Larger values decrease the number of computations
    /// to make and the on-line memory footprint, but increase the static memory footprint. Default value: 1 (i.e. don't pre-compute).
    PrecomputeFactor int32

    /// `c` value, or "window bitsize", which is the main parameter of the "bucket method"
    /// that we use to solve the MSM problem. As a rule of thumb, a larger value means more on-line memory
    /// footprint but also more parallelism and less computational complexity (up to a certain point).
    /// Default value: 0 (the optimal value of `c` is chosen automatically).
    C int32

    /// Number of bits of the largest scalar. Typically equals the bitsize of the scalar field, but if a different
    /// (better) upper bound is known, it should be reflected in this variable. Default value: 0 (set to the bitsize of the scalar field).
    Bitsize int32

    /// Variable that controls how sensitive the algorithm is to the buckets that occur very frequently.
    /// Useful for efficient treatment of non-uniform distributions of scalars and "top windows" with few bits.
    /// Can be set to 0 to disable separate treatment of large buckets altogether. Default value: 10.
    LargeBucketFactor int32

    batchSize int32

    areScalarsOnDevice bool

    /// True if scalars are in Montgomery form and false otherwise. Default value: true.
    AreScalarsMontgomeryForm bool

    arePointsOnDevice bool

    /// True if coordinates of points are in Montgomery form and false otherwise. Default value: true.
    ArePointsMontgomeryForm bool

    areResultsOnDevice bool

    /// Whether to do "bucket accumulation" serially. Decreases computational complexity, but also greatly
    /// decreases parallelism, so only suitable for large batches of MSMs. Default value: false.
    IsBigTriangle bool

    /// Whether to run the MSM asynchronously. If set to `true`, the MSM function will be non-blocking
    /// and you'd need to synchronize it explicitly by running `cudaStreamSynchronize` or `cudaDeviceSynchronize`.
    /// If set to `false`, the MSM function will block the current CPU thread.
    IsAsync bool
}

// type MSM interface {
//     Msm(scalars, points *cuda_runtime.HostOrDeviceSlice, cfg *MSMConfig, results *cuda_runtime.HostOrDeviceSlice) cuda_runtime.CudaError
//     GetDefaultMSMConfig() MSMConfig
// }

func GetDefaultMSMConfig() MSMConfig {
    ctx, _ := cuda_runtime.GetDefaultDeviceContext()
    return MSMConfig{
        ctx,   // Ctx
        0,     // pointsSize
        1,     // PrecomputeFactor
        0,     // C
        0,     // Bitsize
        10,    // LargeBucketFactor
        1,     // batchSize
        false, // areScalarsOnDevice
        false, // AreScalarsMontgomeryForm
        false, // arePointsOnDevice
        false, // ArePointsMontgomeryForm
        false, // areResultsOnDevice
        false, // IsBigTriangle
        false, // IsAsync
    }
}

func MsmCheck(scalars HostOrDeviceSlice, points HostOrDeviceSlice, cfg *MSMConfig, results HostOrDeviceSlice) {
    scalarsLength, pointsLength, resultsLength := scalars.Len(), points.Len(), results.Len()
    if scalarsLength%pointsLength != 0 {
        errorString := fmt.Sprintf(
            "Number of points %d does not divide the number of scalars %d",
            pointsLength,
            scalarsLength,
        )
        panic(errorString)
    }
    if scalarsLength%resultsLength != 0 {
        errorString := fmt.Sprintf(
            "Number of results %d does not divide the number of scalars %d",
            resultsLength,
            scalarsLength,
        )
        panic(errorString)
    }
    cfg.pointsSize = int32(pointsLength)
    cfg.batchSize = int32(resultsLength)
    cfg.areScalarsOnDevice = scalars.IsOnDevice()
    cfg.arePointsOnDevice = points.IsOnDevice()
    cfg.areResultsOnDevice = results.IsOnDevice()
}
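As a condensed sketch of the intended call pattern (the msm_test.go below exercises it in full): MsmCheck validates slice shapes and records sizes and host/device placement on the config. It assumes the core package above plus the internal test mocks; the concrete values are illustrative only.

// Sketch only: host-side scalars/points/results run through MsmCheck.
func exampleMsmCheck(scalar internal.MockField, point internal.MockAffine) MSMConfig {
    cfg := GetDefaultMSMConfig()

    scalars := HostSliceWithValue(scalar, 10)
    points := HostSliceWithValue(point, 10)
    results := make(HostSlice[internal.MockProjective], 1)

    // Panics unless len(points) and len(results) both divide len(scalars);
    // otherwise fills in pointsSize, batchSize and the on-device flags.
    MsmCheck(scalars, points, &cfg, results)
    return cfg
}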
106 wrappers/golang/core/msm_test.go Normal file
@@ -0,0 +1,106 @@
package core

import (
    "testing"

    "github.com/ingonyama-zk/icicle/wrappers/golang/core/internal"
    "github.com/ingonyama-zk/icicle/wrappers/golang/cuda_runtime"

    "github.com/stretchr/testify/assert"
)

func TestMSMDefaultConfig(t *testing.T) {
    ctx, _ := cuda_runtime.GetDefaultDeviceContext()
    expected := MSMConfig{
        ctx,   // Ctx
        0,     // pointsSize
        1,     // PrecomputeFactor
        0,     // C
        0,     // Bitsize
        10,    // LargeBucketFactor
        1,     // batchSize
        false, // areScalarsOnDevice
        false, // AreScalarsMontgomeryForm
        false, // arePointsOnDevice
        false, // ArePointsMontgomeryForm
        false, // areResultsOnDevice
        false, // IsBigTriangle
        false, // IsAsync
    }

    actual := GetDefaultMSMConfig()

    assert.Equal(t, expected, actual)
}

func TestMSMCheckHostSlices(t *testing.T) {
    cfg := GetDefaultMSMConfig()

    randLimbs := []uint32{1, 2, 3, 4, 5, 6, 7, 8}
    rawScalars := make([]internal.MockField, 10)
    for i := range rawScalars {
        var emptyField internal.MockField
        emptyField.FromLimbs(randLimbs)

        rawScalars[i] = emptyField
    }
    scalars := HostSliceFromElements[internal.MockField](rawScalars)

    affine := internal.MockAffine{}
    limbs := []uint32{1, 2, 3, 4, 5, 6, 7, 8}
    affine.FromLimbs(limbs, limbs)
    rawAffinePoints := make([]internal.MockAffine, 10)
    for i := range rawAffinePoints {
        rawAffinePoints[i] = affine
    }
    points := HostSliceFromElements[internal.MockAffine](rawAffinePoints)

    output := make(HostSlice[internal.MockProjective], 1)
    assert.NotPanics(t, func() { MsmCheck(scalars, points, &cfg, output) })
    assert.False(t, cfg.areScalarsOnDevice)
    assert.False(t, cfg.arePointsOnDevice)
    assert.False(t, cfg.areResultsOnDevice)
    assert.Equal(t, int32(1), cfg.batchSize)

    output2 := make(HostSlice[internal.MockProjective], 3)
    assert.Panics(t, func() { MsmCheck(scalars, points, &cfg, output2) })
}

func TestMSMCheckDeviceSlices(t *testing.T) {
    cfg := GetDefaultMSMConfig()

    randLimbs := []uint32{1, 2, 3, 4, 5, 6, 7, 8}
    rawScalars := make([]internal.MockField, 10)
    for i := range rawScalars {
        var emptyField internal.MockField
        emptyField.FromLimbs(randLimbs)

        rawScalars[i] = emptyField
    }
    scalars := HostSliceFromElements[internal.MockField](rawScalars)
    var scalarsOnDevice DeviceSlice
    scalars.CopyToDevice(&scalarsOnDevice, true)

    affine := internal.MockAffine{}
    limbs := []uint32{1, 2, 3, 4, 5, 6, 7, 8}
    affine.FromLimbs(limbs, limbs)
    rawAffinePoints := make([]internal.MockAffine, 10)
    for i := range rawAffinePoints {
        rawAffinePoints[i] = affine
    }
    points := HostSliceFromElements[internal.MockAffine](rawAffinePoints)
    var pointsOnDevice DeviceSlice
    points.CopyToDevice(&pointsOnDevice, true)

    output := make(HostSlice[internal.MockProjective], 1)
    assert.NotPanics(t, func() { MsmCheck(scalarsOnDevice, pointsOnDevice, &cfg, output) })
    assert.True(t, cfg.areScalarsOnDevice)
    assert.True(t, cfg.arePointsOnDevice)
    assert.False(t, cfg.areResultsOnDevice)
    assert.Equal(t, int32(1), cfg.batchSize)

    output2 := make(HostSlice[internal.MockProjective], 3)
    assert.Panics(t, func() { MsmCheck(scalarsOnDevice, pointsOnDevice, &cfg, output2) })
}

// TODO add check for batches and batchSize
68 wrappers/golang/core/ntt.go Normal file
@@ -0,0 +1,68 @@
package core

import (
    "fmt"

    "github.com/ingonyama-zk/icicle/wrappers/golang/cuda_runtime"
)

type NTTDir int8

const (
    KForward NTTDir = iota
    KInverse NTTDir = 1
)

type Ordering uint32

const (
    KNN Ordering = iota
    KNR Ordering = 1
    KRN Ordering = 2
    KRR Ordering = 3
    KNM Ordering = 4
    KMN Ordering = 5
)

type NTTConfig[T any] struct {
    /// Details related to the device such as its id and stream id. See [DeviceContext](@ref device_context::DeviceContext).
    Ctx cuda_runtime.DeviceContext
    /// Coset generator. Used to perform coset (i)NTTs. Default value: `S::one()` (corresponding to no coset being used).
    CosetGen T
    /// The number of NTTs to compute. Default value: 1.
    BatchSize int32
    /// Ordering of inputs and outputs. See [Ordering](@ref Ordering). Default value: `Ordering::kNN`.
    Ordering Ordering
    areInputsOnDevice bool
    areOutputsOnDevice bool
    /// Whether to run the NTT asynchronously. If set to `true`, the NTT function will be non-blocking and you'd need to synchronize
    /// it explicitly by running `stream.synchronize()`. If set to false, the NTT function will block the current CPU thread.
    IsAsync bool
}

func GetDefaultNTTConfig[T any](cosetGen T) NTTConfig[T] {
    ctx, _ := cuda_runtime.GetDefaultDeviceContext()
    return NTTConfig[T]{
        ctx,      // Ctx
        cosetGen, // CosetGen
        1,        // BatchSize
        KNN,      // Ordering
        false,    // areInputsOnDevice
        false,    // areOutputsOnDevice
        false,    // IsAsync
    }
}

func NttCheck[T any](input HostOrDeviceSlice, cfg *NTTConfig[T], output HostOrDeviceSlice) {
    inputLen, outputLen := input.Len(), output.Len()
    if inputLen != outputLen {
        errorString := fmt.Sprintf(
            "input and output capacities %d; %d are not equal",
            inputLen,
            outputLen,
        )
        panic(errorString)
    }
    cfg.areInputsOnDevice = input.IsOnDevice()
    cfg.areOutputsOnDevice = output.IsOnDevice()
}
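A short sketch of the same pattern for NTT (the ntt_test.go below covers it in full). It assumes the core package above and the internal test mocks; the coset generator and sizes are illustrative only.

// Sketch only: equal-length host slices pass NttCheck, which records placement on cfg.
func exampleNttCheck() NTTConfig[internal.MockField] {
    var cosetGen internal.MockField
    cosetGen.One()

    cfg := GetDefaultNTTConfig(cosetGen)

    input := make(HostSlice[internal.MockField], 8)
    output := make(HostSlice[internal.MockField], 8)

    // Panics if input and output lengths differ.
    NttCheck(input, &cfg, output)
    return cfg
}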
96 wrappers/golang/core/ntt_test.go Normal file
@@ -0,0 +1,96 @@
package core

import (
    // "unsafe"
    "testing"

    "github.com/ingonyama-zk/icicle/wrappers/golang/core/internal"
    "github.com/ingonyama-zk/icicle/wrappers/golang/cuda_runtime"
    "github.com/stretchr/testify/assert"
)

func TestNTTDefaultConfig(t *testing.T) {
    var cosetGenField internal.MockField
    cosetGenField.One()
    var cosetGen [1]uint32
    copy(cosetGen[:], cosetGenField.GetLimbs())
    ctx, _ := cuda_runtime.GetDefaultDeviceContext()
    expected := NTTConfig[[1]uint32]{
        ctx,      // Ctx
        cosetGen, // CosetGen
        1,        // BatchSize
        KNN,      // Ordering
        false,    // areInputsOnDevice
        false,    // areOutputsOnDevice
        false,    // IsAsync
    }

    actual := GetDefaultNTTConfig(cosetGen)

    assert.Equal(t, expected, actual)
}

func TestNTTCheckHostScalars(t *testing.T) {
    randLimbs := []uint32{1, 2, 3, 4, 5, 6, 7, 8}

    var cosetGen internal.MockField
    cosetGen.FromLimbs(randLimbs)
    cfg := GetDefaultNTTConfig(&cosetGen)

    rawInput := make([]internal.MockField, 10)
    var emptyField internal.MockField
    emptyField.FromLimbs(randLimbs)

    for i := range rawInput {
        rawInput[i] = emptyField
    }

    input := HostSliceFromElements[internal.MockField](rawInput)
    output := HostSliceFromElements[internal.MockField](rawInput)
    assert.NotPanics(t, func() { NttCheck(input, &cfg, output) })
    assert.False(t, cfg.areInputsOnDevice)
    assert.False(t, cfg.areOutputsOnDevice)

    rawInputLarger := make([]internal.MockField, 11)
    for i := range rawInputLarger {
        rawInputLarger[i] = emptyField
    }
    output2 := HostSliceFromElements[internal.MockField](rawInputLarger)
    assert.Panics(t, func() { NttCheck(input, &cfg, output2) })
}

func TestNTTCheckDeviceScalars(t *testing.T) {
    randLimbs := []uint32{1, 2, 3, 4, 5, 6, 7, 8}

    var cosetGen internal.MockField
    cosetGen.FromLimbs(randLimbs)
    cfg := GetDefaultNTTConfig(cosetGen)

    fieldBytesSize := 16
    numFields := 10
    rawInput := make([]internal.MockField, numFields)
    for i := range rawInput {
        var emptyField internal.MockField
        emptyField.FromLimbs(randLimbs)

        rawInput[i] = emptyField
    }

    hostElements := HostSliceFromElements[internal.MockField](rawInput)

    var input DeviceSlice
    hostElements.CopyToDevice(&input, true)

    var output DeviceSlice
    output.Malloc(numFields*fieldBytesSize, fieldBytesSize)

    assert.NotPanics(t, func() { NttCheck(input, &cfg, output) })
    assert.True(t, cfg.areInputsOnDevice)
    assert.True(t, cfg.areOutputsOnDevice)

    var output2 DeviceSlice
    output2.Malloc((numFields+1)*fieldBytesSize, fieldBytesSize)
    assert.Panics(t, func() { NttCheck(input, &cfg, output2) })
}

// TODO add check for batches and batchSize
160 wrappers/golang/core/slice.go Normal file
@@ -0,0 +1,160 @@
package core

import (
    "unsafe"

    "github.com/ingonyama-zk/icicle/wrappers/golang/cuda_runtime"
)

type HostOrDeviceSlice interface {
    Len() int
    Cap() int
    IsEmpty() bool
    IsOnDevice() bool
}

type DevicePointer = unsafe.Pointer

type DeviceSlice struct {
    inner unsafe.Pointer
    // capacity is the number of bytes that have been allocated
    capacity int
    // length is the number of elements that have been written
    length int
}

func (d DeviceSlice) Len() int {
    return d.length
}

func (d DeviceSlice) Cap() int {
    return d.capacity
}

func (d DeviceSlice) IsEmpty() bool {
    return d.length == 0
}

func (d DeviceSlice) AsPointer() unsafe.Pointer {
    return d.inner
}

func (d DeviceSlice) IsOnDevice() bool {
    return true
}

// TODO: change signature to be Malloc(element, numElements)
// calc size internally
func (d *DeviceSlice) Malloc(size, sizeOfElement int) (DeviceSlice, cuda_runtime.CudaError) {
    dp, err := cuda_runtime.Malloc(uint(size))
    d.inner = dp
    d.capacity = size
    d.length = size / sizeOfElement
    return *d, err
}

func (d *DeviceSlice) MallocAsync(size, sizeOfElement int, stream cuda_runtime.CudaStream) (DeviceSlice, cuda_runtime.CudaError) {
    dp, err := cuda_runtime.MallocAsync(uint(size), stream)
    d.inner = dp
    d.capacity = size
    d.length = size / sizeOfElement
    return *d, err
}

func (d *DeviceSlice) Free() cuda_runtime.CudaError {
    err := cuda_runtime.Free(d.inner)
    if err == cuda_runtime.CudaSuccess {
        d.length, d.capacity = 0, 0
        d.inner = nil
    }
    return err
}

type HostSliceInterface interface {
    Size() int
}

type HostSlice[T HostSliceInterface] []T

func HostSliceFromElements[T HostSliceInterface](elements []T) HostSlice[T] {
    slice := make(HostSlice[T], len(elements))
    copy(slice, elements)

    return slice
}

func HostSliceWithValue[T HostSliceInterface](underlyingValue T, size int) HostSlice[T] {
    slice := make(HostSlice[T], size)
    for i := range slice {
        slice[i] = underlyingValue
    }

    return slice
}

func (h HostSlice[T]) Len() int {
    return len(h)
}

func (h HostSlice[T]) Cap() int {
    return cap(h)
}

func (h HostSlice[T]) IsEmpty() bool {
    return len(h) == 0
}

func (h HostSlice[T]) IsOnDevice() bool {
    return false
}

func (h HostSlice[T]) SizeOfElement() int {
    return h[0].Size()
}

func (h HostSlice[T]) CopyToDevice(dst *DeviceSlice, shouldAllocate bool) *DeviceSlice {
    size := h.Len() * h.SizeOfElement()
    if shouldAllocate {
        dst.Malloc(size, h.SizeOfElement())
    }
    if size > dst.Cap() {
        panic("Number of bytes to copy is too large for destination")
    }

    // hostSrc := unsafe.Pointer(h.AsPointer())
    hostSrc := unsafe.Pointer(&h[0])
    cuda_runtime.CopyToDevice(dst.inner, hostSrc, uint(size))
    dst.length = h.Len()
    return dst
}

func (h HostSlice[T]) CopyToDeviceAsync(dst *DeviceSlice, stream cuda_runtime.CudaStream, shouldAllocate bool) *DeviceSlice {
    size := h.Len() * h.SizeOfElement()
    if shouldAllocate {
        dst.MallocAsync(size, h.SizeOfElement(), stream)
    }
    if size > dst.Cap() {
        panic("Number of bytes to copy is too large for destination")
    }

    hostSrc := unsafe.Pointer(&h[0])
    cuda_runtime.CopyToDeviceAsync(dst.inner, hostSrc, uint(size), stream)
    dst.length = h.Len()
    return dst
}

func (h HostSlice[T]) CopyFromDevice(src *DeviceSlice) {
    if h.Len() != src.Len() {
        panic("destination and source slices have different lengths")
    }
    bytesSize := src.Len() * h.SizeOfElement()
    cuda_runtime.CopyFromDevice(unsafe.Pointer(&h[0]), src.inner, uint(bytesSize))
}

func (h HostSlice[T]) CopyFromDeviceAsync(src *DeviceSlice, stream cuda_runtime.Stream) {
    if h.Len() != src.Len() {
        panic("destination and source slices have different lengths")
    }
    bytesSize := src.Len() * h.SizeOfElement()
    cuda_runtime.CopyFromDeviceAsync(unsafe.Pointer(&h[0]), src.inner, uint(bytesSize), stream)
}
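For orientation, a hedged sketch of the host/device round trip these types provide (a CUDA device is required at runtime); it assumes the core package above and the internal test mocks, and the element value and count are illustrative only.

// Sketch only: copy a host slice to device memory, read it back, then free.
func exampleSliceRoundTrip(element internal.MockField) {
    host := HostSliceWithValue(element, 1024)

    var device DeviceSlice
    host.CopyToDevice(&device, true) // true: allocate len*SizeOfElement bytes on the device

    back := make(HostSlice[internal.MockField], 1024)
    back.CopyFromDevice(&device) // panics if the lengths differ

    device.Free()
}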
192 wrappers/golang/core/slice_test.go Normal file
@@ -0,0 +1,192 @@
package core

import (
    "math/rand"
    "testing"
    "unsafe"

    "github.com/ingonyama-zk/icicle/wrappers/golang/core/internal"
    "github.com/stretchr/testify/assert"
)

func randomField(size int) internal.MockField {
    limbs := make([]uint32, size)
    for i := range limbs {
        limbs[i] = rand.Uint32()
    }

    var field internal.MockField
    field.FromLimbs(limbs)

    return field
}

func randomFields(numFields, fieldSize int) []internal.MockField {
    var randFields []internal.MockField

    for i := 0; i < numFields; i++ {
        randFields = append(randFields, randomField(fieldSize))
    }

    return randFields
}

// This function is solely for the purpose of testing HostDeviceSlice
// It can produce invalid points and should not be used to test curve operations
func randomProjectivePoints(numPoints, fieldSize int) []internal.MockProjective {
    var randProjectives []internal.MockProjective

    for i := 0; i < numPoints; i++ {
        projective := internal.MockProjective{
            X: randomField(fieldSize),
            Y: randomField(fieldSize),
            Z: randomField(fieldSize),
        }
        randProjectives = append(randProjectives, projective)
    }

    return randProjectives
}

// This function is solely for the purpose of testing HostDeviceSlice
// It can produce invalid points and should not be used to test curve operations
func randomAffinePoints(numPoints, fieldSize int) []internal.MockAffine {
    var randAffines []internal.MockAffine

    for i := 0; i < numPoints; i++ {
        affine := internal.MockAffine{
            X: randomField(fieldSize),
            Y: randomField(fieldSize),
        }
        randAffines = append(randAffines, affine)
    }

    return randAffines
}

const (
    numPoints      = 4
    numFields      = 4
    fieldSize      = 8
    fieldBytesSize = fieldSize * 4
)

func TestHostSlice(t *testing.T) {
    var emptyHostSlice HostSlice[internal.MockField]
    assert.Equal(t, emptyHostSlice.Len(), 0)
    assert.Equal(t, emptyHostSlice.Cap(), 0)

    randFields := randomFields(numFields, fieldSize)

    hostSlice := HostSliceFromElements(randFields)
    assert.Equal(t, hostSlice.Len(), 4)
    assert.Equal(t, hostSlice.Cap(), 4)
}

func TestHostSliceIsEmpty(t *testing.T) {
    var emptyHostSlice HostSlice[*internal.MockField]
    assert.True(t, emptyHostSlice.IsEmpty())

    randFields := randomFields(numFields, fieldSize)

    hostSlice := HostSliceFromElements(randFields)
    assert.False(t, hostSlice.IsEmpty())
}

func TestHostSliceIsOnDevice(t *testing.T) {
    var emptyHostSlice HostSlice[*internal.MockField]
    assert.False(t, emptyHostSlice.IsOnDevice())
}

func TestHostSliceSizeOf(t *testing.T) {
    randFields := randomFields(numFields, fieldSize)
    hostSlice := HostSliceFromElements(randFields)
    assert.Equal(t, hostSlice.SizeOfElement(), fieldSize*4)
}

func TestDeviceSlice(t *testing.T) {
    var emptyDeviceSlice DeviceSlice
    assert.Equal(t, 0, emptyDeviceSlice.Len())
    assert.Equal(t, 0, emptyDeviceSlice.Cap())
    assert.Equal(t, unsafe.Pointer(nil), emptyDeviceSlice.AsPointer())

    emptyDeviceSlice.Malloc(numFields*fieldBytesSize, fieldBytesSize)
    assert.Equal(t, numFields, emptyDeviceSlice.Len())
    assert.Equal(t, numFields*fieldBytesSize, emptyDeviceSlice.Cap())
    assert.NotEqual(t, unsafe.Pointer(nil), emptyDeviceSlice.AsPointer())

    emptyDeviceSlice.Free()
    assert.Equal(t, 0, emptyDeviceSlice.Len())
    assert.Equal(t, 0, emptyDeviceSlice.Cap())
    assert.Equal(t, unsafe.Pointer(nil), emptyDeviceSlice.AsPointer())
}

func TestDeviceSliceIsEmpty(t *testing.T) {
    var emptyDeviceSlice DeviceSlice
    assert.True(t, emptyDeviceSlice.IsEmpty())

    const bytes = numFields * fieldBytesSize
    emptyDeviceSlice.Malloc(bytes, fieldBytesSize)

    randFields := randomFields(numFields, fieldSize)
    hostSlice := HostSliceFromElements(randFields)

    hostSlice.CopyToDevice(&emptyDeviceSlice, false)
    assert.False(t, emptyDeviceSlice.IsEmpty())
}

func TestDeviceSliceIsOnDevice(t *testing.T) {
    var deviceSlice DeviceSlice
    assert.True(t, deviceSlice.IsOnDevice())
}

func TestCopyToFromHostDeviceField(t *testing.T) {
    var emptyDeviceSlice DeviceSlice

    numFields := 1 << 10
    randFields := randomFields(numFields, fieldSize)
    hostSlice := HostSliceFromElements(randFields)
    hostSlice.CopyToDevice(&emptyDeviceSlice, true)

    randFields2 := randomFields(numFields, fieldSize)
    hostSlice2 := HostSliceFromElements(randFields2)

    assert.NotEqual(t, hostSlice, hostSlice2)
    hostSlice2.CopyFromDevice(&emptyDeviceSlice)
    assert.Equal(t, hostSlice, hostSlice2)
}

func TestCopyToFromHostDeviceAffinePoints(t *testing.T) {
    var emptyDeviceSlice DeviceSlice

    numPoints := 1 << 10
    randAffines := randomAffinePoints(numPoints, fieldSize)
    hostSlice := HostSliceFromElements(randAffines)
    hostSlice.CopyToDevice(&emptyDeviceSlice, true)

    randAffines2 := randomAffinePoints(numPoints, fieldSize)
    hostSlice2 := HostSliceFromElements(randAffines2)

    assert.NotEqual(t, hostSlice, hostSlice2)
    hostSlice2.CopyFromDevice(&emptyDeviceSlice)
    emptyDeviceSlice.Free()

    assert.Equal(t, hostSlice, hostSlice2)
}

func TestCopyToFromHostDeviceProjectivePoints(t *testing.T) {
    var emptyDeviceSlice DeviceSlice

    numPoints := 1 << 15
    randProjectives := randomProjectivePoints(numPoints, fieldSize)
    hostSlice := HostSliceFromElements(randProjectives)
    hostSlice.CopyToDevice(&emptyDeviceSlice, true)

    randProjectives2 := randomProjectivePoints(numPoints, fieldSize)
    hostSlice2 := HostSliceFromElements(randProjectives2)

    assert.NotEqual(t, hostSlice, hostSlice2)
    hostSlice2.CopyFromDevice(&emptyDeviceSlice)

    assert.Equal(t, hostSlice, hostSlice2)
}
26 wrappers/golang/core/utils.go Normal file
@@ -0,0 +1,26 @@
package core

import (
    "encoding/binary"
)

func ConvertUint32ArrToUint64Arr(arr32 []uint32) []uint64 {
    arr64 := make([]uint64, len(arr32)/2)
    for i := 0; i < len(arr32); i += 2 {
        arr64[i/2] = (uint64(arr32[i]) << 32) | uint64(arr32[i+1])
    }
    return arr64
}

func ConvertUint64ArrToUint32Arr(arr64 []uint64) []uint32 {
    arr32 := make([]uint32, len(arr64)*2)
    for i, v := range arr64 {
        b := make([]byte, 8)
        binary.LittleEndian.PutUint64(b, v)

        arr32[i*2] = binary.LittleEndian.Uint32(b[0:4])
        arr32[i*2+1] = binary.LittleEndian.Uint32(b[4:8])
    }

    return arr32
}
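A worked example of the packing order (values match the tests below): the 32-to-64 conversion treats the first uint32 of each pair as the high word, while the 64-to-32 conversion emits the low word first, so the two functions are not inverses of each other.

// Sketch only: illustrates the word order of the two converters.
func exampleConversions() {
    hi := ConvertUint32ArrToUint64Arr([]uint32{1, 2}) // []uint64{4294967298}, i.e. 1<<32 | 2
    lo := ConvertUint64ArrToUint32Arr([]uint64{1})    // []uint32{1, 0}
    _, _ = hi, lo
}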
78 wrappers/golang/core/utils_test.go Normal file
@@ -0,0 +1,78 @@
package core

import (
    "github.com/stretchr/testify/assert"
    "testing"
)

func TestConvertUint32ArrToUint64Arr(t *testing.T) {
    testCases := []struct {
        name     string
        input    []uint32
        expected []uint64
    }{
        {
            name:     "Test with incremental array",
            input:    []uint32{1, 2, 3, 4, 5, 6, 7, 8},
            expected: []uint64{4294967298, 12884901892, 21474836486, 30064771080},
        },
        {
            name:     "Test with all zeros",
            input:    []uint32{0, 0, 0, 0, 0, 0, 0, 0},
            expected: []uint64{0, 0, 0, 0},
        },
        {
            name:     "Test with maximum uint32 values",
            input:    []uint32{4294967295, 4294967295, 4294967295, 4294967295, 4294967295, 4294967295, 4294967295, 4294967295},
            expected: []uint64{18446744073709551615, 18446744073709551615, 18446744073709551615, 18446744073709551615},
        },
        {
            name:     "Test with alternating min and max uint32 values",
            input:    []uint32{0, 4294967295, 0, 4294967295, 0, 4294967295, 0, 4294967295},
            expected: []uint64{4294967295, 4294967295, 4294967295, 4294967295},
        },
        {
            name:     "Test with alternating max and min uint32 values",
            input:    []uint32{4294967295, 0, 4294967295, 0, 4294967295, 0, 4294967295, 0},
            expected: []uint64{18446744069414584320, 18446744069414584320, 18446744069414584320, 18446744069414584320},
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            got := ConvertUint32ArrToUint64Arr(tc.input)
            assert.Equal(t, tc.expected, got, "Got %v, %v", got, tc.expected)
        })
    }
}

func TestConvertUint64ArrToUint32Arr(t *testing.T) {
    testCases := []struct {
        name     string
        input    []uint64
        expected []uint32
    }{
        {
            name:     "test one",
            input:    []uint64{1, 2, 3, 4},
            expected: []uint32{1, 0, 2, 0, 3, 0, 4, 0},
        },
        {
            name:     "test two",
            input:    []uint64{100, 200, 300, 400},
            expected: []uint32{100, 0, 200, 0, 300, 0, 400, 0},
        },
        {
            name:     "test three",
            input:    []uint64{1000, 2000, 3000, 4000},
            expected: []uint32{1000, 0, 2000, 0, 3000, 0, 4000, 0},
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            got := ConvertUint64ArrToUint32Arr(tc.input)
            assert.Equal(t, tc.expected, got, "Got %v, %v", got, tc.expected)
        })
    }
}
179 wrappers/golang/cuda_runtime/const.go Normal file
@@ -0,0 +1,179 @@
package cuda_runtime

/*
#cgo LDFLAGS: -L/usr/local/cuda/lib64 -lcudart
#cgo CFLAGS: -I /usr/local/cuda/include
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdlib.h>
*/
import "C"

type CudaStreamCreateFlags C.uint

const (
    // CudaStreamDefault as defined in include/driver_types.h:98
    CudaStreamDefault CudaStreamCreateFlags = iota
    // CudaStreamNonBlocking as defined in include/driver_types.h:99
    CudaStreamNonBlocking CudaStreamCreateFlags = 1
)

type CudaStreamWaitFlags C.uint

const (
    // CudaEventWaitDefault as defined in include/driver_types.h:129
    CudaEventWaitDefault CudaStreamWaitFlags = iota
    // CudaEventWaitExternal as defined in include/driver_types.h:130
    CudaEventWaitExternal CudaStreamWaitFlags = 1
)

// CudaErrorT as declared in include/driver_types.h:2868
type CudaError int32

// CudaErrorT enumeration from include/driver_types.h:2868
const (
    CudaSuccess CudaError = iota
    CudaErrorInvalidValue CudaError = 1
    CudaErrorMemoryAllocation CudaError = 2
    CudaErrorInitializationError CudaError = 3
    CudaErrorCudartUnloading CudaError = 4
    CudaErrorProfilerDisabled CudaError = 5
    CudaErrorProfilerNotInitialized CudaError = 6
    CudaErrorProfilerAlreadyStarted CudaError = 7
    CudaErrorProfilerAlreadyStopped CudaError = 8
    CudaErrorInvalidConfiguration CudaError = 9
    CudaErrorInvalidPitchValue CudaError = 12
    CudaErrorInvalidSymbol CudaError = 13
    CudaErrorInvalidHostPointer CudaError = 16
    CudaErrorInvalidDevicePointer CudaError = 17
    CudaErrorInvalidTexture CudaError = 18
    CudaErrorInvalidTextureBinding CudaError = 19
    CudaErrorInvalidChannelDescriptor CudaError = 20
    CudaErrorInvalidMemcpyDirection CudaError = 21
    CudaErrorAddressOfConstant CudaError = 22
    CudaErrorTextureFetchFailed CudaError = 23
    CudaErrorTextureNotBound CudaError = 24
    CudaErrorSynchronizationError CudaError = 25
    CudaErrorInvalidFilterSetting CudaError = 26
    CudaErrorInvalidNormSetting CudaError = 27
    CudaErrorMixedDeviceExecution CudaError = 28
    CudaErrorNotYetImplemented CudaError = 31
    CudaErrorMemoryValueTooLarge CudaError = 32
    CudaErrorStubLibrary CudaError = 34
    CudaErrorInsufficientDriver CudaError = 35
    CudaErrorCallRequiresNewerDriver CudaError = 36
    CudaErrorInvalidSurface CudaError = 37
    CudaErrorDuplicateVariableName CudaError = 43
    CudaErrorDuplicateTextureName CudaError = 44
    CudaErrorDuplicateSurfaceName CudaError = 45
    CudaErrorDevicesUnavailable CudaError = 46
    CudaErrorIncompatibleDriverContext CudaError = 49
    CudaErrorMissingConfiguration CudaError = 52
    CudaErrorPriorLaunchFailure CudaError = 53
    CudaErrorLaunchMaxDepthExceeded CudaError = 65
    CudaErrorLaunchFileScopedTex CudaError = 66
    CudaErrorLaunchFileScopedSurf CudaError = 67
    CudaErrorSyncDepthExceeded CudaError = 68
    CudaErrorLaunchPendingCountExceeded CudaError = 69
    CudaErrorInvalidDeviceFunction CudaError = 98
    CudaErrorNoDevice CudaError = 100
    CudaErrorInvalidDevice CudaError = 101
    CudaErrorDeviceNotLicensed CudaError = 102
    CudaErrorSoftwareValidityNotEstablished CudaError = 103
    CudaErrorStartupFailure CudaError = 127
    CudaErrorInvalidKernelImage CudaError = 200
    CudaErrorDeviceUninitialized CudaError = 201
    CudaErrorMapBufferObjectFailed CudaError = 205
    CudaErrorUnmapBufferObjectFailed CudaError = 206
    CudaErrorArrayIsMapped CudaError = 207
    CudaErrorAlreadyMapped CudaError = 208
    CudaErrorNoKernelImageForDevice CudaError = 209
    CudaErrorAlreadyAcquired CudaError = 210
    CudaErrorNotMapped CudaError = 211
    CudaErrorNotMappedAsArray CudaError = 212
    CudaErrorNotMappedAsPointer CudaError = 213
    CudaErrorECCUncorrectable CudaError = 214
    CudaErrorUnsupportedLimit CudaError = 215
    CudaErrorDeviceAlreadyInUse CudaError = 216
    CudaErrorPeerAccessUnsupported CudaError = 217
    CudaErrorInvalidPtx CudaError = 218
    CudaErrorInvalidGraphicsContext CudaError = 219
    CudaErrorNvlinkUncorrectable CudaError = 220
    CudaErrorJitCompilerNotFound CudaError = 221
    CudaErrorUnsupportedPtxVersion CudaError = 222
    CudaErrorJitCompilationDisabled CudaError = 223
    CudaErrorUnsupportedExecAffinity CudaError = 224
    CudaErrorUnsupportedDevSideSync CudaError = 225
    CudaErrorInvalidSource CudaError = 300
    CudaErrorFileNotFound CudaError = 301
    CudaErrorSharedObjectSymbolNotFound CudaError = 302
    CudaErrorSharedObjectInitFailed CudaError = 303
    CudaErrorOperatingSystem CudaError = 304
    CudaErrorInvalidResourceHandle CudaError = 400
    CudaErrorIllegalState CudaError = 401
    CudaErrorLossyQuery CudaError = 402
    CudaErrorSymbolNotFound CudaError = 500
    CudaErrorNotReady CudaError = 600
    CudaErrorIllegalAddress CudaError = 700
    CudaErrorLaunchOutOfResources CudaError = 701
    CudaErrorLaunchTimeout CudaError = 702
    CudaErrorLaunchIncompatibleTexturing CudaError = 703
    CudaErrorPeerAccessAlreadyEnabled CudaError = 704
    CudaErrorPeerAccessNotEnabled CudaError = 705
    CudaErrorSetOnActiveProcess CudaError = 708
    CudaErrorContextIsDestroyed CudaError = 709
    CudaErrorAssert CudaError = 710
    CudaErrorTooManyPeers CudaError = 711
    CudaErrorHostMemoryAlreadyRegistered CudaError = 712
    CudaErrorHostMemoryNotRegistered CudaError = 713
    CudaErrorHardwareStackError CudaError = 714
    CudaErrorIllegalInstruction CudaError = 715
    CudaErrorMisalignedAddress CudaError = 716
    CudaErrorInvalidAddressSpace CudaError = 717
    CudaErrorInvalidPc CudaError = 718
    CudaErrorLaunchFailure CudaError = 719
    CudaErrorCooperativeLaunchTooLarge CudaError = 720
    CudaErrorNotPermitted CudaError = 800
    CudaErrorNotSupported CudaError = 801
    CudaErrorSystemNotReady CudaError = 802
    CudaErrorSystemDriverMismatch CudaError = 803
    CudaErrorCompatNotSupportedOnDevice CudaError = 804
    CudaErrorMpsConnectionFailed CudaError = 805
    CudaErrorMpsRpcFailure CudaError = 806
    CudaErrorMpsServerNotReady CudaError = 807
    CudaErrorMpsMaxClientsReached CudaError = 808
    CudaErrorMpsMaxConnectionsReached CudaError = 809
    CudaErrorMpsClientTerminated CudaError = 810
    CudaErrorCdpNotSupported CudaError = 811
    CudaErrorCdpVersionMismatch CudaError = 812
    CudaErrorStreamCaptureUnsupported CudaError = 900
    CudaErrorStreamCaptureInvalidated CudaError = 901
    CudaErrorStreamCaptureMerge CudaError = 902
    CudaErrorStreamCaptureUnmatched CudaError = 903
    CudaErrorStreamCaptureUnjoined CudaError = 904
    CudaErrorStreamCaptureIsolation CudaError = 905
    CudaErrorStreamCaptureImplicit CudaError = 906
    CudaErrorCapturedEvent CudaError = 907
    CudaErrorStreamCaptureWrongThread CudaError = 908
    CudaErrorTimeout CudaError = 909
    CudaErrorGraphExecUpdateFailure CudaError = 910
    CudaErrorExternalDevice CudaError = 911
    CudaErrorInvalidClusterSize CudaError = 912
    CudaErrorUnknown CudaError = 999
    CudaErrorApiFailureBase CudaError = 10000
)

type CudaMemcpyKind C.uint

const (
    // CudaMemcpyHostToHost as declared in include/driver_types.h:1219
    CudaMemcpyHostToHost CudaMemcpyKind = iota
    // CudaMemcpyHostToDevice as declared in include/driver_types.h:1220
    CudaMemcpyHostToDevice CudaMemcpyKind = 1
    // CudaMemcpyDeviceToHost as declared in include/driver_types.h:1221
    CudaMemcpyDeviceToHost CudaMemcpyKind = 2
    // CudaMemcpyDeviceToDevice as declared in include/driver_types.h:1222
    CudaMemcpyDeviceToDevice CudaMemcpyKind = 3
    // CudaMemcpyDefault as declared in include/driver_types.h:1223
    CudaMemcpyDefault CudaMemcpyKind = 4
)
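To show how these constants are consumed, a hedged sketch of the error-checking pattern; it relies on the Malloc/Free wrappers that core/slice.go above already calls and assumes "fmt" is imported in this package, with the allocation size illustrative only.

// Sketch only: treat anything other than CudaSuccess as fatal.
func exampleAllocate() {
    ptr, err := Malloc(1024)
    if err != CudaSuccess {
        panic(fmt.Sprintf("cudaMalloc failed with CudaError %d", err))
    }
    Free(ptr)
}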
Some files were not shown because too many files have changed in this diff.