mirror of
https://github.com/pseXperiments/icicle.git
synced 2026-01-08 23:17:54 -05:00
session 4 start
This commit is contained in:
@@ -32,4 +32,13 @@ build_transpose:
|
||||
|
||||
# Build the transpose benchmark, then run it twice (presumably a warm-up run
# followed by the measured run — confirm).
run_transpose:
	make build_transpose
	work/transpose
	work/transpose

# Compile the compute micro-benchmark; -lineinfo enables source-line
# correlation in the profiler, -arch=sm_80 targets Ampere.
build_compute:
	mkdir -p work
	nvcc -lineinfo -o work/compute -std=c++17 -arch=sm_80 -I. -I../../include compute_test.cu

# Build (if needed) and run the compute micro-benchmark.
run_compute:
	make build_compute
	work/compute
icicle/src/mini-course-examples/compute_test.cu
Normal file
130
icicle/src/mini-course-examples/compute_test.cu
Normal file
@@ -0,0 +1,130 @@
|
||||
#include "fields/id.h"
|
||||
// #define FIELD_ID 1001
|
||||
#define CURVE_ID 3
|
||||
#include "curves/curve_config.cuh"
|
||||
// #include "fields/field_config.cuh"
|
||||
|
||||
#include <chrono>
|
||||
#include <iostream>
|
||||
#include <vector>
|
||||
#include <random>
|
||||
#include <cub/device/device_radix_sort.cuh>
|
||||
|
||||
#include "fields/field.cuh"
|
||||
#include "curves/projective.cuh"
|
||||
#include "gpu-utils/device_context.cuh"
|
||||
|
||||
#include "kernels.cu"
|
||||
|
||||
// Minimal 32-bit "scalar" with a tiny modulus, used to benchmark the kernels
// on a cheap arithmetic type instead of a full field/curve element.
class Dummy_Scalar
{
public:
  static constexpr unsigned NBITS = 32;

  unsigned x;       // the value; expected to stay in [0, p)
  unsigned p = 10;  // modulus (kept small so sums stay easy to eyeball)
  // unsigned p = 1<<30;

  static HOST_DEVICE_INLINE Dummy_Scalar zero() { return {0}; }

  static HOST_DEVICE_INLINE Dummy_Scalar one() { return {1}; }

  friend HOST_INLINE std::ostream& operator<<(std::ostream& os, const Dummy_Scalar& scalar)
  {
    os << scalar.x;
    return os;
  }

  // Extracts digit `digit_num` of width `digit_width` bits from x.
  // NOTE(review): assumes digit_width < 32, otherwise (1 << digit_width)
  // overflows — confirm against callers.
  HOST_DEVICE_INLINE unsigned get_scalar_digit(unsigned digit_num, unsigned digit_width) const
  {
    return (x >> (digit_num * digit_width)) & ((1 << digit_width) - 1);
  }

  // Modular addition; uses p1's modulus (operands are assumed to share it).
  friend HOST_DEVICE_INLINE Dummy_Scalar operator+(Dummy_Scalar p1, const Dummy_Scalar& p2)
  {
    return {(p1.x + p2.x) % p1.p};
  }

  friend HOST_DEVICE_INLINE bool operator==(const Dummy_Scalar& p1, const Dummy_Scalar& p2) { return (p1.x == p2.x); }

  friend HOST_DEVICE_INLINE bool operator==(const Dummy_Scalar& p1, const unsigned p2) { return (p1.x == p2); }

  // BUGFIX: reduce mod p. The original returned {p - x}, so neg(zero) yielded
  // p (out of the canonical range [0, p)) instead of zero.
  static HOST_DEVICE_INLINE Dummy_Scalar neg(const Dummy_Scalar& scalar) { return {(scalar.p - scalar.x) % scalar.p}; }

  static HOST_INLINE Dummy_Scalar rand_host()
  {
    // The cast binds tighter than %, so this is ((unsigned)rand()) % 10.
    return {(unsigned)rand() % 10};
    // return {(unsigned)rand()};
  }
};
|
||||
|
||||
|
||||
// Type-under-test selection for the micro-benchmarks below.
// typedef field_config::scalar_t test_scalar;
typedef curve_config::scalar_t test_scalar;
typedef curve_config::projective_t test_projective;
typedef curve_config::affine_t test_affine;

// Element type the timed kernels operate on; exactly one of these typedefs
// must be active at a time.
// typedef uint32_t test_t;
// typedef int4 test_t;
// typedef Dummy_Scalar test_t;
typedef test_projective test_t;
// typedef test_scalar test_t;

// Repetition / segment-size template parameter fed to the timed launch.
#define REPS 8
|
||||
|
||||
// Micro-benchmark driver: fills an array of test_t on the host, uploads it,
// warms up the GPU, then times one kernel launch with CUDA events.
int main()
{
  // Device-side timestamps for kernel timing.
  cudaEvent_t start, stop;
  float kernel_time;

  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  int N = 1 << 22;
  // int N = 1 << 25;

  test_t* arr1_h = new test_t[N];
  test_t* arr2_h = new test_t[N];

  test_t *arr1_d, *arr2_d;
  cudaMalloc(&arr1_d, N * sizeof(test_t));
  cudaMalloc(&arr2_d, N * sizeof(test_t));

  // Fill the input; past index 100 reuse earlier values so host-side RNG
  // (expensive for big field/curve types) does not dominate setup time.
  for (int i = 0; i < N; i++) {
    arr1_h[i] = i > 100 ? arr1_h[i - 100] : test_t::rand_host();
  }

  cudaMemcpy(arr1_d, arr1_h, sizeof(test_t) * N, cudaMemcpyHostToDevice);

  int THREADS = 128;
  int BLOCKS = (N + THREADS - 1) / THREADS;  // ceil-div grid size

  // Warm up: the first launch pays context/JIT/cache costs we don't want
  // included in the measurement.
  add_many_times<test_t, 16><<<BLOCKS, THREADS>>>(arr1_d, arr2_d, N);
  cudaDeviceSynchronize();
  std::cout << "cuda err: " << cudaGetErrorString(cudaGetLastError()) << std::endl;

  cudaEventRecord(start, 0);

  // Kernel under test (swap in any of the alternatives below).
  segment_sum<test_t, REPS><<<BLOCKS, THREADS>>>(arr1_d, N);
  // add_many_times<test_t, REPS><<<BLOCKS, THREADS>>>(arr1_d, arr2_d, N);
  // multi_add<test_t, REPS><<<BLOCKS, THREADS>>>(arr1_d, arr2_d, N);
  // shmem_segment_sum<test_t, REPS><<<BLOCKS, THREADS>>>(arr1_d, N);
  // multi_mult<test_t, REPS><<<BLOCKS, THREADS>>>(arr1_d, arr2_d, N);
  // multi_ntt8<<<BLOCKS, THREADS>>>(arr1_d, arr2_d, N);
  // limb_mult_bench<REPS><<<BLOCKS, THREADS>>>(arr1_d, arr2_d, N);

  // BUGFIX: record `stop` immediately after the launch (events are
  // stream-ordered), not after cudaDeviceSynchronize() + a host-side print —
  // the original measured the cout/print time as part of the kernel interval.
  cudaEventRecord(stop, 0);
  cudaEventSynchronize(stop);
  std::cout << "cuda err: " << cudaGetErrorString(cudaGetLastError()) << std::endl;

  cudaEventElapsedTime(&kernel_time, start, stop);
  printf("kernel_time : %.3f ms.\n", kernel_time);
  // printf("normalized kernel_time : %.3f ms.\n", kernel_time/REPS);

  // Release everything the original leaked.
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(arr1_d);
  cudaFree(arr2_d);
  delete[] arr1_h;
  delete[] arr2_h;

  return 0;
}
|
||||
@@ -214,4 +214,244 @@ __global__ void shmem_transpose(T *in, T *out, int row_length){
|
||||
__syncthreads();
|
||||
// out[block_col_id*row_length*16 + block_row_id*16 + shmem_col_id*row_length + shmem_row_id] = shmem[shmem_row_id][shmem_col_id];
|
||||
out[block_col_id*row_length*16 + block_row_id*16 + shmem_col_id*row_length + shmem_row_id] = shmem[shmem_col_id][shmem_row_id];
|
||||
}
|
||||
}
|
||||
|
||||
// Repeated-doubling micro-benchmark: each thread loads one element and
// doubles it REPS-1 times via operator+, then writes the result back.
// One thread per element; assumes REPS >= 1.
template <class T, int REPS>
__global__ void add_many_times(T *in, T *out, int size){
  const int gid = blockIdx.x * blockDim.x + threadIdx.x;
  if (gid < size) {
    T acc = in[gid];
    #pragma unroll
    for (int rep = 1; rep < REPS; rep++) {
      acc = acc + acc;
    }
    out[gid] = acc;
  }
}
|
||||
|
||||
|
||||
// Splits the array into REPS equal segments and has each thread double
// (x + x) one element per segment — a strided global-memory add benchmark.
template <class T, int REPS>
__global__ void multi_add(T *in, T *out, int size){
  const int tid = blockDim.x*blockIdx.x + threadIdx.x;
  const int seg_len = size / REPS;
  if (tid >= seg_len) return;
  // #pragma unroll 1
  for (int seg = 0; seg < REPS; seg++)
  {
    const int idx = tid + seg * seg_len;
    T v = in[idx];
    out[idx] = v + v;
  }
}
|
||||
|
||||
// For each SEG_SIZE-long segment, accumulates the sum of the segment's
// running prefix sums (each element's exclusive prefix sum) and overwrites
// the segment's first element with it. One thread per segment.
template <class T, int SEG_SIZE>
__global__ void segment_sum(T *inout, int size){
  const int seg = blockDim.x*blockIdx.x + threadIdx.x;
  const int nof_segments = size / SEG_SIZE;
  if (seg >= nof_segments) return;
  T running = T::zero();  // exclusive prefix sum so far
  T acc = T::zero();      // sum of the prefix sums
  for (int i = 0; i < SEG_SIZE; i++)
  {
    acc = acc + running;
    running = running + inout[seg * SEG_SIZE + i];
  }
  inout[seg * SEG_SIZE] = acc;
  // inout[seg * SEG_SIZE] = running;
}
|
||||
|
||||
// Shared-memory variant of segment_sum: each thread keeps its (sum, sums_sum)
// pair in shared memory instead of registers, to compare shared-memory
// latency against the register version. The commented-out lines are
// alternative access patterns from the same experiment.
// NOTE(review): the shared array is sized 128*2, so this assumes
// blockDim.x <= 128 — confirm against the launch configuration.
// No __syncthreads() is needed in the live path: each thread only touches
// its own two slots (2*threadIdx.x and 2*threadIdx.x + 1).
template <class T, int SEG_SIZE>
__global__ void shmem_segment_sum(T *inout, int size){
  int tid = blockDim.x*blockIdx.x + threadIdx.x;
  int nof_segments = size / SEG_SIZE;
  if (tid >= nof_segments) return;
  __shared__ T shmem[128*2];
  // T sum = T::zero();
  // T sums_sum = T::zero();
  shmem[2*threadIdx.x] = T::zero(); //sum
  shmem[2*threadIdx.x + 1] = T::zero(); //sums_sum
  for (int i = 0; i < SEG_SIZE; i++)
  {
    // sums_sum += sum, staged through registers.
    {T sum = shmem[2*threadIdx.x];
    T sums_sum = shmem[2*threadIdx.x + 1];
    shmem[2*threadIdx.x + 1] = sums_sum + sum;}
    // {T sum = shmem[2*(127-threadIdx.x)];
    // T sums_sum = shmem[2*(127-threadIdx.x) + 1];
    // shmem[2*(127-threadIdx.x) + 1] = sums_sum + sum;}
    // shmem[2*(127-threadIdx.x) + 1] = shmem[2*(127-threadIdx.x) + 1] + shmem[2*(127-threadIdx.x)];
    // shmem[2*threadIdx.x + 1] = shmem[2*threadIdx.x + 1] + shmem[2*threadIdx.x];
    // __syncthreads();
    // sum += next input element, again staged through registers.
    {T sum = shmem[2*threadIdx.x];
    T sums_sum = inout[tid * SEG_SIZE + i];
    shmem[2*threadIdx.x] = sum + sums_sum;}
    // shmem[2*threadIdx.x] = shmem[2*threadIdx.x] + inout[tid * SEG_SIZE + i];
    // __syncthreads();
  }
  inout[tid * SEG_SIZE] = shmem[2*threadIdx.x + 1];
  // inout[tid * SEG_SIZE] = sum;
}
|
||||
|
||||
// Splits the array into REPS equal segments and squares (x * x) one element
// per segment per thread — the multiplication counterpart of multi_add.
// `#pragma unroll 1` deliberately keeps the loop rolled.
template <class T, int REPS>
__global__ void multi_mult(T *in, T *out, int size){
  const int tid = blockDim.x*blockIdx.x + threadIdx.x;
  const int seg_len = size / REPS;
  if (tid >= seg_len) return;
  #pragma unroll 1
  for (int seg = 0; seg < REPS; seg++)
  {
    const int idx = tid + seg * seg_len;
    T v = in[idx];
    out[idx] = v * v;
  }
}
|
||||
|
||||
|
||||
// In-place size-8 transform built from three stages of add/sub butterflies,
// shuffling values through the X registers themselves with a single scratch
// variable T instead of a second register set (compare ntt8 below).
// No twiddle-factor multiplications appear here — presumably applied by the
// caller, or this is the twiddle-free case (confirm against surrounding code).
// The exact statement order is load-bearing: each assignment consumes values
// its neighbors are about to overwrite — do not reorder.
// NOTE(review): the output register ordering may be a permutation of ntt8's —
// confirm before treating the two as drop-in replacements.
template <class E>
DEVICE_INLINE void ntt8opt(E& X0, E& X1, E& X2, E& X3, E& X4, E& X5, E& X6, E& X7)
{
  E T;

  // Stage 1: pairwise sums/differences across the stride-4 pairs.
  T = X3 - X7;
  X7 = X3 + X7;
  X3 = X1 - X5;
  X5 = X1 + X5;
  X1 = X2 + X6;
  X2 = X2 - X6;
  X6 = X0 + X4;
  X0 = X0 - X4;

  // Stage 2.
  X4 = X6 + X1;
  X6 = X6 - X1;
  X1 = X3 + T;
  X3 = X3 - T;
  T = X5 + X7;
  X5 = X5 - X7;
  X7 = X0 + X2;
  X0 = X0 - X2;

  // Stage 3.
  X2 = X6 + X5;
  X6 = X6 - X5;
  X5 = X7 - X1;
  X1 = X7 + X1;
  X7 = X0 - X3;
  X3 = X0 + X3;
  X0 = X4 + T;
  X4 = X4 - T;
}
|
||||
|
||||
|
||||
// Size-8 transform built from three stages of add/sub butterflies, using a
// full second register set (Y0..Y7) per stage and copying back at the end.
// No twiddle-factor multiplications appear here — presumably applied by the
// caller, or this is the twiddle-free case (confirm against surrounding code).
// This is the straightforward reference version; ntt8opt does the same work
// in place with one scratch register.
template <class E>
DEVICE_INLINE void ntt8(E& X0, E& X1, E& X2, E& X3, E& X4, E& X5, E& X6, E& X7)
{
  E Y0,Y1,Y2,Y3,Y4,Y5,Y6,Y7;

  // Stage 1: stride-4 butterflies.
  Y0 = X0 + X4;
  Y1 = X0 - X4;
  Y2 = X1 - X5;
  Y3 = X1 + X5;
  Y4 = X2 + X6;
  Y5 = X2 - X6;
  Y6 = X3 - X7;
  Y7 = X3 + X7;

  // Stage 2: stride-2 butterflies.
  X0 = Y0 + Y2;
  X1 = Y0 - Y2;
  X2 = Y1 - Y3;
  X3 = Y1 + Y3;
  X4 = Y4 + Y6;
  X5 = Y4 - Y6;
  X6 = Y5 - Y7;
  X7 = Y5 + Y7;

  // Stage 3: stride-1 butterflies.
  Y0 = X0 + X1;
  Y1 = X0 - X1;
  Y2 = X2 - X3;
  Y3 = X2 + X3;
  Y4 = X4 + X5;
  Y5 = X4 - X5;
  Y6 = X6 - X7;
  Y7 = X6 + X7;

  // Copy the final stage back into the in/out registers.
  X0 = Y0;
  X1 = Y1;
  X2 = Y2;
  X3 = Y3;
  X4 = Y4;
  X5 = Y5;
  X6 = Y6;
  X7 = Y7;
}
|
||||
|
||||
|
||||
|
||||
// Runs an independent size-8 transform per thread: element k of a thread's
// transform lives at in[tid + k*(size/8)], i.e. the 8 inputs are strided
// across the array so neighboring threads load neighboring addresses.
template <class T>
__global__ void multi_ntt8(T *in, T *out, int size){
  const int tid = blockDim.x*blockIdx.x + threadIdx.x;
  const int stride = size / 8;
  if (tid >= stride) return;
  T v[8];
  #pragma unroll
  for (int k = 0; k < 8; k++) {
    v[k] = in[tid + k * stride];
  }
  ntt8opt(v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);
  #pragma unroll
  for (int k = 0; k < 8; k++) {
    out[tid + k * stride] = v[k];
  }
}
|
||||
|
||||
|
||||
// 2x2-limb (64x64-bit) schoolbook multiply into r[0..3] using PTX mul/mad
// intrinsics, followed by two folding adds that collapse the product into
// r[0..1]. The fold drops carries and is not a real wide-mul layout —
// presumably benchmark residue to keep the values live (confirm intent).
// NOTE(review): callers pass aliased pointers (mul_naive(res,res,res));
// with aliasing, r[0]/r[1] are overwritten before a/b are fully read, so the
// numeric result is meaningful only as benchmark work.
// BUGFIX: removed the unused local `__align__(8) uint32_t odd[2]` — it was
// copy-paste from mul_icicle (which does use it) and was never referenced.
__device__ void mul_naive(uint32_t *a, uint32_t *b, uint32_t *r){
  r[0] = ptx::mul_lo(a[0], b[0]);
  r[1] = ptx::mul_hi(a[0], b[0]);
  r[1] = ptx::mad_lo(a[0], b[1], r[1]);
  r[1] = ptx::mad_lo(a[1], b[0], r[1]);
  r[2] = ptx::mul_lo(a[1], b[1]);
  r[2] = ptx::mad_hi(a[1], b[0], r[2]);
  r[2] = ptx::mad_hi(a[0], b[1], r[2]);
  r[3] = ptx::mul_hi(a[1], b[1]);

  r[0] = ptx::add_cc(r[0], r[1]);
  r[1] = ptx::add_cc(r[2], r[3]);
}
|
||||
|
||||
// 2x2-limb (64x64-bit) multiply in the odd/even style used by icicle:
// even partial products go straight into r[0..3], odd partials accumulate
// in `odd[]` and are folded in with a carry chain (add_cc/addc_cc/addc).
// Carry-flag dependencies make the statement order load-bearing — do not
// reorder the add_cc/addc sequence.
// The final two adds collapse the 4-limb product into r[0..1]; this is not a
// real wide-mul layout — presumably benchmark residue to keep values live
// (confirm intent). NOTE(review): callers pass aliased pointers
// (mul_icicle(res,res,res)); with aliasing the result is meaningful only as
// benchmark work.
__device__ void mul_icicle(uint32_t *a, uint32_t *b, uint32_t *r){
  __align__(8) uint32_t odd[2];  // accumulators for the odd partial products
  r[0] = ptx::mul_lo(a[0], b[0]);
  r[1] = ptx::mul_hi(a[0], b[0]);
  r[2] = ptx::mul_lo(a[1], b[1]);
  r[3] = ptx::mul_hi(a[1], b[1]);
  odd[0] = ptx::mul_lo(a[0], b[1]);
  odd[1] = ptx::mul_hi(a[0], b[1]);
  odd[0] = ptx::mad_lo(a[1], b[0], odd[0]);
  odd[1] = ptx::mad_hi(a[1], b[0], odd[1]);
  // Fold the odd column in; the carry propagates through addc_cc/addc.
  r[1] = ptx::add_cc(r[1], odd[0]);
  r[2] = ptx::addc_cc(r[2], odd[1]);
  r[3] = ptx::addc(r[3], 0);

  r[0] = ptx::add_cc(r[0], r[1]);
  r[1] = ptx::add_cc(r[2], r[3]);
}
|
||||
|
||||
|
||||
|
||||
// Limb-multiply throughput benchmark: each thread loads two 32-bit limbs
// (strided size/2 apart), repeatedly squares them in place via mul_naive
// (with fully aliased arguments, so only the generated instruction stream
// matters, not the numeric result), and writes two limbs back so the work
// is not optimized away.
template <int REPS>
__global__ void limb_mult_bench(uint32_t *in, uint32_t *out, int size){
  const int tid = blockDim.x*blockIdx.x + threadIdx.x;
  const int half = size / 2;
  if (tid >= half) return;
  uint32_t limbs[4];
  limbs[0] = in[tid];
  limbs[1] = in[tid + half];
  for (int rep = 0; rep < REPS; rep++)
  {
    mul_naive(limbs, limbs, limbs);
    // mul_icicle(limbs, limbs, limbs);
  }
  out[tid] = limbs[0];
  out[tid + half] = limbs[1];
}
|
||||
|
||||
Reference in New Issue
Block a user