mirror of https://github.com/zkonduit/ezkl.git
synced 2026-01-13 00:08:12 -05:00

Compare commits

2 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | d48d0b0b3e |  |
|  | 8b223354cc |  |
.github/workflows/pypi-gpu.yml (1 change, vendored)
```diff
@@ -34,6 +34,7 @@ jobs:
       run: |
         mv pyproject.toml pyproject.toml.orig
         sed "s/ezkl/ezkl-gpu/" pyproject.toml.orig >pyproject.toml
+        sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" pyproject.toml.orig >pyproject.toml
 
     - uses: actions-rs/toolchain@v1
       with:
```
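A note on the substitution used in this step: `${RELEASE_TAG//v}` is bash pattern substitution that deletes every `v` from the tag, so a tag such as `v1.2.3` becomes `1.2.3`, and the `sed` call then swaps that value in for the `0.0.0` placeholder. A minimal Rust sketch of the same two-step transformation (the tag value is made up for illustration):

```rust
fn main() {
    // Hypothetical tag name, as the workflow's RELEASE_TAG env var would supply.
    let release_tag = "v1.2.3";

    // Bash's ${RELEASE_TAG//v} deletes *every* "v" in the string, not only a
    // leading one; String::replace mirrors that behaviour.
    let version = release_tag.replace('v', "");
    assert_eq!(version, "1.2.3");

    // The sed pattern s/0\.0\.0/<version>/ then rewrites the placeholder line.
    let line = r#"version = "0.0.0""#;
    let patched = line.replace("0.0.0", &version);
    assert_eq!(patched, r#"version = "1.2.3""#);
}
```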
.github/workflows/pypi.yml (9 changes, vendored)
```diff
@@ -233,6 +233,14 @@ jobs:
           python-version: 3.12
           architecture: x64
 
+      - name: Set pyproject.toml version to match github tag
+        shell: bash
+        env:
+          RELEASE_TAG: ${{ github.ref_name }}
+        run: |
+          mv pyproject.toml pyproject.toml.orig
+          sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" pyproject.toml.orig >pyproject.toml
+
       - name: Set Cargo.toml version to match github tag
         shell: bash
         env:
```
```diff
@@ -242,7 +250,6 @@ jobs:
           sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" Cargo.toml.orig >Cargo.toml
           mv Cargo.lock Cargo.lock.orig
           sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" Cargo.lock.orig >Cargo.lock
 
       - name: Install required libraries
         shell: bash
         run: |
```
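For context: `${{ github.ref_name }}` is the short name of the git ref that triggered the workflow run, so on a tag-triggered release it is the tag itself (for example a hypothetical `v1.2.3`), which is exactly the shape the `${RELEASE_TAG//v}` substitution above expects.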
pyproject.toml

```diff
@@ -12,6 +12,7 @@ asyncio_mode = "auto"
 
 [project]
 name = "ezkl"
+version = "0.0.0"
 requires-python = ">=3.7"
 classifiers = [
     "Programming Language :: Rust",
```
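This `version = "0.0.0"` is the fixed placeholder that the release workflows above rewrite: keeping `0.0.0` checked in for pyproject.toml (and likewise Cargo.toml and Cargo.lock) lets the git tag serve as the single source of truth for the published version.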
src/graph/utilities.rs

```diff
@@ -142,8 +142,6 @@ use tract_onnx::prelude::SymbolValues;
 pub fn extract_tensor_value(
     input: Arc<tract_onnx::prelude::Tensor>,
 ) -> Result<Tensor<f32>, GraphError> {
-    use maybe_rayon::prelude::{IntoParallelRefIterator, ParallelIterator};
-
     let dt = input.datum_type();
     let dims = input.shape().to_vec();
 
```
```diff
@@ -156,7 +154,7 @@ pub fn extract_tensor_value(
     match dt {
         DatumType::F16 => {
             let vec = input.as_slice::<tract_onnx::prelude::f16>()?.to_vec();
-            let cast: Vec<f32> = vec.par_iter().map(|x| (*x).into()).collect();
+            let cast: Vec<f32> = vec.iter().map(|x| (*x).into()).collect();
             const_value = Tensor::<f32>::new(Some(&cast), &dims)?;
         }
         DatumType::F32 => {
```
```diff
@@ -165,61 +163,61 @@ pub fn extract_tensor_value(
         }
         DatumType::F64 => {
             let vec = input.as_slice::<f64>()?.to_vec();
-            let cast: Vec<f32> = vec.par_iter().map(|x| *x as f32).collect();
+            let cast: Vec<f32> = vec.iter().map(|x| *x as f32).collect();
             const_value = Tensor::<f32>::new(Some(&cast), &dims)?;
         }
         DatumType::I64 => {
             // Generally a shape or hyperparam
             let vec = input.as_slice::<i64>()?.to_vec();
-            let cast: Vec<f32> = vec.par_iter().map(|x| *x as f32).collect();
+            let cast: Vec<f32> = vec.iter().map(|x| *x as f32).collect();
             const_value = Tensor::<f32>::new(Some(&cast), &dims)?;
         }
         DatumType::I32 => {
             // Generally a shape or hyperparam
             let vec = input.as_slice::<i32>()?.to_vec();
-            let cast: Vec<f32> = vec.par_iter().map(|x| *x as f32).collect();
+            let cast: Vec<f32> = vec.iter().map(|x| *x as f32).collect();
             const_value = Tensor::<f32>::new(Some(&cast), &dims)?;
         }
         DatumType::I16 => {
             // Generally a shape or hyperparam
             let vec = input.as_slice::<i16>()?.to_vec();
-            let cast: Vec<f32> = vec.par_iter().map(|x| *x as f32).collect();
+            let cast: Vec<f32> = vec.iter().map(|x| *x as f32).collect();
             const_value = Tensor::<f32>::new(Some(&cast), &dims)?;
         }
         DatumType::I8 => {
             // Generally a shape or hyperparam
             let vec = input.as_slice::<i8>()?.to_vec();
-            let cast: Vec<f32> = vec.par_iter().map(|x| *x as f32).collect();
+            let cast: Vec<f32> = vec.iter().map(|x| *x as f32).collect();
             const_value = Tensor::<f32>::new(Some(&cast), &dims)?;
         }
         DatumType::U8 => {
             // Generally a shape or hyperparam
             let vec = input.as_slice::<u8>()?.to_vec();
-            let cast: Vec<f32> = vec.par_iter().map(|x| *x as f32).collect();
+            let cast: Vec<f32> = vec.iter().map(|x| *x as f32).collect();
             const_value = Tensor::<f32>::new(Some(&cast), &dims)?;
         }
         DatumType::U16 => {
             // Generally a shape or hyperparam
             let vec = input.as_slice::<u16>()?.to_vec();
-            let cast: Vec<f32> = vec.par_iter().map(|x| *x as f32).collect();
+            let cast: Vec<f32> = vec.iter().map(|x| *x as f32).collect();
             const_value = Tensor::<f32>::new(Some(&cast), &dims)?;
         }
         DatumType::U32 => {
             // Generally a shape or hyperparam
             let vec = input.as_slice::<u32>()?.to_vec();
-            let cast: Vec<f32> = vec.par_iter().map(|x| *x as f32).collect();
+            let cast: Vec<f32> = vec.iter().map(|x| *x as f32).collect();
             const_value = Tensor::<f32>::new(Some(&cast), &dims)?;
         }
         DatumType::U64 => {
             // Generally a shape or hyperparam
             let vec = input.as_slice::<u64>()?.to_vec();
-            let cast: Vec<f32> = vec.par_iter().map(|x| *x as f32).collect();
+            let cast: Vec<f32> = vec.iter().map(|x| *x as f32).collect();
             const_value = Tensor::<f32>::new(Some(&cast), &dims)?;
         }
         DatumType::Bool => {
             // Generally a shape or hyperparam
             let vec = input.as_slice::<bool>()?.to_vec();
-            let cast: Vec<f32> = vec.par_iter().map(|x| *x as usize as f32).collect();
+            let cast: Vec<f32> = vec.iter().map(|x| *x as usize as f32).collect();
             const_value = Tensor::<f32>::new(Some(&cast), &dims)?;
         }
         DatumType::TDim => {
```
```diff
@@ -227,7 +225,7 @@ pub fn extract_tensor_value(
             let vec = input.as_slice::<tract_onnx::prelude::TDim>()?.to_vec();
 
             let cast: Result<Vec<f32>, GraphError> = vec
-                .par_iter()
+                .iter()
                 .map(|x| match x.to_i64() {
                     Ok(v) => Ok(v as f32),
                     Err(_) => match x.to_i64() {
```
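The pattern repeated across these arms is the whole change: each `DatumType` previously cast its buffer with `maybe_rayon`'s `par_iter()` and now uses a plain sequential `iter()`. A minimal sketch of the two cast shapes involved, with made-up input data; note the `Bool` arm hops through `usize` because Rust has no direct `bool`-to-`f32` cast:

```rust
fn main() {
    // Numeric arms: a plain sequential cast, as in the new code.
    let ints: Vec<i64> = vec![1, 2, 3];
    let cast: Vec<f32> = ints.iter().map(|x| *x as f32).collect();
    assert_eq!(cast, vec![1.0, 2.0, 3.0]);

    // Bool arm: `bool as f32` is not allowed, hence the `as usize as f32` hop.
    let flags: Vec<bool> = vec![true, false];
    let cast: Vec<f32> = flags.iter().map(|x| *x as usize as f32).collect();
    assert_eq!(cast, vec![1.0, 0.0]);
}
```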
src/tensor/mod.rs

```diff
@@ -638,42 +638,44 @@ impl<T: Clone + TensorType> Tensor<T> {
     where
         T: Send + Sync,
     {
-        if indices.is_empty() {
+        // Fast path: empty indices or full tensor slice
+        if indices.is_empty()
+            || indices.iter().map(|x| x.end - x.start).collect::<Vec<_>>() == self.dims
+        {
             return Ok(self.clone());
         }
 
+        // Validate dimensions
         if self.dims.len() < indices.len() {
             return Err(TensorError::DimError(format!(
                 "The dimensionality of the slice {:?} is greater than the tensor's {:?}",
                 indices, self.dims
             )));
-        } else if indices.iter().map(|x| x.end - x.start).collect::<Vec<_>>() == self.dims {
-            // else if slice is the same as dims, return self
-            return Ok(self.clone());
         }
 
-        // if indices weren't specified we fill them in as required
-        let mut full_indices = indices.to_vec();
+        // Pre-allocate the full indices vector with capacity
+        let mut full_indices = Vec::with_capacity(self.dims.len());
+        full_indices.extend_from_slice(indices);
 
-        for i in 0..(self.dims.len() - indices.len()) {
-            full_indices.push(0..self.dims()[indices.len() + i])
-        }
+        // Fill remaining dimensions
+        full_indices.extend((indices.len()..self.dims.len()).map(|i| 0..self.dims[i]));
 
-        let cartesian_coord: Vec<Vec<usize>> = full_indices
+        // Pre-calculate total size and allocate result vector
+        let total_size: usize = full_indices
             .iter()
-            .cloned()
-            .multi_cartesian_product()
-            .collect();
-
-        let res: Vec<T> = cartesian_coord
-            .par_iter()
-            .map(|e| {
-                let index = self.get_index(e);
-                self[index].clone()
-            })
-            .collect();
+            .map(|range| range.end - range.start)
+            .product();
+        let mut res = Vec::with_capacity(total_size);
 
+        // Calculate new dimensions once
         let dims: Vec<usize> = full_indices.iter().map(|e| e.end - e.start).collect();
 
+        // Use iterator directly without collecting into intermediate Vec
+        for coord in full_indices.iter().cloned().multi_cartesian_product() {
+            let index = self.get_index(&coord);
+            res.push(self[index].clone());
+        }
 
         Tensor::new(Some(&res), &dims)
     }
```
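A standalone sketch of the new slicing strategy, assuming the `itertools` crate (which the diff itself relies on for `multi_cartesian_product`): the output length is known up front as the product of the range lengths, so the result vector is allocated once and the coordinates are walked directly instead of being collected into an intermediate `Vec`. The `flat_index` helper is a hypothetical stand-in for the tensor's own `get_index`:

```rust
use itertools::Itertools;

/// Row-major flat index; a stand-in for the `Tensor::get_index` in the diff.
fn flat_index(coord: &[usize], dims: &[usize]) -> usize {
    coord.iter().zip(dims.iter()).fold(0, |acc, (&c, &d)| acc * d + c)
}

fn main() {
    // A 2x3 "tensor" stored flat, sliced to rows 0..2 and columns 1..3.
    let dims = [2usize, 3];
    let data = [10, 11, 12, 20, 21, 22];
    let indices = vec![0..2usize, 1..3];

    // Pre-calculate the total size and allocate the result vector once.
    let total_size: usize = indices.iter().map(|r| r.end - r.start).product();
    let mut res = Vec::with_capacity(total_size);

    // Walk the coordinates directly, without an intermediate Vec of coords.
    for coord in indices.iter().cloned().multi_cartesian_product() {
        res.push(data[flat_index(&coord, &dims)]);
    }

    assert_eq!(res, vec![11, 12, 21, 22]);
}
```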