Compare commits

...

3 Commits

Author               SHA1        Message                                          Date
github-actions[bot]  935fd31cc0  ci: update version string in docs                2024-11-26 00:31:33 +00:00
dante                ee17f0ff9a  chore: generalize the exp to other bases (#875)  2024-11-26 09:31:12 +09:00
Jseam                ee55e7dc19  fix: upgrade run-on-arch (#874)                  2024-11-24 14:30:42 +09:00
13 changed files with 140 additions and 17 deletions

View File

@@ -168,7 +168,7 @@ jobs:
          name: wheels
          path: dist
-  # TODO: There's a problem with the maturin-action toolchain for arm arch leading to failed builds
+  # There's a problem with the maturin-action toolchain for arm arch leading to failed builds
  # linux-cross:
  #   runs-on: ubuntu-latest
  #   strategy:
@@ -306,7 +306,7 @@ jobs:
          manylinux: musllinux_1_2
          args: --release --out dist --features python-bindings
-      - uses: uraimo/run-on-arch-action@v2.5.0
+      - uses: uraimo/run-on-arch-action@v2.8.1
        name: Install built wheel
        with:
          arch: ${{ matrix.platform.arch }}

View File

@@ -1,7 +1,7 @@
import ezkl
project = 'ezkl'
-release = '0.0.0'
+release = '15.6.4'
version = release

examples/onnx/exp/gen.py Normal file
View File

@@ -0,0 +1,42 @@
from torch import nn
import torch
import json
import numpy as np


class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()

    def forward(self, x):
        m = torch.exp(x)
        return m


circuit = MyModel()

x = torch.empty(1, 8).uniform_(0, 1)

out = circuit(x)
print(out)

torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=17,          # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},  # variable length axes
                                'output': {0: 'batch_size'}})

d1 = ((x).detach().numpy()).reshape([-1]).tolist()

data = dict(
    input_data=[d1],
)

# Serialize data into file:
json.dump(data, open("input.json", 'w'))
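
Not part of the diff, but a quick way to sanity-check the exported graph (assumes onnxruntime is installed; the input name matches the export above):

import numpy as np
import onnxruntime as ort

# run the exported model on a fresh sample and compare against numpy's exp
sess = ort.InferenceSession("network.onnx")
sample = np.random.uniform(0, 1, (1, 8)).astype(np.float32)
(onnx_out,) = sess.run(None, {"input": sample})
assert np.allclose(onnx_out, np.exp(sample), rtol=1e-5)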

View File

@@ -0,0 +1 @@
{"input_data": [[0.5801457762718201, 0.6019012331962585, 0.8695418238639832, 0.17170941829681396, 0.500616729259491, 0.353726327419281, 0.6726185083389282, 0.5936906337738037]]}

View File

Binary file not shown. (ONNX protobuf: a single Exp node exported from PyTorch 2.2.2, with dynamic batch_size axes on input and output)

View File

@@ -0,0 +1,41 @@
from torch import nn
import torch
import json
import numpy as np


class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()

    def forward(self, x):
        m = 10**x
        return m


circuit = MyModel()

x = torch.empty(1, 8).uniform_(0, 1)

out = circuit(x)
print(out)

torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=17,          # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},  # variable length axes
                                'output': {0: 'batch_size'}})

d1 = ((x).detach().numpy()).reshape([-1]).tolist()

data = dict(
    input_data=[d1],
)

# Serialize data into file:
json.dump(data, open("input.json", 'w'))
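
For reference: 10**x should serialize as an ONNX Pow node whose first input is the constant base, which is exactly the shape the new branch in new_op_from_onnx (below) lowers to LookupOp::Exp, taking the scale from the exponent input. A quick way to confirm, assuming the onnx package:

import onnx

# list the ops in the exported graph; a Pow node is expected for 10**x
model = onnx.load("network.onnx")
print([node.op_type for node in model.graph.node])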

View File

@@ -0,0 +1 @@
{"input_data": [[0.9837989807128906, 0.026381194591522217, 0.3403851389884949, 0.14531707763671875, 0.24652725458145142, 0.7945117354393005, 0.4076554775238037, 0.23064672946929932]]}

Binary file not shown.

View File

@@ -1,5 +1,6 @@
use std::{
    collections::{HashMap, HashSet},
+    f64::consts::E,
    ops::Range,
};
@@ -5624,7 +5625,10 @@ pub fn softmax<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
        config,
        region,
        &[sub],
-        &LookupOp::Exp { scale: input_scale },
+        &LookupOp::Exp {
+            scale: input_scale,
+            base: E.into(),
+        },
    )?;
    percent(config, region, &[ex.clone()], input_scale, output_scale)
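
Note: the softmax call site passes base e explicitly, so its behaviour is unchanged. E.powf(k) is not guaranteed bit-identical to k.exp(), but the difference is a few ulps and washes out under the integer rounding in the lookup; a quick illustration in Python:

import math

# pow with base e agrees with the dedicated exp to within float noise
for k in (-1.5, 0.0, 0.3, 2.0, 7.25):
    assert math.isclose(math.e ** k, math.exp(k), rel_tol=1e-12)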

View File

@@ -19,7 +19,7 @@ pub enum LookupOp {
    PowersOfTwo { scale: utils::F32 },
    Ln { scale: utils::F32 },
    Sigmoid { scale: utils::F32 },
-    Exp { scale: utils::F32 },
+    Exp { scale: utils::F32, base: utils::F32 },
    Cos { scale: utils::F32 },
    ACos { scale: utils::F32 },
    Cosh { scale: utils::F32 },
@@ -55,7 +55,7 @@ impl LookupOp {
            LookupOp::Div { denom } => format!("div_{}", denom),
            LookupOp::Sigmoid { scale } => format!("sigmoid_{}", scale),
            LookupOp::Erf { scale } => format!("erf_{}", scale),
-            LookupOp::Exp { scale } => format!("exp_{}", scale),
+            LookupOp::Exp { scale, base } => format!("exp_{}_{}", scale, base),
            LookupOp::Cos { scale } => format!("cos_{}", scale),
            LookupOp::ACos { scale } => format!("acos_{}", scale),
            LookupOp::Cosh { scale } => format!("cosh_{}", scale),
@@ -99,9 +99,9 @@ impl LookupOp {
            LookupOp::Erf { scale } => {
                Ok::<_, TensorError>(tensor::ops::nonlinearities::erffunc(&x, scale.into()))
            }
-            LookupOp::Exp { scale } => {
-                Ok::<_, TensorError>(tensor::ops::nonlinearities::exp(&x, scale.into()))
-            }
+            LookupOp::Exp { scale, base } => Ok::<_, TensorError>(
+                tensor::ops::nonlinearities::exp(&x, scale.into(), base.into()),
+            ),
            LookupOp::Cos { scale } => {
                Ok::<_, TensorError>(tensor::ops::nonlinearities::cos(&x, scale.into()))
            }
@@ -165,7 +165,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Lookup
            LookupOp::Div { denom, .. } => format!("DIV(denom={})", denom),
            LookupOp::Sigmoid { scale } => format!("SIGMOID(scale={})", scale),
            LookupOp::Erf { scale } => format!("ERF(scale={})", scale),
-            LookupOp::Exp { scale } => format!("EXP(scale={})", scale),
+            LookupOp::Exp { scale, base } => format!("EXP(scale={}, base={})", scale, base),
            LookupOp::Tan { scale } => format!("TAN(scale={})", scale),
            LookupOp::ATan { scale } => format!("ATAN(scale={})", scale),
            LookupOp::Tanh { scale } => format!("TANH(scale={})", scale),
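
With base folded into the variant and into the table key (exp_{scale}_{base} above), e^x and 10^x can no longer collide on one lookup table. A rough sketch of the first few table values at scale 1 (illustrative only; the real tables are generated in Rust over the full lookup range):

import math

# at scale 1, the entries diverge immediately by base:
print([round(math.e ** x) for x in range(4)])  # [1, 3, 7, 20]
print([round(10.0 ** x) for x in range(4)])    # [1, 10, 100, 1000]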

View File

@@ -279,6 +279,8 @@ pub fn new_op_from_onnx(
    symbol_values: &SymbolValues,
    run_args: &crate::RunArgs,
) -> Result<(SupportedOp, Vec<usize>), GraphError> {
+    use std::f64::consts::E;
    use tract_onnx::tract_core::ops::array::Trilu;

    use crate::circuit::InputType;
@@ -855,6 +857,7 @@ pub fn new_op_from_onnx(
        }
        "Exp" => SupportedOp::Nonlinear(LookupOp::Exp {
            scale: scale_to_multiplier(input_scales[0]).into(),
+            base: E.into(),
        }),
        "Ln" => {
            if run_args.bounded_log_lookup {
@@ -1134,7 +1137,22 @@ pub fn new_op_from_onnx(
                    })
                }
            } else {
-                unimplemented!("only support constant pow for now")
+                if let Some(c) = inputs[0].opkind().get_mutable_constant() {
+                    inputs[0].decrement_use();
+                    deleted_indices.push(0);
+                    if c.raw_values.len() > 1 {
+                        unimplemented!("only support scalar base")
+                    }
+                    let base = c.raw_values[0];
+                    SupportedOp::Nonlinear(LookupOp::Exp {
+                        scale: scale_to_multiplier(input_scales[1]).into(),
+                        base: base.into(),
+                    })
+                } else {
+                    unimplemented!("only support constant base or pow for now")
+                }
            }
        }
        "Cube" => SupportedOp::Linear(PolyOp::Pow(3)),

View File

@@ -1664,7 +1664,7 @@ pub mod nonlinearities {
    /// Some(&[2, 15, 2, 1, 1, 0]),
    /// &[2, 3],
    /// ).unwrap();
-    /// let result = exp(&x, 1.0);
+    /// let result = exp(&x, 1.0, std::f64::consts::E);
    /// let expected = Tensor::<IntegerRep>::new(Some(&[7, 3269017, 7, 3, 3, 1]), &[2, 3]).unwrap();
    /// assert_eq!(result, expected);
    ///
@@ -1673,16 +1673,16 @@ pub mod nonlinearities {
    /// Some(&[37, 12, 41]),
    /// &[3],
    /// ).unwrap();
-    /// let result = exp(&x, 512.0);
+    /// let result = exp(&x, 512.0, std::f64::consts::E);
    ///
    /// let expected = Tensor::<IntegerRep>::new(Some(&[550, 524, 555]), &[3]).unwrap();
    ///
    /// assert_eq!(result, expected);
    /// ```
-    pub fn exp(a: &Tensor<IntegerRep>, scale_input: f64) -> Tensor<IntegerRep> {
+    pub fn exp(a: &Tensor<IntegerRep>, scale_input: f64, base: f64) -> Tensor<IntegerRep> {
        a.par_enum_map(|_, a_i| {
            let kix = (a_i as f64) / scale_input;
-            let fout = scale_input * kix.exp();
+            let fout = scale_input * base.powf(kix);
            let rounded = fout.round();
            Ok::<_, TensorError>(rounded as IntegerRep)
        })
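
The doctest values are easy to cross-check outside Rust: the same fixed-point recipe in Python reproduces both expected tensors (Python's round() differs from Rust's f64::round only at exact .5 ties, which these values avoid):

import math

def exp_quant(values, scale, base=math.e):
    # round(scale * base**(v / scale)), as in nonlinearities::exp
    return [round(scale * base ** (v / scale)) for v in values]

assert exp_quant([2, 15, 2, 1, 1, 0], 1.0) == [7, 3269017, 7, 3, 3, 1]
assert exp_quant([37, 12, 41], 512.0) == [550, 524, 555]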

View File

@@ -205,7 +205,7 @@ mod native_tests {
"1l_tiny_div",
];
const TESTS: [&str; 96] = [
const TESTS: [&str; 98] = [
"1l_mlp", //0
"1l_slice",
"1l_concat",
@@ -306,6 +306,8 @@ mod native_tests {
"lenet_5", // 93
"rsqrt", // 94
"log", // 95
"exp", // 96
"general_exp", // 97
];
const WASM_TESTS: [&str; 46] = [
@@ -544,7 +546,7 @@ mod native_tests {
        }
    });

-    seq!(N in 0..=95 {
+    seq!(N in 0..=97 {
        #(#[test_case(TESTS[N])])*
        #[ignore]