Compare commits


4 Commits

Author               SHA1        Message                                          Date
github-actions[bot]  371e67421c  ci: update version string in docs                2024-11-28 08:15:13 +00:00
dante                64cbcb3f7e  chore: explicitly compile div op (#876)          2024-11-28 17:14:53 +09:00
dante                ee17f0ff9a  chore: generalize the exp to other bases (#875)  2024-11-26 09:31:12 +09:00
Jseam                ee55e7dc19  fix: upgrade run-on-arch (#874)                  2024-11-24 14:30:42 +09:00
14 changed files with 193 additions and 54 deletions

View File

@@ -168,7 +168,7 @@ jobs:
          name: wheels
          path: dist
  # TODO: There's a problem with the maturin-action toolchain for arm arch leading to failed builds
  # There's a problem with the maturin-action toolchain for arm arch leading to failed builds
  # linux-cross:
  #   runs-on: ubuntu-latest
  #   strategy:
@@ -306,7 +306,7 @@ jobs:
          manylinux: musllinux_1_2
          args: --release --out dist --features python-bindings
      - uses: uraimo/run-on-arch-action@v2.5.0
      - uses: uraimo/run-on-arch-action@v2.8.1
        name: Install built wheel
        with:
          arch: ${{ matrix.platform.arch }}

View File

@@ -207,23 +207,6 @@ jobs:
        # AR=/opt/homebrew/opt/llvm/bin/llvm-ar CC=/opt/homebrew/opt/llvm/bin/clang wasm-pack test --firefox --headless -- -Z build-std="panic_abort,std" --features web
        run: wasm-pack test --chrome --headless -- -Z build-std="panic_abort,std" --features web
  tutorial:
    runs-on: ubuntu-latest
    needs: [build, library-tests, docs, python-tests, python-integration-tests]
    steps:
      - uses: actions/checkout@v4
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly-2024-07-18
          override: true
          components: rustfmt, clippy
      - uses: baptiste0928/cargo-install@v1
        with:
          crate: cargo-nextest
          locked: true
      - name: Circuit Render
        run: cargo nextest run --release --verbose tests::tutorial_
  mock-proving-tests:
    runs-on: non-gpu
    needs: [build, library-tests, docs, python-tests, python-integration-tests]
@@ -494,23 +477,23 @@ jobs:
      - name: Mock aggr tests (KZG)
        run: cargo nextest run --release --verbose tests_aggr::kzg_aggr_mock_prove_and_verify_ --test-threads 8
  prove-and-verify-aggr-tests-gpu:
    runs-on: GPU
    env:
      ENABLE_ICICLE_GPU: true
    steps:
      - uses: actions/checkout@v4
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly-2024-07-18
          override: true
          components: rustfmt, clippy
      - uses: baptiste0928/cargo-install@v1
        with:
          crate: cargo-nextest
          locked: true
      - name: KZG tests
        run: cargo nextest run --verbose tests_aggr::kzg_aggr_prove_and_verify_ --features icicle --test-threads 1 -- --include-ignored
  # prove-and-verify-aggr-tests-gpu:
  #   runs-on: GPU
  #   env:
  #     ENABLE_ICICLE_GPU: true
  #   steps:
  #     - uses: actions/checkout@v4
  #     - uses: actions-rs/toolchain@v1
  #       with:
  #         toolchain: nightly-2024-07-18
  #         override: true
  #         components: rustfmt, clippy
  #     - uses: baptiste0928/cargo-install@v1
  #       with:
  #         crate: cargo-nextest
  #         locked: true
  #     - name: KZG tests
  #       run: cargo nextest run --verbose tests_aggr::kzg_aggr_prove_and_verify_ --features icicle --test-threads 1 -- --include-ignored
  prove-and-verify-aggr-tests:
    runs-on: large-self-hosted
@@ -614,8 +597,6 @@ jobs:
        run: python -m venv .env --clear; source .env/bin/activate; pip install -r requirements.txt;
      - name: Build python ezkl
        run: source .env/bin/activate; unset CONDA_PREFIX; maturin develop --features python-bindings --release
      - name: Div rebase
        run: source .env/bin/activate; cargo nextest run --release --verbose tests::accuracy_measurement_div_rebase_
      - name: Public inputs
        run: source .env/bin/activate; cargo nextest run --release --verbose tests::accuracy_measurement_public_inputs_
      - name: fixed params

View File

@@ -1,7 +1,7 @@
import ezkl
project = 'ezkl'
release = '0.0.0'
release = '15.6.5'
version = release

examples/onnx/exp/gen.py Normal file
View File

@@ -0,0 +1,42 @@
from torch import nn
import torch
import json
import numpy as np
class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()

    def forward(self, x):
        m = torch.exp(x)
        return m

circuit = MyModel()
x = torch.empty(1, 8).uniform_(0, 1)
out = circuit(x)
print(out)

torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=17,          # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},  # variable length axes
                                'output': {0: 'batch_size'}})

d1 = ((x).detach().numpy()).reshape([-1]).tolist()

data = dict(
    input_data=[d1],
)

# Serialize data into file:
json.dump(data, open("input.json", 'w'))

View File

@@ -0,0 +1 @@
{"input_data": [[0.5801457762718201, 0.6019012331962585, 0.8695418238639832, 0.17170941829681396, 0.500616729259491, 0.353726327419281, 0.6726185083389282, 0.5936906337738037]]}

View File

Binary file not shown. (ONNX protobuf for the Exp example: a single Exp node from 'input' to 'output' with a dynamic batch_size axis, exported from PyTorch 2.2.2.)

View File

@@ -0,0 +1,41 @@
from torch import nn
import torch
import json
import numpy as np
class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()

    def forward(self, x):
        m = 10**x
        return m

circuit = MyModel()
x = torch.empty(1, 8).uniform_(0, 1)
out = circuit(x)
print(out)

torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=17,          # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},  # variable length axes
                                'output': {0: 'batch_size'}})

d1 = ((x).detach().numpy()).reshape([-1]).tolist()

data = dict(
    input_data=[d1],
)

# Serialize data into file:
json.dump(data, open("input.json", 'w'))

View File

@@ -0,0 +1 @@
{"input_data": [[0.9837989807128906, 0.026381194591522217, 0.3403851389884949, 0.14531707763671875, 0.24652725458145142, 0.7945117354393005, 0.4076554775238037, 0.23064672946929932]]}

Binary file not shown.

View File

@@ -1,5 +1,6 @@
use std::{
    collections::{HashMap, HashSet},
    f64::consts::E,
    ops::Range,
};
@@ -5624,7 +5625,10 @@ pub fn softmax<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
        config,
        region,
        &[sub],
        &LookupOp::Exp { scale: input_scale },
        &LookupOp::Exp {
            scale: input_scale,
            base: E.into(),
        },
    )?;
    percent(config, region, &[ex.clone()], input_scale, output_scale)
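
The softmax path is behavior-preserving here: passing `base: E.into()` makes the generalized lookup reproduce the old hard-coded e^x. A minimal sketch of that equivalence in plain Rust (no ezkl types; `exp_with_base` is a hypothetical stand-in for the lookup's underlying function):

```rust
use std::f64::consts::E;

// Hypothetical stand-in for the generalized exponential: base^x.
fn exp_with_base(x: f64, base: f64) -> f64 {
    base.powf(x)
}

fn main() {
    // With base = e, the generalized op matches the old exp() behavior...
    for x in [-1.5_f64, 0.0, 0.7, 2.0] {
        assert!((exp_with_base(x, E) - x.exp()).abs() <= 1e-12 * x.exp().max(1.0));
    }
    // ...while the same op now also covers e.g. 10^x, as used by the
    // new general_exp example above.
    assert!((exp_with_base(2.0, 10.0) - 100.0).abs() < 1e-9);
}
```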

View File

@@ -19,7 +19,7 @@ pub enum LookupOp {
    PowersOfTwo { scale: utils::F32 },
    Ln { scale: utils::F32 },
    Sigmoid { scale: utils::F32 },
    Exp { scale: utils::F32 },
    Exp { scale: utils::F32, base: utils::F32 },
    Cos { scale: utils::F32 },
    ACos { scale: utils::F32 },
    Cosh { scale: utils::F32 },
@@ -55,7 +55,7 @@ impl LookupOp {
            LookupOp::Div { denom } => format!("div_{}", denom),
            LookupOp::Sigmoid { scale } => format!("sigmoid_{}", scale),
            LookupOp::Erf { scale } => format!("erf_{}", scale),
            LookupOp::Exp { scale } => format!("exp_{}", scale),
            LookupOp::Exp { scale, base } => format!("exp_{}_{}", scale, base),
            LookupOp::Cos { scale } => format!("cos_{}", scale),
            LookupOp::ACos { scale } => format!("acos_{}", scale),
            LookupOp::Cosh { scale } => format!("cosh_{}", scale),
@@ -99,9 +99,9 @@ impl LookupOp {
            LookupOp::Erf { scale } => {
                Ok::<_, TensorError>(tensor::ops::nonlinearities::erffunc(&x, scale.into()))
            }
            LookupOp::Exp { scale } => {
                Ok::<_, TensorError>(tensor::ops::nonlinearities::exp(&x, scale.into()))
            }
            LookupOp::Exp { scale, base } => Ok::<_, TensorError>(
                tensor::ops::nonlinearities::exp(&x, scale.into(), base.into()),
            ),
            LookupOp::Cos { scale } => {
                Ok::<_, TensorError>(tensor::ops::nonlinearities::cos(&x, scale.into()))
            }
@@ -165,7 +165,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Lookup
            LookupOp::Div { denom, .. } => format!("DIV(denom={})", denom),
            LookupOp::Sigmoid { scale } => format!("SIGMOID(scale={})", scale),
            LookupOp::Erf { scale } => format!("ERF(scale={})", scale),
            LookupOp::Exp { scale } => format!("EXP(scale={})", scale),
            LookupOp::Exp { scale, base } => format!("EXP(scale={}, base={})", scale, base),
            LookupOp::Tan { scale } => format!("TAN(scale={})", scale),
            LookupOp::ATan { scale } => format!("ATAN(scale={})", scale),
            LookupOp::Tanh { scale } => format!("TANH(scale={})", scale),
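
Why `base` shows up in the `as_string` name: that string plausibly serves as the identifier for the op's precomputed lookup table, so an e^x table and a 10^x table at the same scale must not share a key. A hypothetical sketch of the collision the new format avoids, with a `HashMap` standing in for ezkl's actual table cache:

```rust
use std::collections::HashMap;
use std::f64::consts::E;

// Mirrors the new key format from the diff: format!("exp_{}_{}", scale, base).
fn exp_table_key(scale: f32, base: f32) -> String {
    format!("exp_{}_{}", scale, base)
}

fn main() {
    let mut tables: HashMap<String, Vec<i64>> = HashMap::new();
    // Two exp tables at the same scale but different bases:
    tables.insert(exp_table_key(128.0, E as f32), Vec::new());
    tables.insert(exp_table_key(128.0, 10.0), Vec::new());
    // Under the old format ("exp_{scale}") both would key to "exp_128"
    // and silently share one table; with the base included they stay distinct.
    assert_eq!(tables.len(), 2);
}
```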

View File

@@ -279,6 +279,8 @@ pub fn new_op_from_onnx(
    symbol_values: &SymbolValues,
    run_args: &crate::RunArgs,
) -> Result<(SupportedOp, Vec<usize>), GraphError> {
    use std::f64::consts::E;
    use tract_onnx::tract_core::ops::array::Trilu;
    use crate::circuit::InputType;
@@ -855,6 +857,7 @@ pub fn new_op_from_onnx(
        }
        "Exp" => SupportedOp::Nonlinear(LookupOp::Exp {
            scale: scale_to_multiplier(input_scales[0]).into(),
            base: E.into(),
        }),
        "Ln" => {
            if run_args.bounded_log_lookup {
@@ -1134,7 +1137,57 @@ pub fn new_op_from_onnx(
                    })
                }
            } else {
                unimplemented!("only support constant pow for now")
                if let Some(c) = inputs[0].opkind().get_mutable_constant() {
                    inputs[0].decrement_use();
                    deleted_indices.push(0);
                    if c.raw_values.len() > 1 {
                        unimplemented!("only support scalar base")
                    }
                    let base = c.raw_values[0];
                    SupportedOp::Nonlinear(LookupOp::Exp {
                        scale: scale_to_multiplier(input_scales[1]).into(),
                        base: base.into(),
                    })
                } else {
                    unimplemented!("only support constant base or pow for now")
                }
            }
        }
        "Div" => {
            let const_idx = inputs
                .iter()
                .enumerate()
                .filter(|(_, n)| n.is_constant())
                .map(|(i, _)| i)
                .collect::<Vec<_>>();
            if const_idx.len() > 1 {
                return Err(GraphError::InvalidDims(idx, "div".to_string()));
            }
            let const_idx = const_idx[0];
            if const_idx != 1 {
                unimplemented!("only support div with constant as second input")
            }
            if let Some(c) = inputs[const_idx].opkind().get_mutable_constant() {
                if c.raw_values.len() == 1 && c.raw_values[0] != 0. {
                    inputs[const_idx].decrement_use();
                    deleted_indices.push(const_idx);
                    // get the non constant index
                    let denom = c.raw_values[0];
                    SupportedOp::Hybrid(HybridOp::Div {
                        denom: denom.into(),
                    })
                } else {
                    unimplemented!("only support non zero divisors of size 1")
                }
            } else {
                unimplemented!("only support div with constant as second input")
            }
        }
        "Cube" => SupportedOp::Linear(PolyOp::Pow(3)),

View File

@@ -1664,7 +1664,7 @@ pub mod nonlinearities {
    /// Some(&[2, 15, 2, 1, 1, 0]),
    /// &[2, 3],
    /// ).unwrap();
    /// let result = exp(&x, 1.0);
    /// let result = exp(&x, 1.0, std::f64::consts::E);
    /// let expected = Tensor::<IntegerRep>::new(Some(&[7, 3269017, 7, 3, 3, 1]), &[2, 3]).unwrap();
    /// assert_eq!(result, expected);
    ///
@@ -1673,16 +1673,16 @@ pub mod nonlinearities {
    /// Some(&[37, 12, 41]),
    /// &[3],
    /// ).unwrap();
    /// let result = exp(&x, 512.0);
    /// let result = exp(&x, 512.0, std::f64::consts::E);
    ///
    /// let expected = Tensor::<IntegerRep>::new(Some(&[550, 524, 555]), &[3]).unwrap();
    ///
    /// assert_eq!(result, expected);
    /// ```
    pub fn exp(a: &Tensor<IntegerRep>, scale_input: f64) -> Tensor<IntegerRep> {
    pub fn exp(a: &Tensor<IntegerRep>, scale_input: f64, base: f64) -> Tensor<IntegerRep> {
        a.par_enum_map(|_, a_i| {
            let kix = (a_i as f64) / scale_input;
            let fout = scale_input * kix.exp();
            let fout = scale_input * base.powf(kix);
            let rounded = fout.round();
            Ok::<_, TensorError>(rounded as IntegerRep)
        })
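
The arithmetic of the change, extracted into plain Rust (with `i128` standing in for `IntegerRep`, a `Vec` for `Tensor`, and a plain iterator for `par_enum_map`): dequantize by `scale`, exponentiate with the chosen `base`, requantize and round. The old body's `kix.exp()` is exactly the `base = e` special case. The asserts reuse the values from the doctest above:

```rust
fn exp_quantized(a: &[i128], scale: f64, base: f64) -> Vec<i128> {
    a.iter()
        .map(|&a_i| {
            let kix = (a_i as f64) / scale; // dequantize
            let fout = scale * base.powf(kix); // base^x instead of hard-coded e^x
            fout.round() as i128 // requantize
        })
        .collect()
}

fn main() {
    use std::f64::consts::E;
    // Scale 1.0, base e: round(e^2) = 7, round(e^15) = 3269017, ...
    assert_eq!(
        exp_quantized(&[2, 15, 2, 1, 1, 0], 1.0, E),
        vec![7, 3269017, 7, 3, 3, 1]
    );
    // Scale 512.0: small quantized inputs stay near the identity.
    assert_eq!(exp_quantized(&[37, 12, 41], 512.0, E), vec![550, 524, 555]);
}
```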

View File

@@ -205,7 +205,7 @@ mod native_tests {
"1l_tiny_div",
];
const TESTS: [&str; 96] = [
const TESTS: [&str; 98] = [
"1l_mlp", //0
"1l_slice",
"1l_concat",
@@ -306,6 +306,8 @@ mod native_tests {
"lenet_5", // 93
"rsqrt", // 94
"log", // 95
"exp", // 96
"general_exp", // 97
];
const WASM_TESTS: [&str; 46] = [
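
The bookkeeping above has to stay in sync in two places: the two new examples land at indices 96 and 97, so the array type grows to `[&str; 98]` and the `seq!` upper bound further down moves from 95 to 97 (inclusive). A trivial check of that arithmetic:

```rust
fn main() {
    let tests_before: usize = 96; // old [&str; 96]
    let new_entries = ["exp", "general_exp"]; // indices 96 and 97
    let tests_after = tests_before + new_entries.len();
    assert_eq!(tests_after, 98); // new [&str; 98]
    assert_eq!(tests_after - 1, 97); // inclusive bound for seq!(N in 0..=97)
}
```
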
@@ -490,7 +492,7 @@ mod native_tests {
    #[cfg(feature="icicle")]
    seq!(N in 0..=2 {
        #(#[test_case(TESTS_AGGR[N])])*
        fn aggr_prove_and_verify_(test: &str) {
        fn kzg_aggr_prove_and_verify_(test: &str) {
            crate::native_tests::init_binary();
            let test_dir = TempDir::new(test).unwrap();
            let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(test_dir.path().to_str().unwrap(), test);
@@ -544,7 +546,7 @@ mod native_tests {
        }
    });
    seq!(N in 0..=95 {
    seq!(N in 0..=97 {
        #(#[test_case(TESTS[N])])*
        #[ignore]