Compare commits

...

6 Commits

Author SHA1 Message Date
Jason Morton
11ac120f23 fix: large test numbering(#689)
Co-authored-by: dante <45801863+alexander-camuto@users.noreply.github.com>
2024-01-21 21:01:46 +00:00
Jseam
0fdd92e9f3 fix: move install_ezkl_cli.sh into main repo (#694)
Co-authored-by: dante <45801863+alexander-camuto@users.noreply.github.com>
2024-01-21 20:59:39 +00:00
Alexander Camuto
31f58056a5 chore: bump py03 2024-01-21 20:58:32 +00:00
dante
ddbcc1d2d8 fix: calibration should only consider local scales (#691) 2024-01-18 23:28:49 +00:00
Vehorny
feccc5feed chore(examples): proofreading the notebooks (#687)
---------

Co-authored-by: dante <45801863+alexander-camuto@users.noreply.github.com>
2024-01-18 14:48:02 +00:00
dante
db24577c5d fix: calibrate from total min/max on lookups rather than individual x (#690) 2024-01-17 23:59:15 +00:00
19 changed files with 389 additions and 191 deletions

View File

@@ -6,7 +6,7 @@ on:
description: "Test scenario tags"
jobs:
large-tests:
runs-on: self-hosted
runs-on: kaiju
steps:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
@@ -23,6 +23,6 @@ jobs:
- name: Self Attention KZG prove and verify large tests
run: cargo test --release --verbose tests::large_kzg_prove_and_verify_::large_tests_0_expects -- --include-ignored
- name: mobilenet Mock
run: cargo test --release --verbose tests::large_mock_::large_tests_2_expects -- --include-ignored
run: cargo test --release --verbose tests::large_mock_::large_tests_3_expects -- --include-ignored
- name: mobilenet KZG prove and verify large tests
run: cargo test --release --verbose tests::large_kzg_prove_and_verify_::large_tests_2_expects -- --include-ignored
run: cargo test --release --verbose tests::large_kzg_prove_and_verify_::large_tests_3_expects -- --include-ignored

View File

@@ -465,7 +465,7 @@ jobs:
# run: cargo nextest run --release --verbose tests::kzg_fuzz_ --test-threads 6
prove-and-verify-mock-aggr-tests:
runs-on: ubuntu-latest-32-cores
runs-on: self-hosted
needs: [build, library-tests]
steps:
- uses: actions/checkout@v4
@@ -611,27 +611,7 @@ jobs:
run: source .env/bin/activate; cargo nextest run --release --verbose tests::resources_accuracy_measurement_public_outputs_
python-integration-tests:
runs-on:
large-self-hosted
# Service containers to run with `container-job`
services:
# Label used to access the service container
postgres:
# Docker Hub image
image: postgres
env:
POSTGRES_USER: ubuntu
POSTGRES_HOST_AUTH_METHOD: trust
# Set health checks to wait until postgres has started
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
ports:
# Maps tcp port 5432 on service container to the host
- 5432:5432
# needs: [build, library-tests, docs]
runs-on: large-self-hosted
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v4
@@ -663,13 +643,14 @@ jobs:
# # now dump the contents of the file into a file called kaggle.json
# echo $KAGGLE_API_KEY > /home/ubuntu/.kaggle/kaggle.json
# chmod 600 /home/ubuntu/.kaggle/kaggle.json
- name: Voice tutorial
run: source .env/bin/activate; cargo nextest run py_tests::tests::voice_
- name: NBEATS tutorial
run: source .env/bin/activate; cargo nextest run py_tests::tests::nbeats_
- name: All notebooks
run: source .env/bin/activate; cargo nextest run py_tests::tests::run_notebook_ --test-threads 1
- name: Tictactoe tutorials
run: source .env/bin/activate; cargo nextest run py_tests::tests::tictactoe_ --test-threads 1
# - name: Postgres tutorials
# run: source .env/bin/activate; cargo nextest run py_tests::tests::postgres_ --test-threads 1
- name: All notebooks
run: source .env/bin/activate; cargo nextest run py_tests::tests::run_notebook_ --test-threads 1
- name: NBEATS tutorial
run: source .env/bin/activate; cargo nextest run py_tests::tests::nbeats_
- name: Voice tutorial
run: source .env/bin/activate; cargo nextest run py_tests::tests::voice_

58
Cargo.lock generated
View File

@@ -1088,7 +1088,7 @@ dependencies = [
"autocfg",
"cfg-if",
"crossbeam-utils",
"memoffset 0.9.0",
"memoffset",
"scopeguard",
]
@@ -2668,9 +2668,9 @@ dependencies = [
[[package]]
name = "indoc"
version = "1.0.9"
version = "2.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfa799dd5ed20a7e349f3b4639aa80d74549c81716d9ec4f994c9b5815598306"
checksum = "1e186cfbae8084e513daff4240b4797e342f988cecda4fb6c939150f96315fd8"
[[package]]
name = "inout"
@@ -3001,15 +3001,6 @@ dependencies = [
"libc",
]
[[package]]
name = "memoffset"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1"
dependencies = [
"autocfg",
]
[[package]]
name = "memoffset"
version = "0.9.0"
@@ -3840,14 +3831,14 @@ dependencies = [
[[package]]
name = "pyo3"
version = "0.18.3"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3b1ac5b3731ba34fdaa9785f8d74d17448cd18f30cf19e0c7e7b1fdb5272109"
checksum = "9a89dc7a5850d0e983be1ec2a463a171d20990487c3cfcd68b5363f1ee3d6fe0"
dependencies = [
"cfg-if",
"indoc",
"libc",
"memoffset 0.8.0",
"memoffset",
"parking_lot",
"pyo3-build-config",
"pyo3-ffi",
@@ -3857,9 +3848,9 @@ dependencies = [
[[package]]
name = "pyo3-asyncio"
version = "0.18.0"
version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3564762e37035cfc486228e10b0528460fa026d681b5763873c693aa0d5c260"
checksum = "6ea6b68e93db3622f3bb3bf363246cf948ed5375afe7abff98ccbdd50b184995"
dependencies = [
"futures",
"once_cell",
@@ -3871,9 +3862,9 @@ dependencies = [
[[package]]
name = "pyo3-asyncio-macros"
version = "0.18.0"
version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be72d4cd43a27530306bd0d20d3932182fbdd072c6b98d3638bc37efb9d559dd"
checksum = "56c467178e1da6252c95c29ecf898b133f742e9181dca5def15dc24e19d45a39"
dependencies = [
"proc-macro2",
"quote",
@@ -3882,9 +3873,9 @@ dependencies = [
[[package]]
name = "pyo3-build-config"
version = "0.18.3"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9cb946f5ac61bb61a5014924910d936ebd2b23b705f7a4a3c40b05c720b079a3"
checksum = "07426f0d8fe5a601f26293f300afd1a7b1ed5e78b2a705870c5f30893c5163be"
dependencies = [
"once_cell",
"target-lexicon",
@@ -3892,9 +3883,9 @@ dependencies = [
[[package]]
name = "pyo3-ffi"
version = "0.18.3"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd4d7c5337821916ea2a1d21d1092e8443cf34879e53a0ac653fbb98f44ff65c"
checksum = "dbb7dec17e17766b46bca4f1a4215a85006b4c2ecde122076c562dd058da6cf1"
dependencies = [
"libc",
"pyo3-build-config",
@@ -3902,9 +3893,9 @@ dependencies = [
[[package]]
name = "pyo3-log"
version = "0.8.2"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c94ff6535a6bae58d7d0b85e60d4c53f7f84d0d0aa35d6a28c3f3e70bfe51444"
checksum = "4c10808ee7250403bedb24bc30c32493e93875fef7ba3e4292226fe924f398bd"
dependencies = [
"arc-swap",
"log",
@@ -3913,25 +3904,26 @@ dependencies = [
[[package]]
name = "pyo3-macros"
version = "0.18.3"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9d39c55dab3fc5a4b25bbd1ac10a2da452c4aca13bb450f22818a002e29648d"
checksum = "05f738b4e40d50b5711957f142878cfa0f28e054aa0ebdfc3fd137a843f74ed3"
dependencies = [
"proc-macro2",
"pyo3-macros-backend",
"quote",
"syn 1.0.109",
"syn 2.0.22",
]
[[package]]
name = "pyo3-macros-backend"
version = "0.18.3"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97daff08a4c48320587b5224cc98d609e3c27b6d437315bd40b605c98eeb5918"
checksum = "0fc910d4851847827daf9d6cdd4a823fbdaab5b8818325c5e97a86da79e8881f"
dependencies = [
"heck",
"proc-macro2",
"quote",
"syn 1.0.109",
"syn 2.0.22",
]
[[package]]
@@ -5552,9 +5544,9 @@ checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c"
[[package]]
name = "unindent"
version = "0.1.11"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e1766d682d402817b5ac4490b3c3002d91dfa0d22812f341609f97b08757359c"
checksum = "c7de7d73e1754487cb58364ee906a499937a0dfabd86bcb980fa99ec8c8fa2ce"
[[package]]
name = "unzip-n"

View File

@@ -51,9 +51,9 @@ plotters = { version = "0.3.0", default_features = false, optional = true }
regex = { version = "1", default_features = false }
tokio = { version = "1.26.0", default_features = false, features = ["macros", "rt"] }
tokio-util = { version = "0.7.9", features = ["codec"] }
pyo3 = { version = "0.18.3", features = ["extension-module", "abi3-py37", "macros"], default_features = false, optional = true }
pyo3-asyncio = { version = "0.18.0", features = ["attributes", "tokio-runtime"], default_features = false, optional = true }
pyo3-log = { version = "0.8.1", default_features = false, optional = true }
pyo3 = { version = "0.20.2", features = ["extension-module", "abi3-py37", "macros"], default_features = false, optional = true }
pyo3-asyncio = { version = "0.20.0", features = ["attributes", "tokio-runtime"], default_features = false, optional = true }
pyo3-log = { version = "0.9.0", default_features = false, optional = true }
tract-onnx = { git = "https://github.com/sonos/tract/", rev= "7b1aa33b2f7d1f19b80e270c83320f0f94daff69", default_features = false, optional = true }
tabled = { version = "0.12.0", optional = true }

View File

@@ -64,8 +64,8 @@ More notebook tutorials can be found within `examples/notebooks`.
#### CLI
Install the CLI
```bash
curl https://hub.ezkl.xyz/install_ezkl_cli.sh | bash
``` shell
curl https://github.com/JSeam2/ezkl/blob/main/install_ezkl_cli.sh | bash
```
https://user-images.githubusercontent.com/45801863/236771676-5bbbbfd1-ba6f-418a-902e-20738ce0e9f0.mp4

View File

@@ -271,7 +271,7 @@
"The graph input for on chain data sources is formatted completely differently compared to file based data sources.\n",
"\n",
"- For file data sources, the raw floating point values that eventually get quantized, converted into field elements and stored in `witness.json` to be consumed by the circuit are stored. The output data contains the expected floating point values returned as outputs from running your vanilla pytorch model on the given inputs.\n",
"- For on chain data sources, the input_data field contains all the data necessary to read and format the on chain data into something digestable by EZKL (aka field elemenets :-D). \n",
"- For on chain data sources, the input_data field contains all the data necessary to read and format the on chain data into something digestable by EZKL (aka field elements :-D). \n",
"Here is what the schema for an on-chain data source graph input file should look like:\n",
" \n",
"```json\n",

View File

@@ -42,7 +42,7 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"metadata": {
"id": "gvQ5HL1bTDWF"
},
@@ -441,9 +441,9 @@
"# Serialize calibration data into file:\n",
"json.dump(data, open(cal_data_path, 'w'))\n",
"\n",
"# Optimize for resources, we cap logrows at 17 to reduce setup and proving time, at the expense of accuracy\n",
"# Optimize for resources, we cap logrows at 12 to reduce setup and proving time, at the expense of accuracy\n",
"# You may want to increase the max logrows if accuracy is a concern\n",
"res = ezkl.calibrate_settings(cal_data_path, model_path, settings_path, \"resources\", max_logrows = 17)"
"res = ezkl.calibrate_settings(cal_data_path, model_path, settings_path, \"resources\", max_logrows = 12, scales = [2])"
]
},
{
@@ -508,9 +508,8 @@
" compiled_model_path,\n",
" vk_path,\n",
" pk_path,\n",
" \n",
" )\n",
" \n",
"\n",
"\n",
"assert res == True\n",
"assert os.path.isfile(vk_path)\n",
@@ -565,7 +564,6 @@
" compiled_model_path,\n",
" pk_path,\n",
" proof_path,\n",
" \n",
" \"single\",\n",
" )\n",
"\n",

View File

@@ -7,7 +7,7 @@
"source": [
"# kzg-ezkl\n",
"\n",
"Here's an example leveraging EZKL whereby the inputs to the model, and the model params themselves, are commited to using kzg-commitments inside a circuit.\n",
"Here's an example leveraging EZKL whereby the inputs to the model, and the model params themselves, are committed to using kzg-commitments inside a circuit.\n",
"\n",
"In this setup:\n",
"- the commitments are publicly known to the prover and verifier\n",
@@ -166,7 +166,7 @@
"Shoutouts: \n",
"\n",
"- [summa-solvency](https://github.com/summa-dev/summa-solvency) for their help with the poseidon hashing chip. \n",
"- [timeofey](https://github.com/timoftime) for providing inspiration in our developement of the el-gamal encryption circuit in Halo2. "
"- [timeofey](https://github.com/timoftime) for providing inspiration in our development of the el-gamal encryption circuit in Halo2. "
]
},
{

View File

@@ -780,7 +780,7 @@
"pk_path = os.path.join('test.pk')\n",
"vk_path = os.path.join('test.vk')\n",
"settings_path = os.path.join('settings.json')\n",
"",
"\n",
"witness_path = os.path.join('witness.json')\n",
"data_path = os.path.join('input.json')"
]
@@ -845,7 +845,7 @@
"res = ezkl.gen_settings(model_path, settings_path)\n",
"assert res == True\n",
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\", max_logrows = 20, scales = [5,6])\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\", max_logrows = 20, scales = [3])\n",
"assert res == True"
]
},
@@ -887,11 +887,28 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 30,
"metadata": {
"id": "12YIcFr85X9-"
},
"outputs": [],
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"spawning module 2\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"quotient_poly_degree 4\n",
"n 262144\n",
"extended_k 20\n"
]
}
],
"source": [
"res = ezkl.setup(\n",
" compiled_model_path,\n",
@@ -971,9 +988,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
"version": "3.9.15"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
}

View File

@@ -8,7 +8,7 @@
"source": [
"## EZKL Jupyter Notebook Demo \n",
"\n",
"Here we demonstrate how to use the EZKL package to run a publicly known / committted to network on some private data, producing a public output.\n"
"Here we demonstrate how to use the EZKL package to run a publicly known / committed to network on some private data, producing a public output.\n"
]
},
{

View File

@@ -154,7 +154,7 @@
"source": [
"## Create a neural net to verify the execution of the tic tac toe model\n",
"\n",
"1. Given the data generated above classify whether the tic tac toe games are valid. This approach uses a binary classification as the tic tac toe state space is fairly small. For larger state spaces we will want to use anomaly detection based approachs"
"1. Given the data generated above classify whether the tic tac toe games are valid. This approach uses a binary classification as the tic tac toe state space is fairly small. For larger state spaces, we will want to use anomaly detection based approaches."
]
},
{

View File

@@ -237,7 +237,7 @@
"\n",
"ezkl.gen_settings(onnx_filename, settings_filename)\n",
"ezkl.calibrate_settings(\n",
" input_filename, onnx_filename, settings_filename, \"resources\")\n",
" input_filename, onnx_filename, settings_filename, \"resources\", scales = [4])\n",
"res = ezkl.get_srs(settings_filename)\n",
"ezkl.compile_circuit(onnx_filename, compiled_filename, settings_filename)\n",
"\n",
@@ -255,7 +255,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 7,
"metadata": {
"id": "fULvvnK7_CMb"
},
@@ -451,7 +451,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
"version": "3.9.15"
}
},
"nbformat": 4,

View File

@@ -49,7 +49,7 @@
"import torch\n",
"import math\n",
"\n",
"# these are constatns for the rotation\n",
"# these are constants for the rotation\n",
"phi = torch.tensor(5 * math.pi / 180)\n",
"s = torch.sin(phi)\n",
"c = torch.cos(phi)\n",

170
install_ezkl_cli.sh Normal file
View File

@@ -0,0 +1,170 @@
#!/usr/bin/env bash
# Installer for the ezkl CLI: downloads the latest (or a tagged) release
# binary for the current platform/architecture into $EZKL_DIR and adds that
# directory to the user's PATH via their shell profile.
#
# Usage: install_ezkl_cli.sh [version-tag]
set -e

BASE_DIR=${XDG_CONFIG_HOME:-$HOME}
EZKL_DIR=${EZKL_DIR-"$BASE_DIR/.ezkl"}

# Create the .ezkl bin directory if it doesn't exist
mkdir -p "$EZKL_DIR"

# Store the correct profile file (i.e. .profile for bash or .zshenv for ZSH).
case $SHELL in
*/zsh)
    PROFILE=${ZDOTDIR-"$HOME"}/.zshenv
    PREF_SHELL=zsh
    ;;
*/bash)
    PROFILE=$HOME/.bashrc
    PREF_SHELL=bash
    ;;
*/fish)
    PROFILE=$HOME/.config/fish/config.fish
    PREF_SHELL=fish
    ;;
*/ash)
    PROFILE=$HOME/.profile
    PREF_SHELL=ash
    ;;
*)
    echo "NOTICE: Shell could not be detected, you will need to manually add ${EZKL_DIR} to your PATH."
esac

# Check for non standard installation of ezkl
if [ "$(which ezkl)s" != "s" ] && [ "$(which ezkl)" != "$EZKL_DIR/ezkl" ] ; then
    echo "ezkl is installed in a non-standard directory, $(which ezkl). To use the automated installer, remove the existing ezkl from path to prevent conflicts"
    exit 1
fi

# BUG FIX: the variable was misspelled as ${EZKl_DIR} (lowercase 'l'), which
# expanded empty, so this check never matched and every re-run appended a
# duplicate PATH export to the profile. Also guard on PROFILE being set:
# when the shell was not detected above, `>> $PROFILE` was an ambiguous
# redirect that aborted the script under `set -e`.
if [ -n "${PROFILE-}" ] && [[ ":$PATH:" != *":${EZKL_DIR}:"* ]]; then
    # Add the ezkl directory to the path and ensure the old PATH variables remain.
    echo >> "$PROFILE" && echo "export PATH=\"\$PATH:$EZKL_DIR\"" >> "$PROFILE"
fi

# Install latest ezkl version
# Get the right release URL
if [ -z "$1" ]; then
    RELEASE_URL="https://api.github.com/repos/zkonduit/ezkl/releases/latest"
    echo "No version tags provided, installing the latest ezkl version"
else
    RELEASE_URL="https://api.github.com/repos/zkonduit/ezkl/releases/tags/$1"
    echo "Installing ezkl version $1"
fi

PLATFORM=""
case "$(uname -s)" in
Darwin*)
    PLATFORM="macos"
    ;;
Linux*Microsoft*)
    # WSL reports "Linux ... Microsoft"; treated the same as native Linux.
    PLATFORM="linux"
    ;;
Linux*)
    PLATFORM="linux"
    ;;
CYGWIN*|MINGW*|MINGW32*|MSYS*)
    PLATFORM="windows-msvc"
    ;;
*)
    echo "Platform is not supported. If you would need support for the platform please submit an issue https://github.com/zkonduit/ezkl/issues/new/choose"
    exit 1
    ;;
esac

# Check arch
ARCHITECTURE="$(uname -m)"
if [ "${ARCHITECTURE}" = "x86_64" ]; then
    # Redirect stderr to /dev/null to avoid printing errors if non Rosetta.
    if [ "$(sysctl -n sysctl.proc_translated 2>/dev/null)" = "1" ]; then
        ARCHITECTURE="arm64" # Rosetta.
    else
        ARCHITECTURE="amd64" # Intel.
    fi
elif [ "${ARCHITECTURE}" = "arm64" ] || [ "${ARCHITECTURE}" = "aarch64" ]; then
    ARCHITECTURE="aarch64" # Arm.
elif [ "${ARCHITECTURE}" = "amd64" ]; then
    ARCHITECTURE="amd64" # Amd
else
    echo "Architecture is not supported. If you would need support for the architecture please submit an issue https://github.com/zkonduit/ezkl/issues/new/choose"
    exit 1
fi

# Remove existing ezkl
echo "Removing old ezkl binary if it exists"
# BUG FIX: was `[ -e file ] && rm file`, which tested/removed a literal file
# named "file" instead of the previously installed binary.
[ -e "$EZKL_DIR/ezkl" ] && rm "$EZKL_DIR/ezkl"

# Fetch the release asset named $1 from $RELEASE_URL, unpack it into
# $EZKL_DIR, and delete the tarball. Factored out of four near-identical
# copy-pasted branches; behavior per branch is unchanged.
download_and_unpack() {
    local tarball="$1"
    local json_response file_url
    json_response=$(curl -s "$RELEASE_URL")
    file_url=$(echo "$json_response" | grep -o 'https://github.com[^"]*' | grep "$tarball")
    echo "Downloading package"
    curl -L "$file_url" -o "$EZKL_DIR/$tarball"
    echo "Unpacking package"
    tar -xzf "$EZKL_DIR/$tarball" -C "$EZKL_DIR"
    echo "Cleaning up"
    rm "$EZKL_DIR/$tarball"
}

# download the release and unpack the right tarball
if [ "$PLATFORM" == "windows-msvc" ]; then
    download_and_unpack "build-artifacts.ezkl-windows-msvc.tar.gz"
elif [ "$PLATFORM" == "macos" ]; then
    if [ "$ARCHITECTURE" == "aarch64" ] || [ "$ARCHITECTURE" == "arm64" ]; then
        download_and_unpack "build-artifacts.ezkl-macos-aarch64.tar.gz"
    else
        download_and_unpack "build-artifacts.ezkl-macos.tar.gz"
    fi
elif [ "$PLATFORM" == "linux" ]; then
    if [ "${ARCHITECTURE}" = "amd64" ]; then
        download_and_unpack "build-artifacts.ezkl-linux-gnu.tar.gz"
    else
        echo "ARM architectures are not supported for Linux at the moment. If you would need support for the ARM architectures on linux please submit an issue https://github.com/zkonduit/ezkl/issues/new/choose"
        exit 1
    fi
else
    echo "Platform and Architecture is not supported. If you would need support for the platform and architecture please submit an issue https://github.com/zkonduit/ezkl/issues/new/choose"
    exit 1
fi

echo && echo "Successfully downloaded ezkl at ${EZKL_DIR}"
echo "We detected that your preferred shell is ${PREF_SHELL} and added ezkl to PATH. Run 'source ${PROFILE}' or start a new terminal session to use ezkl."

View File

@@ -823,8 +823,6 @@ pub(crate) fn calibrate(
"input scale: {}, param scale: {}, scale rebase multiplier: {}",
input_scale, param_scale, scale_rebase_multiplier
));
// vec of settings copied chunks.len() times
let run_args_iterable = vec![settings.run_args.clone(); chunks.len()];
#[cfg(unix)]
let _r = match Gag::stdout() {
@@ -836,41 +834,41 @@ pub(crate) fn calibrate(
Ok(r) => Some(r),
Err(_) => None,
};
let key = (input_scale, param_scale, scale_rebase_multiplier);
forward_pass_res.insert(key, vec![]);
let tasks = chunks
let local_run_args = RunArgs {
input_scale,
param_scale,
scale_rebase_multiplier,
..settings.run_args.clone()
};
let mut circuit = match GraphCircuit::from_run_args(&local_run_args, &model_path) {
Ok(c) => c,
Err(e) => {
// drop the gag
#[cfg(unix)]
std::mem::drop(_r);
#[cfg(unix)]
std::mem::drop(_q);
debug!("circuit creation from run args failed: {:?}", e);
continue;
}
};
chunks
.iter()
.zip(run_args_iterable)
.map(|(chunk, run_args)| {
// we need to create a new run args for each chunk
// time it
.map(|chunk| {
let chunk = chunk.clone();
let local_run_args = RunArgs {
input_scale,
param_scale,
scale_rebase_multiplier,
..run_args.clone()
};
let original_settings = settings.clone();
let mut circuit = match GraphCircuit::from_run_args(&local_run_args, &model_path) {
Ok(c) => c,
Err(_) => {
return Err(format!("failed to create circuit from run args"))
as Result<GraphSettings, String>
}
};
let data = circuit
.load_graph_from_file_exclusively(&chunk)
.map_err(|e| format!("failed to load circuit inputs: {}", e))?;
let forward_res = circuit
.calibrate(&data, max_logrows, lookup_safety_margin)
.map_err(|e| format!("failed to calibrate: {}", e))?;
.forward(&mut data.clone(), None, None)
.map_err(|e| format!("failed to forward: {}", e))?;
// push result to the hashmap
forward_pass_res
@@ -878,38 +876,32 @@ pub(crate) fn calibrate(
.ok_or("key not found")?
.push(forward_res);
let settings = circuit.settings().clone();
let found_run_args = RunArgs {
input_scale: settings.run_args.input_scale,
param_scale: settings.run_args.param_scale,
lookup_range: settings.run_args.lookup_range,
logrows: settings.run_args.logrows,
scale_rebase_multiplier: settings.run_args.scale_rebase_multiplier,
..run_args.clone()
};
let found_settings = GraphSettings {
run_args: found_run_args,
required_lookups: settings.required_lookups,
model_output_scales: settings.model_output_scales,
model_input_scales: settings.model_input_scales,
num_rows: settings.num_rows,
total_assignments: settings.total_assignments,
total_const_size: settings.total_const_size,
..original_settings.clone()
};
Ok(found_settings) as Result<GraphSettings, String>
Ok(()) as Result<(), String>
})
.collect::<Vec<Result<GraphSettings, String>>>();
.collect::<Result<Vec<()>, String>>()?;
let mut res: Vec<GraphSettings> = vec![];
for task in tasks {
if let Ok(task) = task {
res.push(task);
}
}
let min_lookup_range = forward_pass_res
.get(&key)
.unwrap()
.iter()
.map(|x| x.min_lookup_inputs)
.min()
.unwrap_or(0);
let max_lookup_range = forward_pass_res
.get(&key)
.unwrap()
.iter()
.map(|x| x.max_lookup_inputs)
.max()
.unwrap_or(0);
let res = circuit.calibrate_from_min_max(
min_lookup_range,
max_lookup_range,
max_logrows,
lookup_safety_margin,
);
// drop the gag
#[cfg(unix)]
@@ -917,31 +909,37 @@ pub(crate) fn calibrate(
#[cfg(unix)]
std::mem::drop(_q);
let max_lookup_range = res
.iter()
.map(|x| x.run_args.lookup_range.1)
.max()
.unwrap_or(0);
let min_lookup_range = res
.iter()
.map(|x| x.run_args.lookup_range.0)
.min()
.unwrap_or(0);
if res.is_ok() {
let new_settings = circuit.settings().clone();
let found_run_args = RunArgs {
input_scale: new_settings.run_args.input_scale,
param_scale: new_settings.run_args.param_scale,
lookup_range: new_settings.run_args.lookup_range,
logrows: new_settings.run_args.logrows,
scale_rebase_multiplier: new_settings.run_args.scale_rebase_multiplier,
..settings.run_args.clone()
};
let found_settings = GraphSettings {
run_args: found_run_args,
required_lookups: new_settings.required_lookups,
model_output_scales: new_settings.model_output_scales,
model_input_scales: new_settings.model_input_scales,
num_rows: new_settings.num_rows,
total_assignments: new_settings.total_assignments,
total_const_size: new_settings.total_const_size,
..settings.clone()
};
found_params.push(found_settings.clone());
if let Some(mut best) = res.into_iter().max_by_key(|p| {
(
p.run_args.logrows,
p.run_args.input_scale,
p.run_args.param_scale,
)
}) {
best.run_args.lookup_range = (min_lookup_range, max_lookup_range);
// pick the one with the largest logrows
found_params.push(best.clone());
debug!(
"found settings: \n {}",
best.as_json()?.to_colored_json_auto()?
found_settings.as_json()?.to_colored_json_auto()?
);
} else {
debug!("calibration failed {}", res.err().unwrap());
}
pb.inc(1);
@@ -1034,7 +1032,7 @@ pub(crate) fn calibrate(
let tear_sheet_table = Table::new(vec![accuracy_res]);
println!(
warn!(
"\n\n <------------- Numerical Fidelity Report (input_scale: {}, param_scale: {}, scale_input_multiplier: {}) ------------->\n\n{}\n\n",
best_params.run_args.input_scale,
best_params.run_args.param_scale,

View File

@@ -956,10 +956,14 @@ impl GraphCircuit {
(ASSUMED_BLINDING_FACTORS + RESERVED_BLINDING_ROWS_PAD) as f64
}
fn calc_safe_lookup_range(res: &GraphWitness, lookup_safety_margin: i128) -> (i128, i128) {
fn calc_safe_lookup_range(
min_lookup_inputs: i128,
max_lookup_inputs: i128,
lookup_safety_margin: i128,
) -> (i128, i128) {
let mut margin = (
lookup_safety_margin * res.min_lookup_inputs,
lookup_safety_margin * res.max_lookup_inputs,
lookup_safety_margin * min_lookup_inputs,
lookup_safety_margin * max_lookup_inputs,
);
if lookup_safety_margin == 1 {
margin.0 -= 1;
@@ -978,7 +982,8 @@ impl GraphCircuit {
fn calc_min_logrows(
&mut self,
res: &GraphWitness,
min_lookup_inputs: i128,
max_lookup_inputs: i128,
max_logrows: Option<u32>,
lookup_safety_margin: i128,
) -> Result<(), Box<dyn std::error::Error>> {
@@ -986,19 +991,24 @@ impl GraphCircuit {
let max_logrows = max_logrows.unwrap_or(MAX_PUBLIC_SRS);
let max_logrows = std::cmp::min(max_logrows, MAX_PUBLIC_SRS);
let mut max_logrows = std::cmp::max(max_logrows, MIN_LOGROWS);
let mut min_logrows = MIN_LOGROWS;
let reserved_blinding_rows = Self::reserved_blinding_rows();
// check if has overflowed max lookup input
if res.max_lookup_inputs > MAX_LOOKUP_ABS / lookup_safety_margin
|| res.min_lookup_inputs < -MAX_LOOKUP_ABS / lookup_safety_margin
if max_lookup_inputs > MAX_LOOKUP_ABS / lookup_safety_margin
|| min_lookup_inputs < -MAX_LOOKUP_ABS / lookup_safety_margin
{
let err_string = format!("max lookup input ({}) is too large", res.max_lookup_inputs);
let err_string = format!("max lookup input ({}) is too large", max_lookup_inputs);
error!("{}", err_string);
return Err(err_string.into());
}
let safe_range = Self::calc_safe_lookup_range(res, lookup_safety_margin);
let mut min_logrows = MIN_LOGROWS;
let safe_range = Self::calc_safe_lookup_range(
min_lookup_inputs,
max_lookup_inputs,
lookup_safety_margin,
);
// degrade the max logrows until the extended k is small enough
while min_logrows < max_logrows
&& !self.extended_k_is_small_enough(
@@ -1020,8 +1030,7 @@ impl GraphCircuit {
return Err(err_string.into());
}
// degrade the max logrows until the extended k is small enough
while max_logrows > min_logrows
while min_logrows < max_logrows
&& !self.extended_k_is_small_enough(
max_logrows,
Self::calc_num_cols(safe_range, max_logrows),
@@ -1030,6 +1039,17 @@ impl GraphCircuit {
max_logrows -= 1;
}
if !self
.extended_k_is_small_enough(max_logrows, Self::calc_num_cols(safe_range, max_logrows))
{
let err_string = format!(
"extended k is too large to accommodate the quotient polynomial with logrows {}",
max_logrows
);
error!("{}", err_string);
return Err(err_string.into());
}
let min_bits = ((safe_range.1 - safe_range.0) as f64 + reserved_blinding_rows + 1.)
.log2()
.ceil() as usize;
@@ -1111,22 +1131,31 @@ impl GraphCircuit {
// n = 2^k
let n = 1u64 << k;
let mut extended_k = k;
while (1 << extended_k) < (n * quotient_poly_degree) {
extended_k += 1;
if !(extended_k <= bn256::Fr::S) {
return false;
}
}
extended_k <= bn256::Fr::S
true
}
/// Calibrate the circuit to the supplied data.
pub fn calibrate(
pub fn calibrate_from_min_max(
&mut self,
input: &[Tensor<Fp>],
min_lookup_inputs: i128,
max_lookup_inputs: i128,
max_logrows: Option<u32>,
lookup_safety_margin: i128,
) -> Result<GraphWitness, Box<dyn std::error::Error>> {
let res = self.forward(&mut input.to_vec(), None, None)?;
self.calc_min_logrows(&res, max_logrows, lookup_safety_margin)?;
Ok(res)
) -> Result<(), Box<dyn std::error::Error>> {
self.calc_min_logrows(
min_lookup_inputs,
max_lookup_inputs,
max_logrows,
lookup_safety_margin,
)?;
Ok(())
}
/// Runs the forward pass of the model / graph of computations and any associated hashing.

View File

@@ -1022,7 +1022,6 @@ fn print_proof_hex(proof_path: PathBuf) -> Result<String, PyErr> {
// Python Module
#[pymodule]
fn ezkl(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
// NOTE: DeployVerifierEVM and SendProofEVM will be implemented in python in pyezkl
pyo3_log::init();
m.add_class::<PyRunArgs>()?;
m.add_class::<PyG1Affine>()?;

View File

@@ -516,7 +516,7 @@ mod native_tests {
crate::native_tests::setup_py_env();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
accuracy_measurement(path, test.to_string(), "private", "private", "public", 1, "accuracy", 1.2);
accuracy_measurement(path, test.to_string(), "private", "private", "public", 1, "accuracy", 2.6);
test_dir.close().unwrap();
}
@@ -526,7 +526,7 @@ mod native_tests {
crate::native_tests::setup_py_env();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
accuracy_measurement(path, test.to_string(), "private", "fixed", "private", 1, "accuracy", 1.2);
accuracy_measurement(path, test.to_string(), "private", "fixed", "private", 1, "accuracy", 2.6);
test_dir.close().unwrap();
}
@@ -536,7 +536,7 @@ mod native_tests {
crate::native_tests::setup_py_env();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
accuracy_measurement(path, test.to_string(), "public", "private", "private", 1, "accuracy", 1.2);
accuracy_measurement(path, test.to_string(), "public", "private", "private", 1, "accuracy", 2.6);
test_dir.close().unwrap();
}
@@ -1901,6 +1901,20 @@ mod native_tests {
.map(|h| vec![FileSourceInner::Field(*h)])
.collect(),
));
} else {
input.output_data = Some(DataSource::File(
witness
.pretty_elements
.unwrap()
.rescaled_outputs
.iter()
.map(|o| {
o.iter()
.map(|f| FileSourceInner::Float(f.parse().unwrap()))
.collect()
})
.collect(),
));
}
input.save(data_path.clone().into()).unwrap();

View File

@@ -118,6 +118,7 @@ mod py_tests {
}
const TESTS: [&str; 32] = [
"variance.ipynb",
"proof_splitting.ipynb",
"mnist_gan_proof_splitting.ipynb",
"mnist_gan.ipynb",
@@ -126,7 +127,6 @@ mod py_tests {
"hashed_vis.ipynb",
"simple_demo_all_public.ipynb",
"data_attest.ipynb",
"variance.ipynb",
"little_transformer.ipynb",
"simple_demo_aggregated_proofs.ipynb",
"ezkl_demo.ipynb",