docs: migrate from Sphinx to Gitbook

aquint-zama
2022-05-19 15:15:42 +02:00
committed by Alex Quint
parent dc501fb0ae
commit 2a42b5f711
45 changed files with 299 additions and 1712 deletions

.gitbook.yaml (new file)

@@ -0,0 +1 @@
root: ./docs


@@ -230,12 +230,6 @@ jobs:
run: |
make pcc
- name: Build docs
id: cbd
if: ${{ steps.install-deps.outcome == 'success' && !cancelled() }}
run: |
make docs
- name: Generate release changelog
id: changelog
if: ${{ fromJSON(env.IS_RELEASE) && steps.install-deps.outcome == 'success' && !cancelled() }}
@@ -252,28 +246,13 @@ jobs:
id: conformance
if: ${{ always() && !cancelled() }}
env:
CONFORMANCE_STATUS: ${{ steps.commit-conformance.outcome == 'success' && steps.cs.outcome == 'success' && steps.cbd.outcome == 'success' }}
CONFORMANCE_STATUS: ${{ steps.commit-conformance.outcome == 'success' && steps.cs.outcome == 'success' }}
run: |
if [[ "${CONFORMANCE_STATUS}" != "true" ]]; then
echo "Conformance failed, check logs"
exit 1
fi
# Tarring the docs allows for much faster upload speed (from ~3min worst case to ~2s best case)
- name: Tar docs artifacts
if: ${{ steps.conformance.outcome == 'success' && !cancelled() }}
run: |
cd docs/_build/html
tar -cvf docs.tar ./*
# Only upload docs once from reference build
- name: Archive docs artifacts
if: ${{ fromJSON(env.IS_REF_BUILD) && steps.conformance.outcome == 'success' && !cancelled() }}
uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8
with:
name: html-docs
path: docs/_build/html/docs.tar
- name: Upload changelog artifacts
if: ${{ fromJSON(env.IS_REF_BUILD) && steps.changelog.outcome == 'success' && !cancelled() }}
uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8
@@ -473,93 +452,6 @@ jobs:
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
publish-docs:
needs: [build-linux]
outputs:
report: ${{ steps.report.outputs.report || 'Did not run.' }}
runs-on: ubuntu-20.04
if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }}
steps:
- name: Prepare docs push
id: docs-push-infos
run: |
if [[ ${{ secrets.AWS_REPO_PREPROD_DOCUMENTATION_BUCKET_NAME }} != "" ]] && \
[[ ${{ secrets.AWS_REPO_PREPROD_DOCUMENTATION_DISTRIBUTION_ID }} != "" ]]; then
REF_NAME=$(echo "${{ github.ref }}" | sed 's/refs\/heads\///g')
echo "::set-output name=has-preprod::true"
echo "::set-output name=aws-bucket::${{ secrets.AWS_REPO_PREPROD_DOCUMENTATION_BUCKET_NAME }}"
echo "::set-output name=aws-distribution::${{ secrets.AWS_REPO_PREPROD_DOCUMENTATION_DISTRIBUTION_ID }}"
echo "::set-output name=dest-dir::concrete-numpy/${REF_NAME}"
else
echo "::set-output name=has-preprod::false"
fi
- name: Download Documentation
if: ${{ fromJSON(steps.docs-push-infos.outputs.has-preprod) }}
id: download
uses: actions/download-artifact@fb598a63ae348fa914e94cd0ff38f362e927b741
with:
name: html-docs
- name: Untar docs artifacts
id: untar
if: ${{ fromJSON(steps.docs-push-infos.outputs.has-preprod) }}
run: |
tar -xvf docs.tar
rm docs.tar
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@05b148adc31e091bafbaf404f745055d4d3bc9d2
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ secrets.AWS_REGION }}
- name: Publish Documentation to S3
id: publish
if: ${{ steps.untar.outcome == 'success' && !cancelled() }}
env:
AWS_S3_BUCKET: ${{ steps.docs-push-infos.outputs.aws-bucket }}
SOURCE_DIR: '.'
DEST_DIR: ${{ steps.docs-push-infos.outputs.dest-dir }}
run: |
aws s3 sync "${SOURCE_DIR}" s3://"${AWS_S3_BUCKET}/${DEST_DIR}" --delete --acl public-read
- name: Invalidate CloudFront Cache
if: ${{ steps.publish.outcome == 'success' }}
env:
SOURCE_PATH: "/${{ steps.docs-push-infos.outputs.dest-dir }}/*"
DISTRIBUTION_ID: ${{ steps.docs-push-infos.outputs.aws-distribution }}
run: |
aws cloudfront create-invalidation \
--distribution-id "${DISTRIBUTION_ID}" \
--paths "${SOURCE_PATH}"
- name: Set notification report
id: report
if: ${{ always() }}
run: |
REPORT="Publishing documentation finished with status ${{ job.status }}. \
Pushed to preprod: ${{ steps.docs-push-infos.outputs.has-preprod }}"
echo "${REPORT}"
echo "::set-output name=report::${REPORT}"
echo "REPORT=${REPORT}" >> "$GITHUB_ENV"
- name: Slack Notification
if: ${{ always() && !success() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
env:
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "${{ env.REPORT }} (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
package-release:
needs: [build-linux]
if: ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') }}
@@ -649,21 +541,6 @@ jobs:
mkdir -p "${ARTIFACTS_PACKAGED_DIR}"
echo "ARTIFACTS_PACKAGED_DIR=${ARTIFACTS_PACKAGED_DIR}" >> "$GITHUB_ENV"
- name: Download Documentation
if: ${{ success() && !cancelled() }}
id: download-docs
uses: actions/download-artifact@fb598a63ae348fa914e94cd0ff38f362e927b741
with:
name: html-docs
path: ${{ env.ARTIFACTS_RAW_DIR }}/html_docs/
- name: Untar docs artifacts
if: ${{ success() && !cancelled() }}
run: |
cd ${{ steps.download-docs.outputs.download-path }}
tar -xvf docs.tar
rm docs.tar
- name: Download changelog
if: ${{ success() && !cancelled() }}
id: download-changelog
@@ -717,70 +594,11 @@ jobs:
docker run --rm -v "$(pwd)"/docker/release_resources:/data \
"${PRIVATE_RELEASE_IMG_GIT_TAG}" /bin/bash -c "python ./sanity_check.py"
- name: Prepare docs push
id: docs-push-infos
run: |
if [[ ${{ secrets.AWS_REPO_PREPROD_DOCUMENTATION_BUCKET_NAME }} != "" ]] && \
[[ ${{ secrets.AWS_REPO_PREPROD_DOCUMENTATION_DISTRIBUTION_ID }} != "" ]] && \
[[ "${IS_PRERELEASE}" == "true" ]]; then
echo "::set-output name=aws-bucket::${{ secrets.AWS_REPO_PREPROD_DOCUMENTATION_BUCKET_NAME }}"
echo "::set-output name=aws-distribution::${{ secrets.AWS_REPO_PREPROD_DOCUMENTATION_DISTRIBUTION_ID }}"
else
echo "::set-output name=aws-bucket::${{ secrets.AWS_REPO_DOCUMENTATION_BUCKET_NAME }}"
echo "::set-output name=aws-distribution::${{ secrets.AWS_REPO_DOCUMENTATION_DISTRIBUTION_ID }}"
fi
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@05b148adc31e091bafbaf404f745055d4d3bc9d2
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ secrets.AWS_REGION }}
- name: Update versions.json for docs
if: ${{ success() && !cancelled() }}
env:
RAW_DOCS_DIR: ${{ steps.download-docs.outputs.download-path }}
run: |
DOWNLOADED_VERSIONS_JSON_FILE=$(mktemp --suffix=.json)
OUTPUT_VERSIONS_JSON_FILE=$(mktemp --suffix=.json)
OPTS=""
if [[ $IS_LATEST = "true" ]]; then
OPTS="${OPTS} --latest "
fi
if [[ $IS_PRERELEASE = "true" ]]; then
OPTS="${OPTS} --prerelease "
fi
aws s3api get-object \
--bucket ${{ steps.docs-push-infos.outputs.aws-bucket }} \
--key concrete-numpy/versions.json "${DOWNLOADED_VERSIONS_JSON_FILE}"
# shellcheck disable=SC2086
poetry run python ./script/actions_utils/generate_versions_json.py \
--add-version "${PROJECT_VERSION}" \
--versions-json-file "${DOWNLOADED_VERSIONS_JSON_FILE}" \
--output-json "${OUTPUT_VERSIONS_JSON_FILE}" \
$OPTS
echo "OUTPUT_VERSIONS_JSON_FILE=${OUTPUT_VERSIONS_JSON_FILE}" >> "$GITHUB_ENV"
# Copy to docs to keep a version in docs artifacts
cp "${OUTPUT_VERSIONS_JSON_FILE}" "${RAW_DOCS_DIR}"/versions.json
- name: Create ready to upload/packaged artifacts and release body
if: ${{ success() && !cancelled() }}
env:
RAW_DOCS_DIR: ${{ steps.download-docs.outputs.download-path }}
RAW_CHANGELOG_DIR: ${{ steps.download-changelog.outputs.download-path }}
run: |
pushd "${RAW_DOCS_DIR}"
zip -r "${ARTIFACTS_PACKAGED_DIR}/html-docs.zip" ./*
tar -cvzf "${ARTIFACTS_PACKAGED_DIR}/html-docs.tar.gz" ./*
# Remove the versions.json to avoid pushing it to S3 but have it in release artifacts
rm versions.json
popd
cp "${RAW_CHANGELOG_DIR}"/* "${ARTIFACTS_PACKAGED_DIR}"
ls -a "${ARTIFACTS_PACKAGED_DIR}"
@@ -791,7 +609,6 @@ jobs:
{
echo "Docker Image: ${PRIVATE_RELEASE_IMG_GIT_TAG}";
echo "PyPI: https://pypi.org/project/concrete-numpy/${PROJECT_VERSION}";
echo "Documentation: https://${{ steps.docs-push-infos.outputs.aws-bucket }}/concrete-numpy/${PROJECT_VERSION}";
echo "";
} >> "${RELEASE_BODY_FILE}"
cat "${RAW_CHANGELOG_DIR}"/* >> "${RELEASE_BODY_FILE}"
@@ -818,34 +635,6 @@ jobs:
-u "${{ secrets.INTERNAL_PYPI_BOT_USERNAME }}" -p "${{ secrets.INTERNAL_PYPI_BOT_PASSWORD }}" \
--repository-url "${{ secrets.INTERNAL_PYPI_URL }}" "${{ env.ARTIFACTS_PACKAGED_DIR }}"/*.whl
- name: Push release documentation
if: ${{ success() && !cancelled() }}
env:
AWS_S3_BUCKET: ${{ steps.docs-push-infos.outputs.aws-bucket }}
SOURCE_DIR: ${{ steps.download-docs.outputs.download-path }}
DEST_DIR: 'concrete-numpy/${{ env.PROJECT_VERSION }}'
run: |
aws s3 sync "${SOURCE_DIR}" s3://"${AWS_S3_BUCKET}/${DEST_DIR}" --delete --acl public-read
- name: Push release documentation as stable
if: ${{ success() && !cancelled() && !fromJSON(env.IS_PRERELEASE) && fromJSON(env.IS_LATEST) }}
env:
AWS_S3_BUCKET: ${{ steps.docs-push-infos.outputs.aws-bucket }}
SOURCE_DIR: ${{ steps.download-docs.outputs.download-path }}
DEST_DIR: 'concrete-numpy/stable'
run: |
aws s3 sync "${SOURCE_DIR}" s3://"${AWS_S3_BUCKET}/${DEST_DIR}" --delete --acl public-read
- name: Invalidate CloudFront Cache for stable
if: ${{ success() && !fromJSON(env.IS_PRERELEASE) && fromJSON(env.IS_LATEST) }}
env:
SOURCE_PATH: "/concrete-numpy/stable/*"
DISTRIBUTION_ID: ${{ steps.docs-push-infos.outputs.aws-distribution }}
run: |
aws cloudfront create-invalidation \
--distribution-id "${DISTRIBUTION_ID}" \
--paths "${SOURCE_PATH}"
- name: Create GitHub release
if: ${{ success() && !cancelled() }}
id: create-release
@@ -859,17 +648,6 @@ jobs:
fail_on_unmatched_files: true
token: ${{ secrets.BOT_TOKEN }}
- name: Push updated versions.json
if: ${{ success() }}
run: |
aws s3 cp "${OUTPUT_VERSIONS_JSON_FILE}" \
s3://${{ steps.docs-push-infos.outputs.aws-bucket }}/concrete-numpy/versions.json \
--acl public-read
aws cloudfront create-invalidation \
--distribution-id ${{ steps.docs-push-infos.outputs.aws-distribution }} \
--paths /concrete-numpy/versions.json
- name: Set notification report
id: report
if: ${{ always() }}
@@ -901,7 +679,6 @@ jobs:
build-linux,
stop-runner-linux,
build-macos,
publish-docs,
package-release,
]
@@ -936,7 +713,6 @@ jobs:
- build-linux: ${{ needs.build-linux.result || 'Did not run.' }}\n\n\
- stop-runner-linux: ${{ needs.stop-runner-linux.result || 'Did not run.'}}\n\n\
- build-macos: ${{ needs.build-macos.result || 'Did not run.' }}\n\n\
- publish-docs: ${{ needs.publish-docs.outputs.report || 'Did not run.' }}\n\n\
- package-release: ${{ needs.package-release.outputs.report || 'Did not run.' }}"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}


@@ -74,7 +74,7 @@ pcc:
--no-print-directory pcc_internal
PCC_DEPS := check_python_format check_finalize_nb python_linting mypy_ci pydocstyle shell_lint
PCC_DEPS += check_version_coherence check_supported_functions check_licenses
PCC_DEPS += check_supported_functions check_licenses
# Not commented on purpose for make help, since internal
.PHONY: pcc_internal
@@ -160,22 +160,6 @@ docker_clean_volumes:
.PHONY: docker_cv # Docker clean volumes
docker_cv: docker_clean_volumes
.PHONY: docs # Build docs
docs: clean_docs supported_functions
@# Generate the auto summary of documentations
poetry run sphinx-apidoc -o docs/_apidoc $(SRC_DIR)
@# Docs
cd docs && poetry run $(MAKE) html SPHINXOPTS='-W --keep-going'
.PHONY: clean_docs # Clean docs build directory
clean_docs:
rm -rf docs/_apidoc docs/_build
.PHONY: open_docs # Launch docs in a browser (macOS only)
open_docs:
@# This is macOS only. On other systems, one would use `start` or `xdg-open`
open docs/_build/html/index.html
.PHONY: pydocstyle # Launch syntax checker on source code documentation
pydocstyle:
@# From http://www.pydocstyle.org/en/stable/error_codes.html
@@ -247,18 +231,14 @@ set_version:
git stash pop; \
fi
.PHONY: check_version_coherence # Check that all files containing version have the same value
check_version_coherence:
poetry run python ./script/make_utils/version_utils.py check-version
.PHONY: changelog # Generate a changelog
changelog: check_version_coherence
changelog:
PROJECT_VER=($$(poetry version)) && \
PROJECT_VER="$${PROJECT_VER[1]}" && \
poetry run python ./script/make_utils/changelog_helper.py > "CHANGELOG_$${PROJECT_VER}.md"
.PHONY: release # Create a new release
release: check_version_coherence
release:
@PROJECT_VER=($$(poetry version)) && \
PROJECT_VER="$${PROJECT_VER[1]}" && \
TAG_NAME="v$${PROJECT_VER}" && \
@@ -287,11 +267,11 @@ todo:
.PHONY: supported_functions # Update docs with supported functions
supported_functions:
poetry run python script/doc_utils/gen_supported_ufuncs.py docs/user/howto/numpy_support.md
poetry run python script/doc_utils/gen_supported_ufuncs.py docs/basics/numpy_support.md
.PHONY: check_supported_functions # Check supported functions (for the doc)
check_supported_functions:
poetry run python script/doc_utils/gen_supported_ufuncs.py docs/user/howto/numpy_support.md --check
poetry run python script/doc_utils/gen_supported_ufuncs.py docs/basics/numpy_support.md --check
.PHONY: licenses # Generate the list of licenses of dependencies
licenses:


@@ -1,20 +0,0 @@
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = .
BUILDDIR = _build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

docs/SUMMARY.md (new file)

@@ -0,0 +1,39 @@
# User Guide
* [What is Concrete Numpy?](intro.md)
## Getting Started
* [Installation](basics/installing.md)
* [Compiling and Executing your first Numpy Function](basics/compiling_and_executing.md)
* [List of supported Numpy operations](basics/numpy_support.md)
## Tutorials
* [Table lookup](tutorial/table_lookup.md)
* [Working with floating points](tutorial/working_with_floating_points.md)
* [Indexing](tutorial/indexing.md)
* [Compilation artifacts](tutorial/compilation_artifacts.md)
## How To
* [Printing and drawing](howto/printing_and_drawing.md)
* [Reduce needed precision](howto/reduce_needed_precision.md)
* [Debug](howto/debug_support_submit_issues.md)
## Explanations
* [What is FHE?](explanation/what_is_fhe.md)
* [Framework limits](explanation/fhe_and_framework_limits.md)
* [Future features](explanation/future_features.md)
## Developer
* [Setup the project](dev/project_setup.md)
* [Use Docker](dev/docker.md)
* [Create a release](dev/releasing.md)
* [Contribute](dev/contributing.md)
* [Compilation](dev/compilation.md)
* [Terminology and structure](dev/terminology_and_structure.md)
* [Float fusing](dev/float-fusing.md)
* [MLIR](dev/mlir.md)

(binary image file changed; 42 KiB before and after)


@@ -1,4 +1,4 @@
# Compiling and Executing a Numpy Function
# Compiling and Executing your first function
## Importing necessary components
@@ -10,7 +10,7 @@ import concrete.numpy as cnp
## Defining a function to compile
You need to have a python function that follows the [limits](../explanation/fhe_and_framework_limits.md) of **Concrete Numpy**. Here is a simple example:
You need to have a python function that follows the [limits](../explanation/fhe\_and\_framework_limits.md) of **Concrete Numpy**. Here is a simple example:
<!--pytest-codeblocks:cont-->
```python
@@ -60,7 +60,7 @@ circuit.draw(show=True)
Here is the graph from the previous code block drawn with `draw`:
![Drawn graph of previous code block](../../_static/howto/compiling_and_executing_example_graph.png)
![Drawn graph of previous code block](../\_static/basics/compiling\_and\_executing\_example\_graph.png)
## Performing homomorphic evaluation
@@ -78,10 +78,9 @@ circuit.encrypt_run_decrypt(0, 0)
# 0
```
```{caution}
Be careful about the inputs, though.
If you were to run with values outside the range of the inputset, the result might not be correct.
```
{% hint style="warning" %}
Be careful about the inputs, though. If you were to run with values outside the range of the inputset, the result might not be correct.
{% endhint %}
While `.encrypt_run_decrypt(...)` is a good start for prototyping examples, more advanced usages require control over the different steps that are happening behind the scenes, mainly key generation, encryption, execution, and decryption. The different steps can of course be called separately, as in the example below:
@@ -100,5 +99,5 @@ decrypted_result = circuit.decrypt(encrypted_result)
## Further reading
- [Working With Floating Points Tutorial](../tutorial/working_with_floating_points.md)
- [Table Lookup Tutorial](../tutorial/table_lookup.md)
* [Working With Floating Points Tutorial](../tutorial/working\_with\_floating\_points.md)
* [Table Lookup Tutorial](../tutorial/table\_lookup.md)


@@ -8,18 +8,18 @@ To install **Concrete Numpy** from PyPi, run the following:
pip install concrete-numpy
```
```{note}
{% hint style='info' %}
Note that **concrete-numpy** has `pygraphviz` as an optional dependency to draw graphs.
```
{% endhint %}
```{WARNING}
{% hint style='info' %}
`pygraphviz` requires the `graphviz` packages to be installed on your OS, see <a href="https://pygraphviz.github.io/documentation/stable/install.html">https://pygraphviz.github.io/documentation/stable/install.html</a>
```
{% endhint %}
```{DANGER}
{% hint style='tip' %}
`graphviz` packages are binary packages that won't automatically be installed by pip.
Do check <a href="https://pygraphviz.github.io/documentation/stable/install.html">https://pygraphviz.github.io/documentation/stable/install.html</a> for instructions on how to install `graphviz` for `pygraphviz`.
```
{% endhint %}
You can install the extra python dependencies for drawing with:
@@ -44,10 +44,12 @@ The image can be used with docker volumes, [see the docker documentation here](h
You can then use this image with the following command:
```shell
# Without local volume:
docker run --rm -it -p 8888:8888 zamafhe/concrete-numpy:v0.2.0
```
# With local volume to save notebooks on host:
or with local volume to save notebooks on host:
```
docker run --rm -it -p 8888:8888 -v /host/path:/data zamafhe/concrete-numpy:v0.2.0
```


@@ -106,6 +106,7 @@ We determine the shapes of the inputs from the inputset, and we infer the shapes
You can get the shape of a tensor through its `shape` property, just like in numpy.
Here is an example:
```python
def function_to_compile(x):
return x.reshape((x.shape[0], -1))


@@ -1,100 +0,0 @@
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Concrete Numpy'
copyright = '2021, Zama'
author = 'Zama'
description = 'Zama Concrete Numpy'
root_url = os.environ.get("DOC_ROOT_URL", "/concrete-numpy")
root_url = root_url if root_url.endswith('/') else root_url + '/'
# The full version, including alpha/beta/rc tags
release = "0.6.0-rc7"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"myst_parser",
"sphinx_copybutton",
"nbsphinx",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
]
myst_enable_extensions = [
"amsmath",
"colon_fence",
"dollarmath",
]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# Group member variables and methods separately (not alphabetically)
autodoc_member_order = "groupwise"
# -- Options for nbsphinx ----------------------------------------------------
nbsphinx_codecell_lexer = 'ipython3'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_zama_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_theme_options = {
"github_url": "https://github.com/zama-ai/concrete-numpy",
"twitter_url": "https://twitter.com/zama_fhe",
"icon_links": [{
"name": "Discourse",
"url": "https://community.zama.ai/c/concrete-numpy/7",
"icon": "fab fa-discourse",
}],
"navigation_depth": 2,
"collapse_navigation": True,
"google_analytics_id": "G-XRM93J9QBW",
}
html_context = {
"show_version": True,
"author": author,
"description": description,
"language": "en",
"versions_url": "#",
}
html_title = "%s Manual" % (project)
# Uncomment for test
# html_extra_path = ["versions.json", "alert.html"]
def setup(app):
html_init = f"const CURRENT_VERSION = {release!r};"
html_init += f"const ROOT_URL = {root_url!r};"
app.add_js_file(None, body=html_init, priority=100)


@@ -2,10 +2,7 @@
## What is **concrete-numpy**?
**concrete-numpy** is a convenient python package, made on top of **Concrete compiler** and **Concrete library**, for developing homomorphic applications.
One of its essential functionalities is to transform Python functions to their `MLIR` equivalent.
Unfortunately, not all python functions can be converted due to the limits of the current product (we are in the alpha stage), or sometimes due to inherent restrictions of FHE itself.
However, you can already build interesting and impressive use cases, and more will be available in future versions of the framework.
**concrete-numpy** is a convenient python package, made on top of **Concrete compiler** and **Concrete library**, for developing homomorphic applications. One of its essential functionalities is to transform Python functions to their `MLIR` equivalent. Unfortunately, not all python functions can be converted due to the limits of the current product (we are in the alpha stage), or sometimes due to inherent restrictions of FHE itself. However, you can already build interesting and impressive use cases, and more will be available in future versions of the framework.
## How can I use it?
@@ -30,27 +27,19 @@ circuit.encrypt_run_decrypt(1, 0)
## Overview of the numpy compilation process
The compilation journey begins with tracing to get an easy to understand and manipulate representation of the function.
We call this representation `Computation Graph`, which is basically a Directed Acyclic Graph (DAG) containing nodes representing the computations done in the function.
Working with graphs is good because they have been studied extensively over the years and there are a lot of algorithms to manipulate them.
Internally, we use [networkx](https://networkx.org), which is an excellent graph library for Python.
The compilation journey begins with tracing to get an easy to understand and manipulate representation of the function. We call this representation `Computation Graph`, which is basically a Directed Acyclic Graph (DAG) containing nodes representing the computations done in the function. Working with graphs is good because they have been studied extensively over the years and there are a lot of algorithms to manipulate them. Internally, we use [networkx](https://networkx.org), which is an excellent graph library for Python.
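To make the `Computation Graph` idea more tangible, here is a minimal sketch of how a DAG for `2 * x + 3` could be stored with networkx; the node and edge attribute names (`op`, `input_idx`) are hypothetical placeholders for illustration, not the actual Concrete Numpy data structures:

```python
import networkx as nx

graph = nx.DiGraph()

# One node per computation in `2 * x + 3`
graph.add_node("x", op="input")
graph.add_node("2", op="constant")
graph.add_node("3", op="constant")
graph.add_node("*", op="multiply")
graph.add_node("+", op="add")

# Edge attributes record operand order, which matters for
# non-commutative operations such as subtraction
graph.add_edge("x", "*", input_idx=0)
graph.add_edge("2", "*", input_idx=1)
graph.add_edge("*", "+", input_idx=0)
graph.add_edge("3", "+", input_idx=1)

assert nx.is_directed_acyclic_graph(graph)
print(list(nx.topological_sort(graph)))  # e.g. ['x', '2', '3', '*', '+']
```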
The next step in the compilation is transforming the computation graph.
There are many transformations we perform, and they will be discussed in their own sections.
In any case, the result of transformations is just another computation graph.
The next step in the compilation is transforming the computation graph. There are many transformations we perform, and they will be discussed in their own sections. In any case, the result of transformations is just another computation graph.
After transformations are applied, we need to determine the bounds (i.e., the minimum and the maximum values) of each intermediate node.
This is required because FHE currently allows a limited precision for computations.
Bound measurement is our way to know the precision needed for the function.
After transformations are applied, we need to determine the bounds (i.e., the minimum and the maximum values) of each intermediate node. This is required because FHE currently allows a limited precision for computations. Bound measurement is our way to know the precision needed for the function.
The final step is to transform the computation graph to equivalent `MLIR` code.
How this is done will be explained in detail in its own chapter.
The final step is to transform the computation graph to equivalent `MLIR` code. How this is done will be explained in detail in its own chapter.
Once the MLIR is prepared, the rest of the stack, which you can learn more about [here](http://docs.zama.ai/), takes over and completes the compilation process.
Here is the visual representation of the pipeline:
![Frontend Flow](../../_static/compilation-pipeline/frontend_flow.svg)
![Frontend Flow](../_static/compilation-pipeline/frontend_flow.svg)
## Tracing
@@ -63,13 +52,11 @@ def f(x):
the goal of tracing is to create the following computation graph without needing any change from the user.
![](../../_static/compilation-pipeline/two_x_plus_three.png)
![](../_static/compilation-pipeline/two_x_plus_three.png)
(Note that the edge labels are for non-commutative operations. To give an example, a subtraction node represents `(predecessor with edge label 0) - (predecessor with edge label 1)`)
To do this, we make use of Tracers, which are objects that record the operations performed during their creation.
We create a `Tracer` for each argument of the function and call the function with those tracers.
Tracers make use of the operator overloading feature of Python to achieve their goal.
To do this, we make use of Tracers, which are objects that record the operations performed during their creation. We create a `Tracer` for each argument of the function and call the function with those tracers. Tracers make use of the operator overloading feature of Python to achieve their goal.
Here is an example:
@@ -83,16 +70,11 @@ y = Tracer(computation=Input("y"))
resulting_tracer = f(x, y)
```
`2 * y` will be performed first, and `*` is overloaded for `Tracer` to return another tracer:
`Tracer(computation=Multiply(Constant(2), self.computation))` which is equal to:
`Tracer(computation=Multiply(Constant(2), Input("y")))`
`2 * y` will be performed first, and `*` is overloaded for `Tracer` to return another tracer: `Tracer(computation=Multiply(Constant(2), self.computation))` which is equal to: `Tracer(computation=Multiply(Constant(2), Input("y")))`
`x + (2 * y)` will be performed next, and `+` is overloaded for `Tracer` to return another tracer:
`Tracer(computation=Add(self.computation, (2 * y).computation))` which is equal to:
`Tracer(computation=Add(Input("x"), Multiply(Constant(2), Input("y"))))`
`x + (2 * y)` will be performed next, and `+` is overloaded for `Tracer` to return another tracer: `Tracer(computation=Add(self.computation, (2 * y).computation))` which is equal to: `Tracer(computation=Add(Input("x"), Multiply(Constant(2), Input("y"))))`
In the end, we will have output Tracers that can be used to create the computation graph.
The implementation is a bit more complex than this, but the idea is the same.
In the end, we will have output Tracers that can be used to create the computation graph. The implementation is a bit more complex than this, but the idea is the same.
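As a minimal, self-contained sketch of this mechanism (plain tuples stand in for the computation objects; this is not the actual Concrete Numpy implementation):

```python
class Tracer:
    """Toy tracer: records operations via operator overloading."""

    def __init__(self, computation):
        self.computation = computation

    @staticmethod
    def _lift(value):
        # Wrap plain Python constants so they can be recorded too
        return value.computation if isinstance(value, Tracer) else ("Constant", value)

    def __add__(self, other):
        return Tracer(("Add", self.computation, Tracer._lift(other)))

    def __radd__(self, other):
        return Tracer(("Add", Tracer._lift(other), self.computation))

    def __mul__(self, other):
        return Tracer(("Multiply", self.computation, Tracer._lift(other)))

    def __rmul__(self, other):
        return Tracer(("Multiply", Tracer._lift(other), self.computation))

def f(x, y):
    return x + 2 * y

x = Tracer(("Input", "x"))
y = Tracer(("Input", "y"))
print(f(x, y).computation)
# ('Add', ('Input', 'x'), ('Multiply', ('Constant', 2), ('Input', 'y')))
```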
Tracing is also responsible for indicating whether the values in the node would be encrypted or not, and the rule for that is: if a node has an encrypted predecessor, it is encrypted as well.
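This rule can be illustrated with a tiny helper (a sketch, not the real implementation), which propagates the encrypted status through predecessors:

```python
# A node is encrypted as soon as any of its predecessors is encrypted
def is_encrypted(node, predecessors, encrypted_inputs):
    preds = predecessors.get(node, [])
    if not preds:  # an input or a constant
        return node in encrypted_inputs
    return any(is_encrypted(p, predecessors, encrypted_inputs) for p in preds)

# Predecessors in the graph of x + (2 * y), with only `y` encrypted
predecessors = {"*": ["2", "y"], "+": ["x", "*"]}
print(is_encrypted("*", predecessors, {"y"}))  # True: `y` is encrypted
print(is_encrypted("+", predecessors, {"y"}))  # True: `*` is encrypted
print(is_encrypted("x", predecessors, {"y"}))  # False: clear input
```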
@@ -100,15 +82,13 @@ Tracing is also responsible for indicating whether the values in the node would
The goal of topological transforms is to make more functions compilable.
With the current version of **Concrete Numpy**, floating point inputs and floating point outputs are not supported.
However, if the floating point operations are intermediate operations, they can sometimes be fused into a single table lookup from integer to integer thanks to some specific transforms.
With the current version of **Concrete Numpy**, floating point inputs and floating point outputs are not supported. However, if the floating point operations are intermediate operations, they can sometimes be fused into a single table lookup from integer to integer thanks to some specific transforms.
Let's take a closer look at the transforms we can currently perform.
### Fusing floating point operations
We have allocated a whole new chapter to explaining float fusing.
You can find it [here](./float-fusing.md).
We have allocated a whole new chapter to explaining float fusing. You can find it [here](float-fusing.md).
## Bounds measurement
@@ -126,16 +106,13 @@ Let's take a closer look at how we perform bounds measurement.
This is a simple approach that requires an inputset to be provided by the user.
The inputset is not to be confused with the dataset which is classical in ML, as it doesn't require labels.
Rather, it is a set of values which are typical inputs of the function.
The inputset is not to be confused with the dataset which is classical in ML, as it doesn't require labels. Rather, it is a set of values which are typical inputs of the function.
The idea is to evaluate each input in the inputset and record the result of each operation in the computation graph.
Then we compare the evaluation results with the current minimum/maximum values of each node and update the minimum/maximum accordingly.
After the entire inputset is evaluated, we assign a data type to each node using the minimum and the maximum value it contains.
The idea is to evaluate each input in the inputset and record the result of each operation in the computation graph. Then we compare the evaluation results with the current minimum/maximum values of each node and update the minimum/maximum accordingly. After the entire inputset is evaluated, we assign a data type to each node using the minimum and the maximum value it contains.
Here is an example, given this computation graph where `x` is encrypted:
![](../../_static/compilation-pipeline/two_x_plus_three.png)
![](../_static/compilation-pipeline/two_x_plus_three.png)
and this inputset:
@@ -144,57 +121,64 @@ and this inputset:
```
Evaluation Result of `2`:
- `x`: 2
- `2`: 2
- `*`: 4
- `3`: 3
- `+`: 7
* `x`: 2
* `2`: 2
* `*`: 4
* `3`: 3
* `+`: 7
New Bounds:
- `x`: [**2**, **2**]
- `2`: [**2**, **2**]
- `*`: [**4**, **4**]
- `3`: [**3**, **3**]
- `+`: [**7**, **7**]
* `x`: \[**2**, **2**]
* `2`: \[**2**, **2**]
* `*`: \[**4**, **4**]
* `3`: \[**3**, **3**]
* `+`: \[**7**, **7**]
Evaluation Result of `3`:
- `x`: 3
- `2`: 2
- `*`: 6
- `3`: 3
- `+`: 9
* `x`: 3
* `2`: 2
* `*`: 6
* `3`: 3
* `+`: 9
New Bounds:
- `x`: [2, **3**]
- `2`: [2, 2]
- `*`: [4, **6**]
- `3`: [3, 3]
- `+`: [7, **9**]
* `x`: \[2, **3**]
* `2`: \[2, 2]
* `*`: \[4, **6**]
* `3`: \[3, 3]
* `+`: \[7, **9**]
Evaluation Result of `1`:
- `x`: 1
- `2`: 2
- `*`: 2
- `3`: 3
- `+`: 5
* `x`: 1
* `2`: 2
* `*`: 2
* `3`: 3
* `+`: 5
New Bounds:
- `x`: [**1**, 3]
- `2`: [2, 2]
- `*`: [**2**, 6]
- `3`: [3, 3]
- `+`: [**5**, 9]
* `x`: \[**1**, 3]
* `2`: \[2, 2]
* `*`: \[**2**, 6]
* `3`: \[3, 3]
* `+`: \[**5**, 9]
Assigned Data Types:
- `x`: Encrypted\<**uint2**>
- `2`: Clear\<**uint2**>
- `*`: Encrypted\<**uint3**>
- `3`: Clear\<**uint2**>
- `+`: Encrypted\<**uint4**>
* `x`: Encrypted<**uint2**>
* `2`: Clear<**uint2**>
* `*`: Encrypted<**uint3**>
* `3`: Clear<**uint2**>
* `+`: Encrypted<**uint4**>
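The whole procedure can be condensed into a short sketch (an illustration only; the node evaluation is hard-coded for `2 * x + 3`, and `bit_width` assumes unsigned values) that reproduces the bounds and assigned data types above for the inputset `[2, 3, 1]`:

```python
import math

def evaluate(x):
    # Value of every node of `2 * x + 3` for one input sample
    return {"x": x, "2": 2, "*": 2 * x, "3": 3, "+": 2 * x + 3}

bounds = {}
for sample in [2, 3, 1]:
    for node, value in evaluate(sample).items():
        low, high = bounds.get(node, (value, value))
        bounds[node] = (min(low, value), max(high, value))

def bit_width(high):
    # Unsigned bit width needed to hold the node's maximum value
    return max(1, math.ceil(math.log2(high + 1)))

for node, (low, high) in bounds.items():
    print(f"{node}: [{low}, {high}] -> uint{bit_width(high)}")
# x: [1, 3] -> uint2, *: [2, 6] -> uint3, +: [5, 9] -> uint4, ...
```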
## MLIR conversion
The actual compilation will be done by the **Concrete** compiler, which is expecting an MLIR input. The MLIR conversion goes from a computation graph to its MLIR equivalent. You can read more about it [here](./mlir.md).
The actual compilation will be done by the **Concrete** compiler, which is expecting an MLIR input. The MLIR conversion goes from a computation graph to its MLIR equivalent. You can read more about it [here](mlir.md).
## Example walkthrough #1
@@ -213,7 +197,7 @@ x = "encrypted"
#### Corresponding computation graph
![](../../_static/compilation-pipeline/two_x_plus_three.png)
![](../_static/compilation-pipeline/two_x_plus_three.png)
### Topological transforms
@@ -221,14 +205,15 @@ x = "encrypted"
This transform isn't applied since the computation doesn't involve any floating point operations.
### Bounds measurement using [2, 3, 1] as inputset (same settings as above)
### Bounds measurement using \[2, 3, 1] as inputset (same settings as above)
Data Types:
- `x`: Encrypted\<**uint2**>
- `2`: Clear\<**uint2**>
- `*`: Encrypted\<**uint3**>
- `3`: Clear\<**uint2**>
- `+`: Encrypted\<**uint4**>
* `x`: Encrypted<**uint2**>
* `2`: Clear<**uint2**>
* `*`: Encrypted<**uint3**>
* `3`: Clear<**uint2**>
* `+`: Encrypted<**uint4**>
### MLIR lowering
@@ -244,7 +229,6 @@ module {
}
```
## Example walkthrough #2
### Function to homomorphize
@@ -263,7 +247,7 @@ y = "encrypted"
#### Corresponding computation graph
![](../../_static/compilation-pipeline/forty_two_minus_x_plus_y_times_two.png)
![](../_static/compilation-pipeline/forty_two_minus_x_plus_y_times_two.png)
### Topological transforms
@@ -271,61 +255,67 @@ y = "encrypted"
This transform isn't applied since the computation doesn't involve any floating point operations.
### Bounds measurement using [(6, 0), (5, 1), (3, 0), (4, 1)] as inputset
### Bounds measurement using \[(6, 0), (5, 1), (3, 0), (4, 1)] as inputset
Evaluation Result of `(6, 0)`:
- `42`: 42
- `x`: 6
- `y`: 0
- `2`: 2
- `-`: 36
- `*`: 0
- `+`: 36
* `42`: 42
* `x`: 6
* `y`: 0
* `2`: 2
* `-`: 36
* `*`: 0
* `+`: 36
Evaluation Result of `(5, 1)`:
- `42`: 42
- `x`: 5
- `y`: 1
- `2`: 2
- `-`: 37
- `*`: 2
- `+`: 39
* `42`: 42
* `x`: 5
* `y`: 1
* `2`: 2
* `-`: 37
* `*`: 2
* `+`: 39
Evaluation Result of `(3, 0)`:
- `42`: 42
- `x`: 3
- `y`: 0
- `2`: 2
- `-`: 39
- `*`: 0
- `+`: 39
* `42`: 42
* `x`: 3
* `y`: 0
* `2`: 2
* `-`: 39
* `*`: 0
* `+`: 39
Evaluation Result of `(4, 1)`:
- `42`: 42
- `x`: 4
- `y`: 1
- `2`: 2
- `-`: 38
- `*`: 2
- `+`: 40
* `42`: 42
* `x`: 4
* `y`: 1
* `2`: 2
* `-`: 38
* `*`: 2
* `+`: 40
Bounds:
- `42`: [42, 42]
- `x`: [3, 6]
- `y`: [0, 1]
- `2`: [2, 2]
- `-`: [36, 39]
- `*`: [0, 2]
- `+`: [36, 40]
* `42`: \[42, 42]
* `x`: \[3, 6]
* `y`: \[0, 1]
* `2`: \[2, 2]
* `-`: \[36, 39]
* `*`: \[0, 2]
* `+`: \[36, 40]
Data Types:
- `42`: Clear\<**uint6**>
- `x`: Encrypted\<**uint3**>
- `y`: Encrypted\<**uint1**>
- `2`: Clear\<**uint2**>
- `-`: Encrypted\<**uint6**>
- `*`: Encrypted\<**uint2**>
- `+`: Encrypted\<**uint6**>
* `42`: Clear<**uint6**>
* `x`: Encrypted<**uint3**>
* `y`: Encrypted<**uint1**>
* `2`: Clear<**uint2**>
* `-`: Encrypted<**uint6**>
* `*`: Encrypted<**uint2**>
* `+`: Encrypted<**uint6**>
### MLIR lowering


@@ -1,11 +1,11 @@
# Contributing
```{important}
{% hint style='info' %}
There are two ways to contribute to **concrete-numpy** or to **Concrete** tools in general:
- you can open issues to report bugs and typos and to suggest ideas
- you can ask to become an official contributor by emailing hello@zama.ai. Only approved contributors can send pull requests, so please make sure to get in touch before you do!
```
{% endhint %}
Let's go over some other important things that you need to be careful about.
@@ -70,9 +70,9 @@ To learn more about conventional commits, check [this](https://www.conventionalc
## Before creating pull request
```{important}
{% hint style='tip' %}
We remind you that only official contributors can send pull requests. To become an official contributor, please email hello@zama.ai.
```
{% endhint %}
You should rebase on top of the `main` branch before you create your pull request. We don't allow merge commits, so rebasing on `main` before pushing gives you the best chance of avoiding having to rewrite parts of your PR later if some conflicts arise with other PRs being merged. After you commit your changes to your new branch, you can use the following commands to rebase:


@@ -1,10 +0,0 @@
Explanations
============
.. toctree::
:maxdepth: 1
compilation.md
terminology_and_structure.md
float-fusing.md
mlir.md


@@ -1,3 +0,0 @@
# MLIR
TODO


@@ -25,15 +25,15 @@ Any computation where there is a single variable integer input and a single inte
The `quantized_sin` graph of operations:
![](../../_static/float_fusing_example/before.png)
![](../_static/float_fusing_example/before.png)
The float subgraph that was detected:
![](../../_static/float_fusing_example/subgraph.png)
![](../_static/float_fusing_example/subgraph.png)
The simplified graph of operations with the float subgraph condensed into a `GenericFunction` node:
![](../../_static/float_fusing_example/after.png)
![](../_static/float_fusing_example/after.png)
## How is it done in **Concrete Numpy**?
@@ -59,15 +59,15 @@ def fusable_with_bigger_search(x, y):
The `fusable_with_bigger_search` graph of operations:
![](../../_static/float_fusing_example/before_bigger_search.png)
![](../_static/float_fusing_example/before_bigger_search.png)
The float subgraph that was detected:
![](../../_static/float_fusing_example/subgraph_bigger_search.png)
![](../_static/float_fusing_example/subgraph_bigger_search.png)
The simplified graph of operations with the float subgraph condensed into a `GenericFunction` node:
![](../../_static/float_fusing_example/after_bigger_search.png)
![](../_static/float_fusing_example/after_bigger_search.png)
An example of a non-fusable computation with that technique is:
@@ -83,4 +83,4 @@ def non_fusable(x, y):
return add_int
```
From `add_int` you will find two `Add` nodes going from int to float (`x_1` and `y_1`) which we cannot represent with a single input table look-up. The Kolmogorov-Arnold representation theorem states that every multivariate continuous function can be represented as a superposition of continuous functions of one variable ([from Wikipedia](https://en.wikipedia.org/wiki/Kolmogorov%E2%80%93Arnold_representation_theorem)), so the above case could be handled in future versions of **Concrete** tools.
From `add_int` you will find two `Add` nodes going from int to float (`x_1` and `y_1`) which we cannot represent with a single input table look-up. The Kolmogorov-Arnold representation theorem states that every multivariate continuous function can be represented as a superposition of continuous functions of one variable ([from Wikipedia](https://en.wikipedia.org/wiki/Kolmogorov%E2%80%93Arnold\_representation\_theorem)), so the above case could be handled in future versions of **Concrete** tools.


@@ -1,27 +0,0 @@
# Documenting
## Using Sphinx
One can simply create docs with Sphinx and open them by doing:
```shell
make docs
```
Reminder that this needs to be done in docker.
The documentation contains both files written by hand by developers (the .md files) and files automatically created by parsing the source files.
### Opening doc
On macOS, you can do
```shell
make open_docs
```
On other systems, simply open `docs/_build/html/index.html`


@@ -1,11 +0,0 @@
How To
======
.. toctree::
:maxdepth: 1
project_setup.md
docker.md
documenting.md
releasing.md
contributing.md


@@ -1,9 +0,0 @@
Developer Guide
===============
.. toctree::
:maxdepth: 2
How To <howto/index>
Explanations <explanation/index>

docs/dev/mlir.md (new file)

@@ -0,0 +1,17 @@
# MLIR
The MLIR project is a sub-project of the LLVM project. It's designed to simplify building domain-specific compilers such as ours: Concrete Compiler.
Concrete Compiler accepts MLIR as input and emits compiled assembly code for the target architecture.
Concrete NumPy does the MLIR generation from the computation graph. Code related to this conversion is in the `concrete/numpy/mlir` folder.
The conversion can be performed using the `convert` method of the `GraphConverter` class.
Within the `convert` method of `GraphConverter`:
* MLIR compatibility of the graph is checked
* Bit-width constraints are checked
* Negative lookup tables are offset (see the sketch after this list)
* The computation graph is traversed and each node is converted to its corresponding MLIR representation using the `NodeConverter` class
* The string representation of the resulting MLIR is returned
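For the lookup-table offsetting step, here is a small sketch of the idea, under the assumption that it works by shifting indices so they become non-negative (an illustration only, not the compiler's actual code):

```python
# A table indexed by x in [-4, 3] is rewritten so that the index
# x + offset is always non-negative, with offset = -min(x) = 4
table = {x: x * x for x in range(-4, 4)}  # signed-index lookup table
offset = 4
offset_table = [table[i - offset] for i in range(len(table))]

for x in range(-4, 4):
    assert offset_table[x + offset] == table[x]
print(offset_table)  # [16, 9, 4, 1, 0, 1, 4, 9]
```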


@@ -1,10 +1,10 @@
# Project Setup
```{note}
{% hint style='info' %}
It is strongly recommended to use the development docker (see the [docker](./docker.md) guide). However, you can set up the project on bare macOS and Linux provided you install the required dependencies (check Dockerfile.env for the required binary packages like make).
The project targets Python 3.8 through 3.9 inclusive.
```
{% endhint %}
## Installing Python
@@ -18,11 +18,11 @@ You can follow [this](https://realpython.com/installing-python/) guide to instal
You can follow [this](https://python-poetry.org/docs/#installation) official guide to install it.
```{WARNING}
{% hint style='danger' %}
As there is no `concrete-compiler` package for Windows, only the dev dependencies can be installed. This requires poetry >= 1.2.
At the time of writing (January 2022), there is only an alpha version of poetry 1.2 that you can install. In the meantime we recommend following [this link to setup the docker environment](./docker.md) on Windows.
```
{% endhint %}
## Installing make
@@ -45,9 +45,9 @@ It is possible to install `gmake` as `make`, check this [StackOverflow post](htt
On Windows check [this GitHub gist](https://gist.github.com/evanwill/0207876c3243bbb6863e65ec5dc3f058#make).
```{hint}
{% hint style='tip' %}
In the following sections, be sure to use the proper `make` tool for your system: `make`, `gmake`, or other.
```
{% endhint %}
## Cloning repository
@@ -59,9 +59,9 @@ Clone the code repository using the link for your favourite communication protoc
We are going to make use of virtual environments. This helps to keep the project isolated from other `Python` projects in the system. The following commands will create a new virtual environment under the project directory and install dependencies to it.
```{DANGER}
{% hint style='danger' %}
The following command will not work on Windows if you don't have poetry >= 1.2. As poetry 1.2 is still in alpha we recommend following [this link to setup the docker environment](./docker.md) instead.
```
{% endhint %}
```shell
cd concrete-numpy


@@ -1,19 +1,21 @@
# Debugging / Support / Submitting Issues
This version of **Concrete Numpy** is a first version of the product, meaning that it is not completely finished, contains several bugs (whether known or unknown at this time), and will improve over time with feedback from early users.
Here are some ways to debug your problems. If nothing seems conclusive, you can still report the issue, as explained in a later section of this page.
## Is it a bug by the framework or by the user?
If ever your numpy program fails, it may be because:
- of bugs due to **Concrete**
- of bugs due to the user, notably a bug that would exist even without considering FHE (does the function you want to compile run well with plain numpy?), or a usage of the framework that is not as expected or that ignores its limits.
* of bugs due to **Concrete**
* of bugs due to the user, notably a bug that would exist even without considering FHE (does the function you want to compile run well with plain numpy?), or a usage of the framework that is not as expected or that ignores its limits.
For the latter kind of bugs, we encourage the user to have a look at:
- the error message received
- the documentation of the product
- the known limits of the product (such as the reduced set of supported operations at this time, or the limited precision of the computations).
* the error message received
* the documentation of the product
* the known limits of the product (such as the reduced set of supported operations at this time, or the limited precision of the computations).
Once you have tried to see if the bug was not your own, it is time to go further.
@@ -26,8 +28,9 @@ So, in general, when a bug appears, it may be a good idea to enlarge the inputse
## Having a reproducible bug
Once you're sure it is a bug, it would be nice to try to:
- make it highly reproducible: e.g., by reducing the randomness as much as possible; e.g., if you can find an input which fails, there is no reason to leave the input random
- reduce it to the smallest possible bug: it is easier to investigate bugs which are small, so when you have an issue, please try to reduce it to a smaller issue, notably with fewer lines of code, smaller parameters, a less complex function to compile, faster scripts, etc.
* make it highly reproducible: e.g., by reducing the randomness as much as possible; e.g., if you can find an input which fails, there is no reason to leave the input random
* reduce it to the smallest possible bug: it is easier to investigate bugs which are small, so when you have an issue, please try to reduce it to a smaller issue, notably with fewer lines of code, smaller parameters, a less complex function to compile, faster scripts, etc.
## Asking the community
@@ -37,37 +40,37 @@ Hopefully, it is just a misunderstanding or a small mistake on your side, that w
## Having a look to the compilation artifacts
When things are more complicated, or if you want to have a look by yourself, you may want to start with the compilation reports, which are called artifacts. This is as simple as described [here](../tutorial/compilation_artifacts.md).
When things are more complicated, or if you want to have a look by yourself, you may want to start with the compilation reports, which are called artifacts. This is as simple as described [here](../tutorial/compilation\_artifacts.md).
The artifact system will create a directory, containing:
- **environment.txt:** information about your system
- **requirements.txt:** information about your python dependencies
- **function.txt:** source code of the function you are compiling
- **parameters.txt:** parameters you specified for compilation
- **1.initial.graph.txt:** textual representation of the initial computation graph right after tracing
- **1.initial.graph.png:** visual representation of the initial computation graph right after tracing
- ...
- **X.description.graph.txt:** textual representation of the Xth computation graph after topological transforms
- **X.description.graph.png:** visual representation of the Xth computation graph after topological transforms
- ...
- **N.final.graph.txt:** textual representation of the final computation graph right before MLIR conversion
- **N.final.graph.png:** visual representation of the final computation graph right before MLIR conversion
- **bounds.txt:** ranges of data in the different steps of the computation for the final graph that is being compiled
- **mlir.txt**: resulting MLIR code that is sent to the compiler (if compilation succeeded)
- **traceback.txt**: information about the error you encountered (if compilation failed)
* **environment.txt:** information about your system
* **requirements.txt:** information about your python dependencies
* **function.txt:** source code of the function you are compiling
* **parameters.txt:** parameters you specified for compilation
* **1.initial.graph.txt:** textual representation of the initial computation graph right after tracing
* **1.initial.graph.png:** visual representation of the initial computation graph right after tracing
* ...
* **X.description.graph.txt:** textual representation of the Xth computation graph after topological transforms
* **X.description.graph.png:** visual representation of the Xth computation graph after topological transforms
* ...
* **N.final.graph.txt:** textual representation of the final computation graph right before MLIR conversion
* **N.final.graph.png:** visual representation of the final computation graph right before MLIR conversion
* **bounds.txt:** ranges of data in the different steps of the computation for the final graph that is being compiled
* **mlir.txt**: resulting MLIR code that is sent to the compiler (if compilation succeeded)
* **traceback.txt**: information about the error you encountered (if compilation failed)
Attaching the artifact to your issue or Slack message may help people have a look at the core of the problem.
The more precise your bug, the more likely we can reproduce and fix it.
Attaching the artifact to your issue or Slack message may help people have a look at the core of the problem. The more precise your bug, the more likely we can reproduce and fix it.
To simplify our work and let us reproduce your bug easily, we need all the information we can get. So, in addition to your python script, the following information would be very useful:
- compilation artifacts
- reproducibility rate you see on your side
- any insight you might have on the bug
- any workaround you have been able to find
Remember, **Concrete Numpy** is a project where we are open to contributions; more information is available at [Contributing](../../dev/howto/contributing.md).
* compilation artifacts
* reproducibility rate you see on your side
* any insight you might have on the bug
* any workaround you have been able to find
Remember, **Concrete Numpy** is a project where we are open to contributions; more information is available at [Contributing](../developer/contributing.md).
## Submitting an issue
In case you have a reproducible bug that you have reduced to a small piece of code, we have our issue tracker (link at the right of the top menu). Remember that a well-described short issue is more likely to be studied and fixed. The more issues we receive, the better the product will be.


@@ -1,4 +1,4 @@
# Printing and Drawing a FHE circuit
# Printing and drawing
Sometimes, it can be useful to print or draw fhe circuits; we provide methods to do just that. Please read [Compiling and Executing](../basics/compiling_and_executing.md) before reading further, to see how you can compile your function into an fhe circuit.
@@ -13,10 +13,10 @@ print(circuit)
## Drawing
```{WARNING}
{% hint style="danger" %}
The draw function requires the installation of the package's extra dependencies.
The drawing package required is `pygraphviz` which needs `graphviz` packages installed on your OS, see <a href="https://pygraphviz.github.io/documentation/stable/install.html">https://pygraphviz.github.io/documentation/stable/install.html</a>
The drawing package required is `pygraphviz` which needs `graphviz` packages installed on your OS, see [https://pygraphviz.github.io/documentation/stable/install.html](https://pygraphviz.github.io/documentation/stable/install.html)
To install the required drawing packages once you have `graphviz` installed run:
@@ -25,7 +25,7 @@ To install the required drawing packages once you have `graphviz` installed run:
You may need to force reinstallation
`pip install --force-reinstall concrete-numpy[full]`
```
{% endhint %}
To draw your circuit, you can do the following:


@@ -67,9 +67,9 @@ Quantization and binarization increase inference speed, reduce model byte-size a
The end result has a granularity/imprecision linked to the data types used and, for the Quantized Logistic Regression, to the lattice used to evaluate the logistic model.
## Limitations for FHE-friendly neural networks
Recent quantization literature often takes a few shortcuts to reach performance similar to that achieved by floating point models. A common one is that the input is left in floating point. This is also true for the first and last layers, which have more impact on the resulting model accuracy than hidden layers.
But, in **Concrete Numpy**, the inputs, weights and the accumulator must remain within a maximum of 7 bits.
@@ -77,14 +77,14 @@ Thus, in **Concrete Numpy**, we also quantize the input data and network output
The core operation in neural networks is essentially matrix multiplication (matmul). This operation must be done such that the maximum value of its result requires at most 7 bits of precision.
For example, if you quantize your input and weights with $ n_{\mathsf{weights}} $ and $ n_{\mathsf{inputs}} $ bits of precision, you can compute the maximum dimensionality of the input and weights before the result of the matmul **can** exceed 7 bits, as follows:
For example, if you quantize your input and weights with $$ n_{\mathsf{weights}} $$ and $$ n_{\mathsf{inputs}} $$ bits of precision, you can compute the maximum dimensionality of the input and weights before the result of the matmul **can** exceed 7 bits, as follows:
$$ \Omega = \mathsf{floor} \left( \frac{2^{n_{\mathsf{max}}} - 1}{(2^{n_{\mathsf{weights}}} - 1)(2^{n_{\mathsf{inputs}}} - 1)} \right) $$
where $ n_{\mathsf{max}} = 7 $ is the maximum precision allowed. For example, if we set $ n_{\mathsf{weights}} = 2 $ and $ n_{\mathsf{inputs}} = 2 $ with $ n_{\mathsf{max}} = 7 $, then $ \Omega = 14 $ is the maximum number of inputs/weights allowed in the matmul.
where $$ n_{\mathsf{max}} = 7 $$ is the maximum precision allowed. For example, if we set $$ n_{\mathsf{weights}} = 2 $$ and $$ n_{\mathsf{inputs}} = 2 $$ with $$ n_{\mathsf{max}} = 7 $$, then $$ \Omega = 14 $$ is the maximum number of inputs/weights allowed in the matmul.
Above $ \Omega $ dimensions in the input and weights, the risk of overflow increases quickly. It may happen that for some distributions of weights and values the computation does not overflow, but the risk increases rapidly with the number of dimensions.
Above $$ \Omega $$ dimensions in the input and weights, the risk of overflow increases quickly. It may happen that for some distributions of weights and values the computation does not overflow, but the risk increases rapidly with the number of dimensions.
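As a quick numeric check of the formula, with the values from the example above:

```python
# Omega = floor((2^n_max - 1) / ((2^n_weights - 1) * (2^n_inputs - 1)))
n_max, n_weights, n_inputs = 7, 2, 2
omega = (2**n_max - 1) // ((2**n_weights - 1) * (2**n_inputs - 1))
print(omega)  # 127 // 9 == 14, matching the example above
```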
Currently, **Concrete Numpy** pre-computes the number of bits needed for the computation depending on the inputset calibration data and does not allow overflow[^1] to happen.
[^1]: [Integer overflow](https://en.wikipedia.org/wiki/Integer_overflow)


@@ -8,7 +8,7 @@ FHE is a powerful cryptographic tool, which allows servers to perform computatio
FHE is also a killer feature regarding data breaches: as anything done on the server is done over encrypted data, even if the server is compromised, there is in the end no leak of useful data.
With **Concrete Numpy**, data scientists can implement machine learning models using a [subset of numpy](../howto/numpy_support.md) that compile to FHE. They will be able to train models with popular machine learning libraries and then convert the prediction functions of these models, that they write in numpy, to FHE.
With **Concrete Numpy**, data scientists can implement machine learning models using a [subset of numpy](basics/numpy_support.md) that compiles to FHE. They will be able to train models with popular machine learning libraries and then convert the prediction functions of these models, which they write in numpy, to FHE.
**Concrete Numpy** is made of several parts:
- an entry API, the main function of the so-called **Concrete frontend**, which takes programs made from a subset of numpy and converts them to an FHE program
@@ -26,6 +26,6 @@ Basically, we have divided our documentation into several parts:
## A work in progress
```{note}
Concrete is a work in progress, and is currently limited to a certain number of operators and features. In the future, there will be improvements as described in this [section](../explanation/future_features.md).
```
{% hint style='info' %}
Concrete is a work in progress, and is currently limited to a certain number of operators and features. In the future, there will be improvements as described in this [section](explanation/future_features.md).
{% endhint %}

View File

@@ -1,35 +0,0 @@
@ECHO OFF
pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=.
set BUILDDIR=_build
if "%1" == "" goto help
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
goto end
:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
:end
popd

View File

@@ -1,6 +1,6 @@
# Compilation Artifacts
# Compilation artifacts
In this tutorial, we are going to go over the artifact system, which is designed to inspect/debug the compilation process easily.
## Automatic export
@@ -68,7 +68,7 @@ return %1
This file contains the visual representation of the initial computation graph right after tracing.
![](../../_static/tutorials/artifacts/auto/1.initial.graph.png)
![](../_static/tutorials/artifacts/auto/1.initial.graph.png)
### 2.final.graph.txt
@@ -84,7 +84,7 @@ return %1
This file contains the visual representation of the final computation graph right before MLIR conversion.
![](../../_static/tutorials/artifacts/auto/2.final.graph.png)
![](../_static/tutorials/artifacts/auto/2.final.graph.png)
### traceback.txt
@@ -151,7 +151,7 @@ return %8
This file contains the visual representation of the initial computation graph right after tracing.
![](../../_static/tutorials/artifacts/manual/1.initial.graph.png)
![](../_static/tutorials/artifacts/manual/1.initial.graph.png)
### 2.after-float-fuse-0.graph.txt
@@ -182,7 +182,7 @@ Subgraphs:
This file contains the visual representation of the intermediate computation graph after fusing.
![](../../_static/tutorials/artifacts/manual/2.after-float-fuse-0.graph.png)
![](../_static/tutorials/artifacts/manual/2.after-float-fuse-0.graph.png)
### 3.final.graph.txt
@@ -213,7 +213,7 @@ Subgraphs:
This file contains the visual representation of the final computation graph right before MLIR conversion.
![](../../_static/tutorials/artifacts/manual/3.final.graph.png)
![](../_static/tutorials/artifacts/manual/3.final.graph.png)
### bounds.txt
@@ -242,7 +242,6 @@ module {
return %1 : !FHE.eint<7>
}
}
```
You can learn more about MLIR [here](../../dev/explanation/mlir.md).

View File

@@ -84,9 +84,9 @@ assert np.array_equal(circuit.encrypt_run_decrypt(test_input), expected_output)
You can use multidimensional slicing as well.
#### Note
{% hint style='tip' %}
There are certain limitations on slicing due to MLIR. If you stumble into `RuntimeError: Compilation failed: Failed to lower to LLVM dialect`, know that we are aware of it, and we are trying to make such cases compilable.
{% endhint %}
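For instance, here is a minimal sketch of a multidimensional slice, assuming the `cnp.Compiler` API used elsewhere in this documentation (the shapes and inputset are ours, for illustration):

```python
import concrete.numpy as cnp
import numpy as np

def f(x):
    return x[1:, :2]  # keep the last two rows and the first two columns

compiler = cnp.Compiler(f, {"x": "encrypted"})
inputset = [np.random.randint(0, 16, size=(3, 3), dtype=np.uint8) for _ in range(10)]
circuit = compiler.compile(inputset)

sample = np.random.randint(0, 16, size=(3, 3), dtype=np.uint8)
assert np.array_equal(circuit.encrypt_run_decrypt(sample), f(sample))
```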
## Dynamic Indexing

View File

@@ -1,6 +1,6 @@
# Table Lookup
# Table lookup
In this tutorial, we are going to go over the ways to perform direct table lookups in **Concrete Numpy**. Please read [Compiling and Executing](../basics/compiling_and_executing.md) before reading further to see how you can compile the functions below.
In this tutorial, we are going to go over the ways to perform direct table lookups in **Concrete Numpy**. Please read [Compiling and Executing](../basics/compiling\_and\_executing.md) before reading further to see how you can compile the functions below.
## Direct table lookup
@@ -17,7 +17,7 @@ def f(x):
where
- `x = "encrypted"` scalar
* `x = "encrypted"` scalar
results in
@@ -31,7 +31,7 @@ circuit.encrypt_run_decrypt(3) == 0
Moreover, direct lookup tables can be used with tensors where the same table lookup is applied to each value in the tensor, so
- `x = "encrypted"` tensor of shape `(2, 3)`
* `x = "encrypted"` tensor of shape `(2, 3)`
results in
@@ -41,8 +41,7 @@ input = np.array([[0, 1, 3], [2, 3, 1]], dtype=np.uint8)
circuit.encrypt_run_decrypt(input) == [[2, 1, 0], [3, 0, 1]]
```
Direct table lookups behaves like array indexing in python.
Which means, if the lookup variable is negative, table is looked up from the back.
Direct table lookups behave like array indexing in Python, which means that if the lookup variable is negative, the table is looked up from the back.
```python
import concrete.numpy as cnp
@@ -55,7 +54,7 @@ def f(x):
where
- `x = "encrypted"` scalar
* `x = "encrypted"` scalar
results in
@@ -68,7 +67,7 @@ circuit.encrypt_run_decrypt(3) == 1
circuit.encrypt_run_decrypt(4) == 2
```
Lastly, a `LookupTable` can have any number of elements, let's call it **N**, as long as the lookup variable is in range [-**N**, **N**). If you go out of bounds of this range, you will get the following error:
Lastly, a `LookupTable` can have any number of elements, say **N**, as long as the lookup variable stays in the range \[-**N**, **N**). If you go out of this range, you will get the following error:
```
IndexError: index 10 is out of bounds for axis 0 with size 6
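To make the negative indexing behavior concrete, here is a minimal sketch, assuming the `cnp.LookupTable` and `cnp.Compiler` APIs shown in this documentation (the table values are ours, for illustration):

```python
import concrete.numpy as cnp

table = cnp.LookupTable([2, 1, 3, 0, 5, 4])  # N = 6, valid indices in [-6, 6)

def f(x):
    return table[x - 3]  # x in [0, 6) gives indices in [-3, 3)

compiler = cnp.Compiler(f, {"x": "encrypted"})
circuit = compiler.compile(list(range(6)))

assert circuit.encrypt_run_decrypt(0) == 0  # table[-3] == table[3] == 0
assert circuit.encrypt_run_decrypt(5) == 3  # table[2] == 3
```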
@@ -99,7 +98,7 @@ def f(x):
where
- `x = "encrypted"` tensor of shape `(3, 2)`
* `x = "encrypted"` tensor of shape `(3, 2)`
results in
@@ -113,7 +112,7 @@ Basically, we applied `squared` table to the first column and `cubed` to the sec
## Fused table lookup
Direct tables are tedious to prepare by hand. When possible, **Concrete Numpy** fuses the floating point operations into table lookups automatically. There are some limitations on fusing operations, which you can learn more about on the next tutorial, [Working With Floating Points](./working_with_floating_points.md).
Direct tables are tedious to prepare by hand. When possible, **Concrete Numpy** fuses the floating point operations into table lookups automatically. There are some limitations on fusing operations, which you can learn more about in the next tutorial, [Working With Floating Points](working\_with\_floating\_points.md).
Here is an example function that results in fused table lookup:
@@ -125,7 +124,7 @@ def f(x):
where
- `x = "encrypted"` scalar
* `x = "encrypted"` scalar
results in
@@ -143,11 +142,11 @@ circuit.encrypt_run_decrypt(7) == 45
Initially, the function is converted to this operation graph
![](../../_static/tutorials/table-lookup/1.initial.graph.png)
![](../\_static/tutorials/table-lookup/1.initial.graph.png)
and after floating point operations are fused, we get the following operation graph
![](../../_static/tutorials/table-lookup/3.final.graph.png)
![](../\_static/tutorials/table-lookup/3.final.graph.png)
Internally, it uses the following lookup table

View File

@@ -1,4 +1,4 @@
# Working With Floating Points
# Working with floating points
## An example
@@ -23,8 +23,7 @@ print(circuit.encrypt_run_decrypt(60) == f(60))
print("All good!")
```
One can look to [numpy supported functions](../howto/numpy_support.md) for information about possible float operations.
You can refer to [numpy supported functions](../howto/numpy\_support.md) for information about possible float operations.
## Limitations
@@ -34,4 +33,4 @@ This biggest one is that, because floating point operations are fused into table
To give a precise example, `100 - np.fabs(50 * (np.sin(x) + np.sin(y)))` cannot be compiled because the floating point part depends on both `x` and `y` (i.e., it cannot be rewritten in the form `100 - table[z]` for a `z` that could be computed easily from `x` and `y`).
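By contrast, here is a minimal sketch of a variant that should fuse, since its floating point part depends on a single encrypted value (the exact function and the integer cast are ours, for illustration):

```python
import concrete.numpy as cnp
import numpy as np

def fusable(x):
    # The float subgraph depends only on x, so it can be rewritten
    # as 100 - table[x] and fused into a single table lookup
    return 100 - np.fabs(50 * (np.sin(x) + 1)).astype(np.int64)

compiler = cnp.Compiler(fusable, {"x": "encrypted"})
circuit = compiler.compile(list(range(8)))
assert circuit.encrypt_run_decrypt(3) == fusable(3)
```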
To dive into implementation details, you may refer to [Fusing Floating Point Operations](../../dev/explanation/float-fusing.md) document.
To dive into implementation details, you may refer to the [Fusing Floating Point Operations](../../developer/float-fusing.md) document.

View File

@@ -1,9 +0,0 @@
Getting Started
===============
.. toctree::
:maxdepth: 1
intro.md
installing.md
compiling_and_executing.md

View File

@@ -1,9 +0,0 @@
Explanations
============
.. toctree::
:maxdepth: 1
what_is_fhe.md
fhe_and_framework_limits.md
future_features.md

View File

@@ -1,28 +0,0 @@
# FAQ
## What is **Concrete Numpy**?
See [here](../basics/intro.md). Also, you can have a look at Zama's [website](https://zama.ai) or at the [Concrete library GitHub](https://github.com/zama-ai/concrete).
## Is it an open source project?
Our mission at Zama is to protect people's privacy by preventing data breaches and unethical surveillance.
Following a recent breakthrough in fully homomorphic encryption, we are building a deep learning framework that enables fast and accurate inference over encrypted data, with minimal performance overhead, no changes to the network architecture, and no retraining necessary. Zama is open-source by design, as we believe privacy-enabling technologies should benefit the widest possible community of developers and researchers. If you are interested in the details, be sure to read our licensing agreement in the root of the repository. In a nutshell, if you plan to use **Concrete Numpy** for non-commercial purposes, we welcome you and want to hear about all the exciting things you do with FHE!
## Can I use it freely?
See our license in the root of the repository.
## Can I contribute?
See this [section](../../dev/howto/contributing.md).
## What are the future features of **Concrete Numpy**?
See this [section](../explanation/future_features.md).
## I don't find the answer to my question. What should I do?
The best option is to ask your question on Discourse, which you can access [here](https://community.zama.ai) or by clicking the Discourse logo in the top right corner of every page of this documentation.

View File

@@ -1,11 +0,0 @@
How To
======
.. toctree::
:maxdepth: 1
numpy_support.md
printing_and_drawing.md
reduce_needed_precision.md
debug_support_submit_issues.md
faq.md

View File

@@ -1,10 +0,0 @@
User guide
==========
.. toctree::
:maxdepth: 2
Getting started <basics/index>
Tutorial <tutorial/index>
How To <howto/index>
Explanations <explanation/index>

View File

@@ -1,11 +0,0 @@
Tutorial
========
.. toctree::
:maxdepth: 1
table_lookup.md
working_with_floating_points.md
indexing.md
tensor_operations.ipynb
compilation_artifacts.md

View File

@@ -1,565 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "34d13212",
"metadata": {},
"source": [
"# Working With Tensors"
]
},
{
"cell_type": "markdown",
"id": "6999361c",
"metadata": {},
"source": [
"In this tutorial, we'll go over what you can do with encrypted tensors. Each supported operation will be written out as a function. Then, all of them will be compiled in a loop and executed with a random input to demonstrate their semantics."
]
},
{
"cell_type": "markdown",
"id": "34fc7213",
"metadata": {},
"source": [
"### Imports"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "a62e11a9",
"metadata": {},
"outputs": [],
"source": [
"import concrete.numpy as cnp\n",
"import inspect\n",
"import numpy as np"
]
},
{
"cell_type": "markdown",
"id": "6180966a",
"metadata": {},
"source": [
"### Inputset Definition"
]
},
{
"cell_type": "markdown",
"id": "ab71e23f",
"metadata": {},
"source": [
"We will generate some random input tensors as calibration data for our encrypted tensor functions."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "f8de515c",
"metadata": {},
"outputs": [],
"source": [
"inputset = [np.random.randint(3, 11, size=(3, 2), dtype=np.uint8) for _ in range(10)]"
]
},
{
"cell_type": "markdown",
"id": "ae02c598",
"metadata": {},
"source": [
"### Supported Operation Definitions"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "d7eeb83c",
"metadata": {},
"outputs": [],
"source": [
"def reshape(x):\n",
" return x.reshape((2, 3))"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "68510258",
"metadata": {},
"outputs": [],
"source": [
"def flatten(x):\n",
" return x.flatten()"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "db8f502b",
"metadata": {},
"outputs": [],
"source": [
"def index(x):\n",
" return x[2, 0]"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "5e08a6c4",
"metadata": {},
"outputs": [],
"source": [
"def slice_(x):\n",
" return x.flatten()[1:5]"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "b807cc5d",
"metadata": {},
"outputs": [],
"source": [
"def add_scalar(x):\n",
" return x + 10"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "59471d3a",
"metadata": {},
"outputs": [],
"source": [
"def add_tensor(x):\n",
" return x + np.array([[1, 2], [3, 3], [2, 1]], dtype=np.uint8)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "83bf7d53",
"metadata": {},
"outputs": [],
"source": [
"def add_tensor_broadcasted(x):\n",
" return x + np.array([1, 10], dtype=np.uint8)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "ff42df0b",
"metadata": {},
"outputs": [],
"source": [
"def sub_scalar(x):\n",
" return x + (-1)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "0cc14f94",
"metadata": {},
"outputs": [],
"source": [
"def sub_tensor(x):\n",
" return x + (-np.array([[1, 2], [3, 3], [2, 1]], dtype=np.uint8))"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "5e83dd23",
"metadata": {},
"outputs": [],
"source": [
"def sub_tensor_broadcasted(x):\n",
" return x + (-np.array([3, 0], dtype=np.uint8))"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "9c68c725",
"metadata": {},
"outputs": [],
"source": [
"def mul_scalar(x):\n",
" return x * 2"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "66d065e0",
"metadata": {},
"outputs": [],
"source": [
"def mul_tensor(x):\n",
" return x * np.array([[1, 2], [3, 3], [2, 1]], dtype=np.uint8)"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "a04ae50b",
"metadata": {},
"outputs": [],
"source": [
"def mul_tensor_broadcasted(x):\n",
" return x * np.array([2, 3], dtype=np.uint8)"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "39fb823b",
"metadata": {},
"outputs": [],
"source": [
"def power(x):\n",
" return x ** 2"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "4257c1c9",
"metadata": {},
"outputs": [],
"source": [
"def truediv(x):\n",
" return x // 2"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "712b965a",
"metadata": {},
"outputs": [],
"source": [
"def dot(x):\n",
" return x.flatten() @ np.array([1, 1, 1, 2, 1, 1], dtype=np.uint8)"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "480b6cc7",
"metadata": {},
"outputs": [],
"source": [
"def matmul(x):\n",
" return x @ np.array([[1, 2, 3], [3, 2, 1]], dtype=np.uint8)"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "b876272b",
"metadata": {},
"outputs": [],
"source": [
"def clip(x):\n",
" return x.clip(6, 11)"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "cec1d224",
"metadata": {},
"outputs": [],
"source": [
"def comparison(x):\n",
" return x > np.array([[10, 5], [8, 11], [3, 7]], dtype=np.uint8)"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "668ab894",
"metadata": {},
"outputs": [],
"source": [
"def minimum(x):\n",
" return np.minimum(x, np.array([[10, 5], [8, 11], [3, 7]], dtype=np.uint8))"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "14031662",
"metadata": {},
"outputs": [],
"source": [
"def maximum(x):\n",
" return np.maximum(x, np.array([[10, 5], [8, 11], [3, 7]], dtype=np.uint8))"
]
},
{
"cell_type": "markdown",
"id": "12332a5b",
"metadata": {},
"source": [
"Other than these, we support a lot of numpy functions which you can find more about at [Numpy Support](../howto/numpy_support.md)."
]
},
{
"cell_type": "markdown",
"id": "e917b82a",
"metadata": {},
"source": [
"### Prepare Supported Operations List "
]
},
{
"cell_type": "markdown",
"id": "9495a29d",
"metadata": {},
"source": [
"We will create a list of supported operations to showcase them in a loop."
]
},
{
"cell_type": "code",
"execution_count": 24,
"id": "0cb14b31",
"metadata": {},
"outputs": [],
"source": [
"supported_operations = [\n",
" reshape,\n",
" flatten,\n",
" index,\n",
" slice_,\n",
" add_scalar,\n",
" add_tensor,\n",
" add_tensor_broadcasted,\n",
" sub_scalar,\n",
" sub_tensor,\n",
" sub_tensor_broadcasted,\n",
" mul_scalar,\n",
" mul_tensor,\n",
" mul_tensor_broadcasted,\n",
" power,\n",
" truediv,\n",
" dot,\n",
" matmul,\n",
" clip,\n",
" comparison,\n",
" maximum,\n",
" minimum,\n",
"]"
]
},
{
"cell_type": "markdown",
"id": "09311480",
"metadata": {},
"source": [
"### Compilation and Homomorphic Evaluation of Supported Operations"
]
},
{
"cell_type": "markdown",
"id": "cf0152a2",
"metadata": {},
"source": [
"Note that some operations require programmable bootstrapping to work and programmable bootstrapping has a certain probability of failure. Usually, it has more than a 99% probability of success but with big bit-widths, this probability can drop to 95%."
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "0cdbc545",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"#######################################################################################\n",
"\n",
"def reshape(x):\n",
" return x.reshape((2, 3))\n",
"\n",
"reshape([[3, 6], [5, 6], [9, 10]]) homomorphically evaluates to [[3, 6, 5], [6, 9, 10]]\n",
"\n",
"#######################################################################################\n",
"\n",
"def flatten(x):\n",
" return x.flatten()\n",
"\n",
"flatten([[7, 8], [10, 9], [8, 9]]) homomorphically evaluates to [7, 8, 10, 9, 8, 9]\n",
"\n",
"#######################################################################################\n",
"\n",
"def index(x):\n",
" return x[2, 0]\n",
"\n",
"index([[3, 10], [5, 4], [6, 4]]) homomorphically evaluates to 6\n",
"\n",
"#######################################################################################\n",
"\n",
"def slice_(x):\n",
" return x.flatten()[1:5]\n",
"\n",
"slice_([[5, 7], [5, 6], [9, 5]]) homomorphically evaluates to [7, 5, 6, 9]\n",
"\n",
"#######################################################################################\n",
"\n",
"def add_scalar(x):\n",
" return x + 10\n",
"\n",
"add_scalar([[3, 5], [4, 8], [9, 5]]) homomorphically evaluates to [[13, 15], [14, 18], [19, 15]]\n",
"\n",
"#######################################################################################\n",
"\n",
"def add_tensor(x):\n",
" return x + np.array([[1, 2], [3, 3], [2, 1]], dtype=np.uint8)\n",
"\n",
"add_tensor([[4, 3], [4, 9], [8, 3]]) homomorphically evaluates to [[5, 5], [7, 12], [10, 4]]\n",
"\n",
"#######################################################################################\n",
"\n",
"def add_tensor_broadcasted(x):\n",
" return x + np.array([1, 10], dtype=np.uint8)\n",
"\n",
"add_tensor_broadcasted([[9, 3], [4, 4], [8, 6]]) homomorphically evaluates to [[10, 13], [5, 14], [9, 16]]\n",
"\n",
"#######################################################################################\n",
"\n",
"def sub_scalar(x):\n",
" return x + (-1)\n",
"\n",
"sub_scalar([[6, 6], [5, 10], [4, 9]]) homomorphically evaluates to [[5, 5], [4, 9], [3, 8]]\n",
"\n",
"#######################################################################################\n",
"\n",
"def sub_tensor(x):\n",
" return x + (-np.array([[1, 2], [3, 3], [2, 1]], dtype=np.uint8))\n",
"\n",
"sub_tensor([[7, 3], [6, 3], [9, 5]]) homomorphically evaluates to [[6, 1], [3, 0], [7, 4]]\n",
"\n",
"#######################################################################################\n",
"\n",
"def sub_tensor_broadcasted(x):\n",
" return x + (-np.array([3, 0], dtype=np.uint8))\n",
"\n",
"sub_tensor_broadcasted([[6, 7], [10, 6], [3, 10]]) homomorphically evaluates to [[3, 7], [7, 6], [0, 10]]\n",
"\n",
"#######################################################################################\n",
"\n",
"def mul_scalar(x):\n",
" return x * 2\n",
"\n",
"mul_scalar([[10, 4], [8, 6], [7, 7]]) homomorphically evaluates to [[20, 8], [16, 12], [14, 14]]\n",
"\n",
"#######################################################################################\n",
"\n",
"def mul_tensor(x):\n",
" return x * np.array([[1, 2], [3, 3], [2, 1]], dtype=np.uint8)\n",
"\n",
"mul_tensor([[10, 8], [3, 6], [8, 4]]) homomorphically evaluates to [[10, 16], [9, 18], [16, 4]]\n",
"\n",
"#######################################################################################\n",
"\n",
"def mul_tensor_broadcasted(x):\n",
" return x * np.array([2, 3], dtype=np.uint8)\n",
"\n",
"mul_tensor_broadcasted([[4, 5], [9, 7], [9, 5]]) homomorphically evaluates to [[8, 15], [18, 21], [18, 15]]\n",
"\n",
"#######################################################################################\n",
"\n",
"def power(x):\n",
" return x ** 2\n",
"\n",
"power([[10, 9], [9, 10], [8, 7]]) homomorphically evaluates to [[100, 81], [81, 100], [64, 49]]\n",
"\n",
"#######################################################################################\n",
"\n",
"def truediv(x):\n",
" return x // 2\n",
"\n",
"truediv([[10, 7], [7, 7], [4, 8]]) homomorphically evaluates to [[5, 3], [3, 3], [2, 4]]\n",
"\n",
"#######################################################################################\n",
"\n",
"def dot(x):\n",
" return x.flatten() @ np.array([1, 1, 1, 2, 1, 1], dtype=np.uint8)\n",
"\n",
"dot([[3, 10], [4, 7], [7, 6]]) homomorphically evaluates to 44\n",
"\n",
"#######################################################################################\n",
"\n",
"def matmul(x):\n",
" return x @ np.array([[1, 2, 3], [3, 2, 1]], dtype=np.uint8)\n",
"\n",
"matmul([[8, 9], [5, 5], [8, 9]]) homomorphically evaluates to [[35, 34, 33], [20, 20, 20], [35, 34, 33]]\n",
"\n",
"#######################################################################################\n",
"\n",
"def clip(x):\n",
" return x.clip(6, 11)\n",
"\n",
"clip([[3, 4], [4, 4], [8, 7]]) homomorphically evaluates to [[6, 6], [6, 6], [8, 7]]\n",
"\n",
"#######################################################################################\n",
"\n",
"def comparison(x):\n",
" return x > np.array([[10, 5], [8, 11], [3, 7]], dtype=np.uint8)\n",
"\n",
"comparison([[3, 5], [8, 8], [3, 7]]) homomorphically evaluates to [[0, 0], [0, 0], [0, 0]]\n",
"\n",
"#######################################################################################\n",
"\n",
"def maximum(x):\n",
" return np.maximum(x, np.array([[10, 5], [8, 11], [3, 7]], dtype=np.uint8))\n",
"\n",
"maximum([[5, 10], [4, 9], [9, 6]]) homomorphically evaluates to [[10, 10], [8, 11], [9, 7]]\n",
"\n",
"#######################################################################################\n",
"\n",
"def minimum(x):\n",
" return np.minimum(x, np.array([[10, 5], [8, 11], [3, 7]], dtype=np.uint8))\n",
"\n",
"minimum([[9, 8], [4, 3], [5, 9]]) homomorphically evaluates to [[9, 5], [4, 3], [3, 7]]\n",
"\n"
]
}
],
"source": [
"for operation in supported_operations:\n",
" compiler = cnp.Compiler(operation, {\"x\": \"encrypted\"})\n",
" circuit = compiler.compile(inputset)\n",
" \n",
" # We setup an example tensor that will be encrypted and passed on to the current operation\n",
" sample = np.random.randint(3, 11, size=(3, 2), dtype=np.uint8)\n",
" result = circuit.encrypt_run_decrypt(sample)\n",
" \n",
" print(\"#######################################################################################\")\n",
" print()\n",
" print(f\"{inspect.getsource(operation)}\")\n",
" print(f\"{operation.__name__}({sample.tolist()}) homomorphically evaluates to {result if isinstance(result, int) else result.tolist()}\")\n",
" print()\n",
"\n",
" expected = operation(sample)\n",
" if not np.array_equal(result, expected):\n",
" print(f\"(It should have been evaluated to {expected if isinstance(expected, int) else expected.tolist()} but it didn't due to an error during PBS)\")\n",
" print()"
]
}
],
"metadata": {
"execution": {
"timeout": 10800
}
},
"nbformat": 4,
"nbformat_minor": 5
}

323
poetry.lock generated
View File

@@ -1,11 +1,3 @@
[[package]]
name = "alabaster"
version = "0.7.12"
description = "A configurable sidebar-enabled Sphinx theme"
category = "dev"
optional = false
python-versions = "*"
[[package]]
name = "appnope"
version = "0.1.3"
@@ -94,17 +86,6 @@ docs = ["furo", "sphinx", "zope.interface", "sphinx-notfound-page"]
tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "cloudpickle"]
tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "cloudpickle"]
[[package]]
name = "babel"
version = "2.10.1"
description = "Internationalization utilities"
category = "dev"
optional = false
python-versions = ">=3.6"
[package.dependencies]
pytz = ">=2015.7"
[[package]]
name = "backcall"
version = "0.2.0"
@@ -115,11 +96,11 @@ python-versions = "*"
[[package]]
name = "beautifulsoup4"
version = "4.10.0"
version = "4.11.1"
description = "Screen-scraping library"
category = "dev"
optional = false
python-versions = ">3.0.0"
python-versions = ">=3.6.0"
[package.dependencies]
soupsieve = ">1.2"
@@ -336,7 +317,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
[[package]]
name = "docutils"
version = "0.17.1"
version = "0.18.1"
description = "Docutils -- Python Documentation Utilities"
category = "dev"
optional = false
@@ -489,14 +470,6 @@ category = "dev"
optional = false
python-versions = ">=3.5"
[[package]]
name = "imagesize"
version = "1.3.0"
description = "Getting image size from png/jpeg/jpeg2000/gif file"
category = "dev"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
[[package]]
name = "importlib-metadata"
version = "4.11.4"
@@ -830,25 +803,6 @@ category = "dev"
optional = false
python-versions = "*"
[[package]]
name = "markdown-it-py"
version = "1.1.0"
description = "Python port of markdown-it. Markdown parsing, done right!"
category = "dev"
optional = false
python-versions = "~=3.6"
[package.dependencies]
attrs = ">=19,<22"
[package.extras]
code_style = ["pre-commit (==2.6)"]
compare = ["commonmark (>=0.9.1,<0.10.0)", "markdown (>=3.2.2,<3.3.0)", "mistletoe-ebp (>=0.10.0,<0.11.0)", "mistune (>=0.8.4,<0.9.0)", "panflute (>=1.12,<2.0)"]
linkify = ["linkify-it-py (>=1.0,<2.0)"]
plugins = ["mdit-py-plugins"]
rtd = ["myst-nb (==0.13.0a1)", "pyyaml", "sphinx (>=2,<4)", "sphinx-copybutton", "sphinx-panels (>=0.4.0,<0.5.0)", "sphinx-book-theme"]
testing = ["coverage", "psutil", "pytest (>=3.6,<4)", "pytest-benchmark (>=3.2,<4.0)", "pytest-cov", "pytest-regressions"]
[[package]]
name = "markupsafe"
version = "2.1.1"
@@ -895,22 +849,6 @@ category = "dev"
optional = false
python-versions = "*"
[[package]]
name = "mdit-py-plugins"
version = "0.2.8"
description = "Collection of plugins for markdown-it-py"
category = "dev"
optional = false
python-versions = "~=3.6"
[package.dependencies]
markdown-it-py = ">=1.0,<2.0"
[package.extras]
code_style = ["pre-commit (==2.6)"]
rtd = ["myst-parser (==0.14.0a3)", "sphinx-book-theme (>=0.1.0,<0.2.0)"]
testing = ["coverage", "pytest (>=3.6,<4)", "pytest-cov", "pytest-regressions"]
[[package]]
name = "mistune"
version = "0.8.4"
@@ -952,28 +890,6 @@ category = "dev"
optional = false
python-versions = "*"
[[package]]
name = "myst-parser"
version = "0.15.2"
description = "An extended commonmark compliant parser, with bridges to docutils & sphinx."
category = "dev"
optional = false
python-versions = ">=3.6"
[package.dependencies]
docutils = ">=0.15,<0.18"
jinja2 = "*"
markdown-it-py = ">=1.0.0,<2.0.0"
mdit-py-plugins = ">=0.2.8,<0.3.0"
pyyaml = "*"
sphinx = ">=3.1,<5"
[package.extras]
code_style = ["pre-commit (>=2.12,<3.0)"]
linkify = ["linkify-it-py (>=1.0,<2.0)"]
rtd = ["ipython", "sphinx-book-theme (>=0.1.0,<0.2.0)", "sphinx-panels (>=0.5.2,<0.6.0)", "sphinxcontrib-bibtex (>=2.1,<3.0)", "sphinxext-rediraffe (>=0.2,<1.0)", "sphinxcontrib.mermaid (>=0.6.3,<0.7.0)", "sphinxext-opengraph (>=0.4.2,<0.5.0)"]
testing = ["beautifulsoup4", "coverage", "docutils (>=0.17.0,<0.18.0)", "pytest (>=3.6,<4)", "pytest-cov", "pytest-regressions"]
[[package]]
name = "nbclient"
version = "0.5.13"
@@ -1058,22 +974,6 @@ pydantic = ">=1.7.2,<2.0.0"
Pygments = ">=2.7.3,<3.0.0"
pytest = ">=6.1.0"
[[package]]
name = "nbsphinx"
version = "0.8.7"
description = "Jupyter Notebook Tools for Sphinx"
category = "dev"
optional = false
python-versions = ">=3.6"
[package.dependencies]
docutils = "*"
jinja2 = "*"
nbconvert = "!=5.4"
nbformat = "*"
sphinx = ">=1.8"
traitlets = "*"
[[package]]
name = "nest-asyncio"
version = "1.5.5"
@@ -1646,14 +1546,6 @@ docs = ["Sphinx (==1.3.6)"]
mypy = ["mypy", "types-requests"]
test = ["coverage (>=5,<6)", "pytest (>=5,<6)", "pytest-xdist (>=1,<2)", "pytest-mock (>=2,<3)", "responses (==0.13.3)", "mock (==1.3.0)"]
[[package]]
name = "pytz"
version = "2022.1"
description = "World timezone definitions, modern and historical"
category = "dev"
optional = false
python-versions = "*"
[[package]]
name = "pywin32"
version = "304"
@@ -1885,138 +1777,6 @@ category = "dev"
optional = false
python-versions = ">=3.6"
[[package]]
name = "sphinx"
version = "4.3.0"
description = "Python documentation generator"
category = "dev"
optional = false
python-versions = ">=3.6"
[package.dependencies]
alabaster = ">=0.7,<0.8"
babel = ">=1.3"
colorama = {version = ">=0.3.5", markers = "sys_platform == \"win32\""}
docutils = ">=0.14,<0.18"
imagesize = "*"
Jinja2 = ">=2.3"
packaging = "*"
Pygments = ">=2.0"
requests = ">=2.5.0"
snowballstemmer = ">=1.1"
sphinxcontrib-applehelp = "*"
sphinxcontrib-devhelp = "*"
sphinxcontrib-htmlhelp = ">=2.0.0"
sphinxcontrib-jsmath = "*"
sphinxcontrib-qthelp = "*"
sphinxcontrib-serializinghtml = ">=1.1.5"
[package.extras]
docs = ["sphinxcontrib-websupport"]
lint = ["flake8 (>=3.5.0)", "isort", "mypy (>=0.900)", "docutils-stubs", "types-typed-ast", "types-pkg-resources", "types-requests"]
test = ["pytest", "pytest-cov", "html5lib", "cython", "typed-ast"]
[[package]]
name = "sphinx-copybutton"
version = "0.4.0"
description = "Add a copy button to each of your code cells."
category = "dev"
optional = false
python-versions = ">=3.6"
[package.dependencies]
sphinx = ">=1.8"
[package.extras]
code_style = ["pre-commit (==2.12.1)"]
rtd = ["sphinx", "ipython", "sphinx-book-theme"]
[[package]]
name = "sphinx-zama-theme"
version = "3.0.1"
description = "Zama sphinx theme forked from PyData sphinx theme"
category = "dev"
optional = false
python-versions = ">=3.5"
[package.dependencies]
beautifulsoup4 = "4.10.0"
docutils = "0.17.1"
myst-parser = "0.15.2"
sphinx = "4.3.0"
sphinx-copybutton = "0.4.0"
[[package]]
name = "sphinxcontrib-applehelp"
version = "1.0.2"
description = "sphinxcontrib-applehelp is a sphinx extension which outputs Apple help books"
category = "dev"
optional = false
python-versions = ">=3.5"
[package.extras]
lint = ["flake8", "mypy", "docutils-stubs"]
test = ["pytest"]
[[package]]
name = "sphinxcontrib-devhelp"
version = "1.0.2"
description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp document."
category = "dev"
optional = false
python-versions = ">=3.5"
[package.extras]
lint = ["flake8", "mypy", "docutils-stubs"]
test = ["pytest"]
[[package]]
name = "sphinxcontrib-htmlhelp"
version = "2.0.0"
description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files"
category = "dev"
optional = false
python-versions = ">=3.6"
[package.extras]
lint = ["flake8", "mypy", "docutils-stubs"]
test = ["pytest", "html5lib"]
[[package]]
name = "sphinxcontrib-jsmath"
version = "1.0.1"
description = "A sphinx extension which renders display math in HTML via JavaScript"
category = "dev"
optional = false
python-versions = ">=3.5"
[package.extras]
test = ["pytest", "flake8", "mypy"]
[[package]]
name = "sphinxcontrib-qthelp"
version = "1.0.3"
description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp document."
category = "dev"
optional = false
python-versions = ">=3.5"
[package.extras]
lint = ["flake8", "mypy", "docutils-stubs"]
test = ["pytest"]
[[package]]
name = "sphinxcontrib-serializinghtml"
version = "1.1.5"
description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)."
category = "dev"
optional = false
python-versions = ">=3.5"
[package.extras]
lint = ["flake8", "mypy", "docutils-stubs"]
test = ["pytest"]
[[package]]
name = "stack-data"
version = "0.2.0"
@@ -2246,13 +2006,9 @@ full = ["pygraphviz"]
[metadata]
lock-version = "1.1"
python-versions = ">=3.8,<3.10"
content-hash = "22b4738ba17cdaca07020871b0003c564b27bfdd5ed63954954b53cb7c109330"
content-hash = "9d0d9af2de732a27512f6a95416d849df0295cb056366996cb67a8967c51bd3f"
[metadata.files]
alabaster = [
{file = "alabaster-0.7.12-py2.py3-none-any.whl", hash = "sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359"},
{file = "alabaster-0.7.12.tar.gz", hash = "sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02"},
]
appnope = [
{file = "appnope-0.1.3-py2.py3-none-any.whl", hash = "sha256:265a455292d0bd8a72453494fa24df5a11eb18373a60c7c0430889f22548605e"},
{file = "appnope-0.1.3.tar.gz", hash = "sha256:02bd91c4de869fbb1e1c50aafc4098827a7a54ab2f39d9dcba6c9547ed920e24"},
@@ -2300,17 +2056,13 @@ attrs = [
{file = "attrs-21.4.0-py2.py3-none-any.whl", hash = "sha256:2d27e3784d7a565d36ab851fe94887c5eccd6a463168875832a1be79c82828b4"},
{file = "attrs-21.4.0.tar.gz", hash = "sha256:626ba8234211db98e869df76230a137c4c40a12d72445c45d5f5b716f076e2fd"},
]
babel = [
{file = "Babel-2.10.1-py3-none-any.whl", hash = "sha256:3f349e85ad3154559ac4930c3918247d319f21910d5ce4b25d439ed8693b98d2"},
{file = "Babel-2.10.1.tar.gz", hash = "sha256:98aeaca086133efb3e1e2aad0396987490c8425929ddbcfe0550184fdc54cd13"},
]
backcall = [
{file = "backcall-0.2.0-py2.py3-none-any.whl", hash = "sha256:fbbce6a29f263178a1f7915c1940bde0ec2b2a967566fe1c65c1dfb7422bd255"},
{file = "backcall-0.2.0.tar.gz", hash = "sha256:5cbdbf27be5e7cfadb448baf0aa95508f91f2bbc6c6437cd9cd06e2a4c215e1e"},
]
beautifulsoup4 = [
{file = "beautifulsoup4-4.10.0-py3-none-any.whl", hash = "sha256:9a315ce70049920ea4572a4055bc4bd700c940521d36fc858205ad4fcde149bf"},
{file = "beautifulsoup4-4.10.0.tar.gz", hash = "sha256:c23ad23c521d818955a4151a67d81580319d4bf548d3d49f4223ae041ff98891"},
{file = "beautifulsoup4-4.11.1-py3-none-any.whl", hash = "sha256:58d5c3d29f5a36ffeb94f02f0d786cd53014cf9b3b3951d42e0080d8a9498d30"},
{file = "beautifulsoup4-4.11.1.tar.gz", hash = "sha256:ad9aa55b65ef2808eb405f46cf74df7fcb7044d5cbc26487f96eb2ef2e436693"},
]
black = [
{file = "black-22.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2497f9c2386572e28921fa8bec7be3e51de6801f7459dffd6e62492531c47e09"},
@@ -2529,8 +2281,8 @@ defusedxml = [
{file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"},
]
docutils = [
{file = "docutils-0.17.1-py2.py3-none-any.whl", hash = "sha256:cf316c8370a737a022b72b56874f6602acf974a37a9fba42ec2876387549fc61"},
{file = "docutils-0.17.1.tar.gz", hash = "sha256:686577d2e4c32380bb50cbb22f575ed742d58168cee37e99117a854bcd88f125"},
{file = "docutils-0.18.1-py2.py3-none-any.whl", hash = "sha256:23010f129180089fbcd3bc08cfefccb3b890b0050e1ca00c867036e9d161b98c"},
{file = "docutils-0.18.1.tar.gz", hash = "sha256:679987caf361a7539d76e584cbeddc311e3aee937877c87346f31debc63e9d06"},
]
dotty-dict = [
{file = "dotty_dict-1.3.0.tar.gz", hash = "sha256:eb0035a3629ecd84397a68f1f42f1e94abd1c34577a19cd3eacad331ee7cbaf0"},
@@ -2579,10 +2331,6 @@ idna = [
{file = "idna-3.3-py3-none-any.whl", hash = "sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff"},
{file = "idna-3.3.tar.gz", hash = "sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d"},
]
imagesize = [
{file = "imagesize-1.3.0-py2.py3-none-any.whl", hash = "sha256:1db2f82529e53c3e929e8926a1fa9235aa82d0bd0c580359c67ec31b2fddaa8c"},
{file = "imagesize-1.3.0.tar.gz", hash = "sha256:cd1750d452385ca327479d45b64d9c7729ecf0b3969a58148298c77092261f9d"},
]
importlib-metadata = [
{file = "importlib_metadata-4.11.4-py3-none-any.whl", hash = "sha256:c58c8eb8a762858f49e18436ff552e83914778e50e9d2f1660535ffb364552ec"},
{file = "importlib_metadata-4.11.4.tar.gz", hash = "sha256:5d26852efe48c0a32b0509ffbc583fda1a2266545a78d104a6f4aff3db17d700"},
@@ -2752,10 +2500,6 @@ lockfile = [
{file = "lockfile-0.12.2-py2.py3-none-any.whl", hash = "sha256:6c3cb24f344923d30b2785d5ad75182c8ea7ac1b6171b08657258ec7429d50fa"},
{file = "lockfile-0.12.2.tar.gz", hash = "sha256:6aed02de03cba24efabcd600b30540140634fc06cfa603822d508d5361e9f799"},
]
markdown-it-py = [
{file = "markdown-it-py-1.1.0.tar.gz", hash = "sha256:36be6bb3ad987bfdb839f5ba78ddf094552ca38ccbd784ae4f74a4e1419fc6e3"},
{file = "markdown_it_py-1.1.0-py3-none-any.whl", hash = "sha256:98080fc0bc34c4f2bcf0846a096a9429acbd9d5d8e67ed34026c03c61c464389"},
]
markupsafe = [
{file = "MarkupSafe-2.1.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:86b1f75c4e7c2ac2ccdaec2b9022845dbb81880ca318bb7a0a01fbf7813e3812"},
{file = "MarkupSafe-2.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f121a1420d4e173a5d96e47e9a0c0dcff965afdf1626d28de1460815f7c4ee7a"},
@@ -2843,10 +2587,6 @@ mccabe = [
{file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"},
{file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"},
]
mdit-py-plugins = [
{file = "mdit-py-plugins-0.2.8.tar.gz", hash = "sha256:5991cef645502e80a5388ec4fc20885d2313d4871e8b8e320ca2de14ac0c015f"},
{file = "mdit_py_plugins-0.2.8-py3-none-any.whl", hash = "sha256:1833bf738e038e35d89cb3a07eb0d227ed647ce7dd357579b65343740c6d249c"},
]
mistune = [
{file = "mistune-0.8.4-py2.py3-none-any.whl", hash = "sha256:88a1051873018da288eee8538d476dffe1262495144b33ecb586c4ab266bb8d4"},
{file = "mistune-0.8.4.tar.gz", hash = "sha256:59a3429db53c50b5c6bcc8a07f8848cb00d7dc8bdb431a4ab41920d201d4756e"},
@@ -2913,10 +2653,6 @@ mypy-extensions = [
{file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"},
{file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"},
]
myst-parser = [
{file = "myst-parser-0.15.2.tar.gz", hash = "sha256:f7f3b2d62db7655cde658eb5d62b2ec2a4631308137bd8d10f296a40d57bbbeb"},
{file = "myst_parser-0.15.2-py3-none-any.whl", hash = "sha256:40124b6f27a4c42ac7f06b385e23a9dcd03d84801e9c7130b59b3729a554b1f9"},
]
nbclient = [
{file = "nbclient-0.5.13-py3-none-any.whl", hash = "sha256:47ac905af59379913c1f8f541098d2550153cf8dc58553cbe18c702b181518b0"},
{file = "nbclient-0.5.13.tar.gz", hash = "sha256:40c52c9b5e3c31faecaee69f202b3f53e38d7c1c563de0fadde9d7eda0fdafe8"},
@@ -2933,10 +2669,6 @@ nbmake = [
{file = "nbmake-1.3.0-py3-none-any.whl", hash = "sha256:8b38089dd232142ce894a9ad3e57a7c0f0a0edb0254662a8446346a84ac4079d"},
{file = "nbmake-1.3.0.tar.gz", hash = "sha256:49d5c59aefe45eaf8e2d8feff86c8e6de5547d823667305562364385e60d7206"},
]
nbsphinx = [
{file = "nbsphinx-0.8.7-py3-none-any.whl", hash = "sha256:8862f291f98c1a163bdb5bac8adf25c61585a81575ac5c613320c6f3fe5c472f"},
{file = "nbsphinx-0.8.7.tar.gz", hash = "sha256:ff91b5b14ceb1a9d44193b5fc3dd3617e7b8ab59c788f7710049ce5faff2750c"},
]
nest-asyncio = [
{file = "nest_asyncio-1.5.5-py3-none-any.whl", hash = "sha256:b98e3ec1b246135e4642eceffa5a6c23a3ab12c82ff816a92c612d68205813b2"},
{file = "nest_asyncio-1.5.5.tar.gz", hash = "sha256:e442291cd942698be619823a17a86a5759eabe1f8613084790de189fe9e16d65"},
@@ -3263,10 +2995,6 @@ python-semantic-release = [
{file = "python-semantic-release-7.23.0.tar.gz", hash = "sha256:48c33bf671dafa1257e7d955543856eb98486a3f976f586053556ae180d725da"},
{file = "python_semantic_release-7.23.0-py3-none-any.whl", hash = "sha256:5bf7fcdb28e5e9888c9a15a1168afe53302116a6874d818580d4c58db60283ab"},
]
pytz = [
{file = "pytz-2022.1-py2.py3-none-any.whl", hash = "sha256:e68985985296d9a66a881eb3193b0906246245294a881e7c8afe623866ac6a5c"},
{file = "pytz-2022.1.tar.gz", hash = "sha256:1e760e2fe6a8163bc0b3d9a19c4f84342afa0a2affebfaa84b01b978a02ecaa7"},
]
pywin32 = [
{file = "pywin32-304-cp310-cp310-win32.whl", hash = "sha256:3c7bacf5e24298c86314f03fa20e16558a4e4138fc34615d7de4070c23e65af3"},
{file = "pywin32-304-cp310-cp310-win_amd64.whl", hash = "sha256:4f32145913a2447736dad62495199a8e280a77a0ca662daa2332acf849f0be48"},
@@ -3448,41 +3176,6 @@ soupsieve = [
{file = "soupsieve-2.3.2.post1-py3-none-any.whl", hash = "sha256:3b2503d3c7084a42b1ebd08116e5f81aadfaea95863628c80a3b774a11b7c759"},
{file = "soupsieve-2.3.2.post1.tar.gz", hash = "sha256:fc53893b3da2c33de295667a0e19f078c14bf86544af307354de5fcf12a3f30d"},
]
sphinx = [
{file = "Sphinx-4.3.0-py3-none-any.whl", hash = "sha256:7e2b30da5f39170efcd95c6270f07669d623c276521fee27ad6c380f49d2bf5b"},
{file = "Sphinx-4.3.0.tar.gz", hash = "sha256:6d051ab6e0d06cba786c4656b0fe67ba259fe058410f49e95bee6e49c4052cbf"},
]
sphinx-copybutton = [
{file = "sphinx-copybutton-0.4.0.tar.gz", hash = "sha256:8daed13a87afd5013c3a9af3575cc4d5bec052075ccd3db243f895c07a689386"},
{file = "sphinx_copybutton-0.4.0-py3-none-any.whl", hash = "sha256:4340d33c169dac6dd82dce2c83333412aa786a42dd01a81a8decac3b130dc8b0"},
]
sphinx-zama-theme = [
{file = "sphinx_zama_theme-3.0.1.tar.gz", hash = "sha256:e92de063272445ade59d052dbc6e36cbb9a39e9f44d7c38d4e45c865d9a3c5e7"},
]
sphinxcontrib-applehelp = [
{file = "sphinxcontrib-applehelp-1.0.2.tar.gz", hash = "sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58"},
{file = "sphinxcontrib_applehelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a"},
]
sphinxcontrib-devhelp = [
{file = "sphinxcontrib-devhelp-1.0.2.tar.gz", hash = "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4"},
{file = "sphinxcontrib_devhelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e"},
]
sphinxcontrib-htmlhelp = [
{file = "sphinxcontrib-htmlhelp-2.0.0.tar.gz", hash = "sha256:f5f8bb2d0d629f398bf47d0d69c07bc13b65f75a81ad9e2f71a63d4b7a2f6db2"},
{file = "sphinxcontrib_htmlhelp-2.0.0-py2.py3-none-any.whl", hash = "sha256:d412243dfb797ae3ec2b59eca0e52dac12e75a241bf0e4eb861e450d06c6ed07"},
]
sphinxcontrib-jsmath = [
{file = "sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"},
{file = "sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178"},
]
sphinxcontrib-qthelp = [
{file = "sphinxcontrib-qthelp-1.0.3.tar.gz", hash = "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72"},
{file = "sphinxcontrib_qthelp-1.0.3-py2.py3-none-any.whl", hash = "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6"},
]
sphinxcontrib-serializinghtml = [
{file = "sphinxcontrib-serializinghtml-1.1.5.tar.gz", hash = "sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952"},
{file = "sphinxcontrib_serializinghtml-1.1.5-py2.py3-none-any.whl", hash = "sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd"},
]
stack-data = [
{file = "stack_data-0.2.0-py3-none-any.whl", hash = "sha256:999762f9c3132308789affa03e9271bbbe947bf78311851f4d485d8402ed858e"},
{file = "stack_data-0.2.0.tar.gz", hash = "sha256:45692d41bd633a9503a5195552df22b583caf16f0b27c4e58c98d88c8b648e12"},

View File

@@ -62,7 +62,6 @@ pydocstyle = "^6.1.1"
jupyter = "^1.0.0"
flake8 = "^4.0.1"
flake8-bugbear = "^21.11.29"
nbsphinx = "0.8.7"
tqdm = "^4.62.3"
psutil = "^5.9.0"
py-cpuinfo = "^8.0.0"
@@ -77,7 +76,6 @@ pytest-randomly = "^3.11.0"
pygments-style-tomorrow = "^1.0.0"
beautifulsoup4 = "^4.10.0"
pip-licenses = "^3.5.3"
sphinx-zama-theme = "3.0.1"
pip-audit = "^1.1.1"
pytest-codeblocks = "^0.12.2"
twine = "^3.7.1"
@@ -97,5 +95,4 @@ filterwarnings = [
[tool.semantic_release]
version_toml = "pyproject.toml:tool.poetry.version"
version_variable = "docs/conf.py:release"
upload_to_pypi = "False"

View File

@@ -173,39 +173,6 @@ def get_variable_from_toml_file(file_path: Path, var_name: str):
return current_content
def check_version(args):
"""check-version command entry point."""
version_str_set = set()
file_vars_set = load_file_vars_set(args.pyproject_file, args.file_vars)
for file_var_str in sorted(file_vars_set):
print(f"Processing {file_var_str}")
file, var_name = file_var_str.split(":", 1)
file_path = Path(file).resolve()
if file_path.suffix == ".py":
version_str_set.update(get_variable_from_py_file(file_path, var_name))
elif file_path.suffix == ".toml":
version_str_set.add(get_variable_from_toml_file(file_path, var_name))
else:
raise RuntimeError(f"Unsupported file extension: {file_path.suffix}")
if len(version_str_set) == 0:
raise RuntimeError(f"No versions found in {', '.join(sorted(file_vars_set))}")
if len(version_str_set) > 1:
raise RuntimeError(
f"Found more than one version: {', '.join(sorted(version_str_set))}\n"
"Re-run make set-version"
)
# Now version_str_set len == 1
if not VersionInfo.isvalid((version := next(iter(version_str_set)))):
raise RuntimeError(f"Unable to validate version: {version}")
print(f"Found version {version} in all processed locations.")
def main(args):
"""Entry point"""
args.entry_point(args)
@@ -247,23 +214,6 @@ if __name__ == "__main__":
)
parser_set_version.set_defaults(entry_point=set_version)
parser_check_version = sub_parsers.add_parser("check-version")
parser_check_version.add_argument(
"--pyproject-file",
type=str,
default="pyproject.toml",
help="The path to a project's pyproject.toml file, defaults to $pwd/pyproject.toml",
)
parser_check_version.add_argument(
"--file-vars",
type=str,
nargs="+",
help=(
"A space separated list of file/path.{py, toml}:variable to update with the new version"
),
)
parser_check_version.set_defaults(entry_point=check_version)
cli_args = main_parser.parse_args()
main(cli_args)