Merge pull request #24 from mhchia/feat/data-commitment-maps-to-file

Generate data commitment in json file
Kevin Mai-Husan Chia
2024-04-10 22:24:59 +08:00
committed by GitHub
32 changed files with 237 additions and 181 deletions

View File

@@ -95,13 +95,14 @@ The flow between data providers and users is as follows:
Data providers should generate commitments for their dataset beforehand. For a dataset (e.g. a table in a SQL database), there should be a commitment for each column. Users later use these commitments to verify the ZK proof and be convinced that the computation was done with the correct dataset.
```python
from zkstats.core import get_data_commitment_maps
from zkstats.core import generate_data_commitment
data_path = "/path/to/your/data.json"
data_commitment_path = "/path/to/store/data_commitments.json"
# possible_scales is a list of possible scales for the data to be encoded. For example, here we use [0, 20) as the possible scales, to make sure every scale a data provider might use has a corresponding commitment
possible_scales = list(range(20))
# `commitment_maps` is derived by data providers and shared with users
commitment_maps = get_data_commitment_maps(data_path, possible_scales)
# data commitment is generated by data providers and shared with users
generate_data_commitment(data_path, possible_scales, data_commitment_path)
```
When generating a proof, since the dataset might contain floating-point values, data providers need to specify a proper "scale" to encode and decode them. The scale is chosen based on the value precision in the dataset and the type of computation. `possible_scales` should cover as many scales as possible, and data providers should always use scales within `possible_scales`, so that users can always get the corresponding commitments to verify the proofs.
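As a rough illustration of what a scale does (a sketch under the assumption that, as in ezkl-style fixed-point encoding, scale `s` means multiplying by `2**s` before rounding; `encode`/`decode` are hypothetical helpers, not part of zkstats):
```python
# Hypothetical helpers illustrating fixed-point encoding at a given scale.
# Assumes scale s means "multiply by 2**s and round to the nearest integer".
def encode(value: float, scale: int) -> int:
    return round(value * (1 << scale))

def decode(encoded: int, scale: int) -> float:
    return encoded / (1 << scale)

print(decode(encode(1.37, 2), 2))  # 1.25       -> coarse at scale 2
print(decode(encode(1.37, 8), 8))  # 1.37109375 -> much closer at scale 8
```
A larger scale preserves more precision but makes the encoded values (and typically the circuit) larger, which is why the example notebooks below pick different scales per computation.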
@@ -187,7 +188,7 @@ res = verifier_verify(
settings_path, # path to the settings file
vk_path, # path to the verification key
selected_columns, # selected columns
commitment_maps, # commitment maps
data_commitment_path, # path to the data commitment
)
print("The result is", res)
```

View File

@@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"from zkstats.core import prover_gen_settings, setup, prover_gen_proof, verifier_verify, get_data_commitment_maps"
"from zkstats.core import prover_gen_settings, setup, prover_gen_proof, verifier_verify, generate_data_commitment"
]
},
{
@@ -94,7 +94,7 @@
"source": [
"scales = [2]\n",
"selected_columns = ['x', 'y']\n",
"commitment_maps = get_data_commitment_maps(data_path, scales)"
"generate_data_commitment(data_path, scales, data_commitment_path)"
]
},
{
@@ -221,7 +221,7 @@
],
"source": [
"# Verifier verifies\n",
"verifier_verify(proof_path, settings_path, vk_path, selected_columns, commitment_maps)"
"verifier_verify(proof_path, settings_path, vk_path, selected_columns, data_commitment_path)"
]
},
{
@@ -248,7 +248,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
"version": "3.12.1"
},
"orig_nbformat": 4
},

File diff suppressed because one or more lines are too long

View File

@@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, get_data_commitment_maps"
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, generate_data_commitment"
]
},
{
@@ -50,7 +50,8 @@
"# this is private to prover since it contains actual data\n",
"sel_data_path = os.path.join('prover/sel_data.json')\n",
"# this is just dummy random value\n",
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')"
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')\n",
"data_commitment_path = os.path.join('shared/data_commitment.json')"
]
},
{
@@ -82,7 +83,7 @@
"# note scale = 2, or 3 makes it more precise, but too big.\n",
"scales = [1]\n",
"selected_columns = ['x', 'y']\n",
"commitment_maps = get_data_commitment_maps(data_path, scales)"
"generate_data_commitment(data_path, scales, data_commitment_path)"
]
},
{
@@ -214,7 +215,7 @@
],
"source": [
"# Verifier verifies\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, commitment_maps)\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, data_commitment_path)\n",
"print(\"Verifier gets result:\", res)"
]
},
@@ -242,7 +243,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
"version": "3.12.1"
},
"orig_nbformat": 4
},

View File

@@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, get_data_commitment_maps"
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, generate_data_commitment"
]
},
{
@@ -56,7 +56,8 @@
"# this is private to prover since it contains actual data\n",
"sel_data_path = os.path.join('prover/sel_data.json')\n",
"# this is just dummy random value\n",
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')"
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')\n",
"data_commitment_path = os.path.join('shared/data_commitment.json')"
]
},
{
@@ -87,7 +88,7 @@
"source": [
"scales = [8]\n",
"selected_columns = ['col_name']\n",
"commitment_maps = get_data_commitment_maps(data_path, scales)"
"generate_data_commitment(data_path, scales, data_commitment_path)"
]
},
{
@@ -210,7 +211,7 @@
],
"source": [
"# Verifier verifies\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, commitment_maps)\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, data_commitment_path)\n",
"print(\"Verifier gets result:\", res)"
]
},

View File

@@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, get_data_commitment_maps"
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, generate_data_commitment"
]
},
{
@@ -50,7 +50,8 @@
"# this is private to prover since it contains actual data\n",
"sel_data_path = os.path.join('prover/sel_data.json')\n",
"# this is just dummy random value\n",
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')"
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')\n",
"data_commitment_path = os.path.join('shared/data_commitment.json')"
]
},
{
@@ -81,7 +82,7 @@
"source": [
"scales = [6]\n",
"selected_columns = ['col_name']\n",
"commitment_maps = get_data_commitment_maps(data_path, scales)"
"generate_data_commitment(data_path, scales, data_commitment_path)"
]
},
{
@@ -217,7 +218,7 @@
],
"source": [
"# Verifier verifies\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, commitment_maps)\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, data_commitment_path)\n",
"print(\"Verifier gets result:\", res)"
]
},

View File

@@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, get_data_commitment_maps"
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, generate_data_commitment"
]
},
{
@@ -56,7 +56,8 @@
"# this is private to prover since it contains actual data\n",
"sel_data_path = os.path.join('prover/sel_data.json')\n",
"# this is just dummy random value\n",
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')"
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')\n",
"data_commitment_path = os.path.join('shared/data_commitment.json')"
]
},
{
@@ -95,7 +96,7 @@
"source": [
"scales = [5]\n",
"selected_columns = ['col_1', 'col_2']\n",
"commitment_maps = get_data_commitment_maps(data_path, scales)"
"generate_data_commitment(data_path, scales, data_commitment_path)"
]
},
{
@@ -236,7 +237,7 @@
],
"source": [
"# Verifier verifies\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, commitment_maps)\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, data_commitment_path)\n",
"print(\"Verifier gets result:\", res)"
]
},

View File

@@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, get_data_commitment_maps"
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, generate_data_commitment"
]
},
{
@@ -56,7 +56,8 @@
"# this is private to prover since it contains actual data\n",
"sel_data_path = os.path.join('prover/sel_data.json')\n",
"# this is just dummy random value\n",
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')"
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')\n",
"data_commitment_path = os.path.join('shared/data_commitment.json')"
]
},
{
@@ -99,7 +100,7 @@
"source": [
"scales = [3]\n",
"selected_columns = ['col_name']\n",
"commitment_maps = get_data_commitment_maps(data_path, scales)"
"generate_data_commitment(data_path, scales, data_commitment_path)"
]
},
{
@@ -245,7 +246,7 @@
],
"source": [
"# Verifier verifies\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, commitment_maps)\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, data_commitment_path)\n",
"print(\"Verifier gets result:\", res)"
]
}

View File

@@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, get_data_commitment_maps"
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, generate_data_commitment"
]
},
{
@@ -56,7 +56,8 @@
"# this is private to prover since it contains actual data\n",
"sel_data_path = os.path.join('prover/sel_data.json')\n",
"# this is just dummy random value\n",
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')"
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')\n",
"data_commitment_path = os.path.join('shared/data_commitment.json')"
]
},
{
@@ -85,11 +86,11 @@
"outputs": [],
"source": [
"# use large scale here to get the output really close to the value in dataset, which is important to Median.\n",
"# Be careful, since we may need to calculate average of 2 values in case of no middle value in dataset, \n",
"# Be careful, since we may need to calculate average of 2 values in case of no middle value in dataset,\n",
"# this means larger scale can still blow up circuit size, unlike Mode func that scale doesnt affect circuit size much.\n",
"scales = [7]\n",
"selected_columns = ['col_name']\n",
"commitment_maps = get_data_commitment_maps(data_path, scales)"
"generate_data_commitment(data_path, scales, data_commitment_path)"
]
},
{
@@ -204,7 +205,7 @@
],
"source": [
"# Verifier verifies\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, commitment_maps)\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, data_commitment_path)\n",
"print(\"Verifier gets result:\", res)"
]
},
@@ -232,7 +233,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
"version": "3.12.1"
},
"orig_nbformat": 4
},

View File

@@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, get_data_commitment_maps"
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, generate_data_commitment"
]
},
{
@@ -56,7 +56,8 @@
"# this is private to prover since it contains actual data\n",
"sel_data_path = os.path.join('prover/sel_data.json')\n",
"# this is just dummy random value\n",
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')"
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')\n",
"data_commitment_path = os.path.join('shared/data_commitment.json')"
]
},
{
@@ -88,7 +89,7 @@
"# large scale doesn't blowup circuit size in Mode, so fine.\n",
"scales = [8]\n",
"selected_columns = ['col_name']\n",
"commitment_maps = get_data_commitment_maps(data_path, scales)"
"generate_data_commitment(data_path, scales, data_commitment_path)"
]
},
{
@@ -213,7 +214,7 @@
],
"source": [
"# Verifier verifies\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, commitment_maps)\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, data_commitment_path)\n",
"print(\"Verifier gets result:\", res)"
]
},

View File

@@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, get_data_commitment_maps"
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, generate_data_commitment"
]
},
{
@@ -50,7 +50,8 @@
"# this is private to prover since it contains actual data\n",
"sel_data_path = os.path.join('prover/sel_data.json')\n",
"# this is just dummy random value\n",
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')"
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')\n",
"data_commitment_path = os.path.join('shared/data_commitment.json')"
]
},
{
@@ -80,7 +81,7 @@
"source": [
"scales = [3]\n",
"selected_columns = ['col_name']\n",
"commitment_maps = get_data_commitment_maps(data_path, scales)"
"generate_data_commitment(data_path, scales, data_commitment_path)"
]
},
{
@@ -201,7 +202,7 @@
],
"source": [
"# Verifier verifies\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, commitment_maps)\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, data_commitment_path)\n",
"print(\"Verifier gets result:\", res)"
]
},

View File

@@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, get_data_commitment_maps"
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, generate_data_commitment"
]
},
{
@@ -50,7 +50,8 @@
"# this is private to prover since it contains actual data\n",
"sel_data_path = os.path.join('prover/sel_data.json')\n",
"# this is just dummy random value\n",
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')"
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')\n",
"data_commitment_path = os.path.join('shared/data_commitment.json')"
]
},
{
@@ -80,7 +81,7 @@
"source": [
"scales = [2]\n",
"selected_columns = ['col_name']\n",
"commitment_maps = get_data_commitment_maps(data_path, scales)"
"generate_data_commitment(data_path, scales, data_commitment_path)"
]
},
{
@@ -207,7 +208,7 @@
],
"source": [
"# Verifier verifies\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, commitment_maps)\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, data_commitment_path)\n",
"print(\"Verifier gets result:\", res)"
]
},

View File

@@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, get_data_commitment_maps"
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, generate_data_commitment"
]
},
{
@@ -50,7 +50,8 @@
"# this is private to prover since it contains actual data\n",
"sel_data_path = os.path.join('prover/sel_data.json')\n",
"# this is just dummy random value\n",
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')"
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')\n",
"data_commitment_path = os.path.join('shared/data_commitment.json')"
]
},
{
@@ -81,7 +82,7 @@
"scales = [4]\n",
"# to conform to traditional regression, here only one column of x\n",
"selected_columns = ['x1', 'y']\n",
"commitment_maps = get_data_commitment_maps(data_path, scales)"
"generate_data_commitment(data_path, scales, data_commitment_path)"
]
},
{
@@ -233,7 +234,7 @@
],
"source": [
"# Verifier verifies\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, commitment_maps)\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, data_commitment_path)\n",
"print(\"Verifier gets result:\", res)"
]
},
@@ -261,7 +262,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
"version": "3.12.1"
},
"orig_nbformat": 4
},

View File

@@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, get_data_commitment_maps"
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, generate_data_commitment"
]
},
{
@@ -50,7 +50,8 @@
"# this is private to prover since it contains actual data\n",
"sel_data_path = os.path.join('prover/sel_data.json')\n",
"# this is just dummy random value\n",
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')"
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')\n",
"data_commitment_path = os.path.join('shared/data_commitment.json')"
]
},
{
@@ -80,7 +81,7 @@
"source": [
"scales = [3]\n",
"selected_columns = ['col_name']\n",
"commitment_maps = get_data_commitment_maps(data_path, scales)"
"generate_data_commitment(data_path, scales, data_commitment_path)"
]
},
{
@@ -201,7 +202,7 @@
],
"source": [
"# Verifier verifies\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, commitment_maps)\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, data_commitment_path)\n",
"print(\"Verifier gets result:\", res)"
]
},

View File

@@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, get_data_commitment_maps"
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, generate_data_commitment"
]
},
{
@@ -50,7 +50,8 @@
"# this is private to prover since it contains actual data\n",
"sel_data_path = os.path.join('prover/sel_data.json')\n",
"# this is just dummy random value\n",
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')"
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')\n",
"data_commitment_path = os.path.join('shared/data_commitment.json')"
]
},
{
@@ -80,7 +81,7 @@
"source": [
"scales = [2]\n",
"selected_columns = ['col_name']\n",
"commitment_maps = get_data_commitment_maps(data_path, scales)"
"generate_data_commitment(data_path, scales, data_commitment_path)"
]
},
{
@@ -201,7 +202,7 @@
],
"source": [
"# Verifier verifies\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, commitment_maps)\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, data_commitment_path)\n",
"print(\"Verifier gets result:\", res)"
]
},

View File

@@ -33,7 +33,7 @@
"metadata": {},
"outputs": [],
"source": [
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, get_data_commitment_maps"
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, generate_data_commitment"
]
},
{
@@ -58,7 +58,8 @@
"# this is private to prover since it contains actual data\n",
"sel_data_path = os.path.join('prover/sel_data.json')\n",
"# this is just dummy random value\n",
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')"
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')\n",
"data_commitment_path = os.path.join('shared/data_commitment.json')"
]
},
{
@@ -97,7 +98,7 @@
"source": [
"scales = [5]\n",
"selected_columns = ['x', 'y']\n",
"commitment_maps = get_data_commitment_maps(data_path, scales)"
"generate_data_commitment(data_path, scales, data_commitment_path)"
]
},
{
@@ -219,7 +220,7 @@
],
"source": [
"# Verifier verifies\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, commitment_maps)\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, data_commitment_path)\n",
"print(\"Verifier gets result:\", res)"
]
},

View File

@@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, get_data_commitment_maps"
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, generate_data_commitment"
]
},
{
@@ -50,7 +50,8 @@
"# this is private to prover since it contains actual data\n",
"sel_data_path = os.path.join('prover/sel_data.json')\n",
"# this is just dummy random value\n",
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')"
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')\n",
"data_commitment_path = os.path.join('shared/data_commitment.json')"
]
},
{
@@ -82,7 +83,7 @@
"# note scale = 2, or 3 makes it more precise, but too big.\n",
"scales = [1]\n",
"selected_columns = ['x', 'y']\n",
"commitment_maps = get_data_commitment_maps(data_path, scales)"
"generate_data_commitment(data_path, scales, data_commitment_path)"
]
},
{
@@ -210,7 +211,7 @@
],
"source": [
"# Verifier verifies\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, commitment_maps)\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, data_commitment_path)\n",
"print(\"Verifier gets result:\", res)"
]
}

View File

@@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, get_data_commitment_maps"
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, generate_data_commitment"
]
},
{
@@ -56,7 +56,8 @@
"# this is private to prover since it contains actual data\n",
"sel_data_path = os.path.join('prover/sel_data.json')\n",
"# this is just dummy random value\n",
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')"
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')\n",
"data_commitment_path = os.path.join('shared/data_commitment.json')"
]
},
{
@@ -87,7 +88,7 @@
"source": [
"scales = [8]\n",
"selected_columns = ['col_name']\n",
"commitment_maps = get_data_commitment_maps(data_path, scales)"
"generate_data_commitment(data_path, scales, data_commitment_path)"
]
},
{
@@ -216,7 +217,7 @@
],
"source": [
"# Verifier verifies\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, commitment_maps)\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, data_commitment_path)\n",
"print(\"Verifier gets result:\", res)"
]
},

View File

@@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, get_data_commitment_maps"
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, generate_data_commitment"
]
},
{
@@ -50,7 +50,8 @@
"# this is private to prover since it contains actual data\n",
"sel_data_path = os.path.join('prover/sel_data.json')\n",
"# this is just dummy random value\n",
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')"
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')\n",
"data_commitment_path = os.path.join('shared/data_commitment.json')"
]
},
{
@@ -81,7 +82,7 @@
"source": [
"scales = [6]\n",
"selected_columns = ['col_name']\n",
"commitment_maps = get_data_commitment_maps(data_path, scales)"
"generate_data_commitment(data_path, scales, data_commitment_path)"
]
},
{
@@ -204,7 +205,7 @@
],
"source": [
"# Verifier verifies\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, commitment_maps)\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, data_commitment_path)\n",
"print(\"Verifier gets result:\", res)"
]
},

View File

@@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, get_data_commitment_maps"
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, generate_data_commitment"
]
},
{
@@ -56,7 +56,8 @@
"# this is private to prover since it contains actual data\n",
"sel_data_path = os.path.join('prover/sel_data.json')\n",
"# this is just dummy random value\n",
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')"
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')\n",
"data_commitment_path = os.path.join('shared/data_commitment.json')"
]
},
{
@@ -87,7 +88,7 @@
"source": [
"scales = [3]\n",
"selected_columns = ['col_name']\n",
"commitment_maps = get_data_commitment_maps(data_path, scales)"
"generate_data_commitment(data_path, scales, data_commitment_path)"
]
},
{
@@ -208,7 +209,7 @@
],
"source": [
"# Verifier verifies\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, commitment_maps)\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, data_commitment_path)\n",
"print(\"Verifier gets result:\", res)"
]
},

View File

@@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, get_data_commitment_maps"
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, generate_data_commitment"
]
},
{
@@ -56,7 +56,8 @@
"# this is private to prover since it contains actual data\n",
"sel_data_path = os.path.join('prover/sel_data.json')\n",
"# this is just dummy random value\n",
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')"
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')\n",
"data_commitment_path = os.path.join('shared/data_commitment.json')"
]
},
{
@@ -87,7 +88,7 @@
"source": [
"scales = [2]\n",
"selected_columns = ['col_name']\n",
"commitment_maps = get_data_commitment_maps(data_path, scales)"
"generate_data_commitment(data_path, scales, data_commitment_path)"
]
},
{
@@ -208,7 +209,7 @@
],
"source": [
"# Verifier verifies\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, commitment_maps)\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, data_commitment_path)\n",
"print(\"Verifier gets result:\", res)"
]
},

View File

@@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, get_data_commitment_maps"
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, generate_data_commitment"
]
},
{
@@ -56,7 +56,8 @@
"# this is private to prover since it contains actual data\n",
"sel_data_path = os.path.join('prover/sel_data.json')\n",
"# this is just dummy random value\n",
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')"
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')\n",
"data_commitment_path = os.path.join('shared/data_commitment.json')"
]
},
{
@@ -112,7 +113,7 @@
"source": [
"scales = [2]\n",
"selected_columns = ['col_name']\n",
"commitment_maps = get_data_commitment_maps(data_path, scales)"
"generate_data_commitment(data_path, scales, data_commitment_path)"
]
},
{
@@ -239,7 +240,7 @@
],
"source": [
"# Verifier verifies\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, commitment_maps)\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, data_commitment_path)\n",
"print(\"Verifier gets result:\", res)"
]
},

View File

@@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, get_data_commitment_maps"
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, generate_data_commitment"
]
},
{
@@ -50,7 +50,8 @@
"# this is private to prover since it contains actual data\n",
"sel_data_path = os.path.join('prover/sel_data.json')\n",
"# this is just dummy random value\n",
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')"
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')\n",
"data_commitment_path = os.path.join('shared/data_commitment.json')"
]
},
{
@@ -80,7 +81,7 @@
"source": [
"scales = [3]\n",
"selected_columns = ['col_name']\n",
"commitment_maps = get_data_commitment_maps(data_path, scales)"
"generate_data_commitment(data_path, scales, data_commitment_path)"
]
},
{
@@ -207,7 +208,7 @@
],
"source": [
"# Verifier verifies\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, commitment_maps)\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, data_commitment_path)\n",
"print(\"Verifier gets result:\", res)"
]
},

View File

@@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, get_data_commitment_maps"
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, generate_data_commitment"
]
},
{
@@ -50,7 +50,8 @@
"# this is private to prover since it contains actual data\n",
"sel_data_path = os.path.join('prover/sel_data.json')\n",
"# this is just dummy random value\n",
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')"
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')\n",
"data_commitment_path = os.path.join('shared/data_commitment.json')"
]
},
{
@@ -80,7 +81,7 @@
"source": [
"scales = [2]\n",
"selected_columns = ['col_name']\n",
"commitment_maps = get_data_commitment_maps(data_path, scales)"
"generate_data_commitment(data_path, scales, data_commitment_path)"
]
},
{
@@ -201,7 +202,7 @@
],
"source": [
"# Verifier verifies\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, commitment_maps)\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, data_commitment_path)\n",
"print(\"Verifier gets result:\", res)"
]
},

View File

@@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, get_data_commitment_maps"
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, generate_data_commitment"
]
},
{
@@ -50,7 +50,8 @@
"# this is private to prover since it contains actual data\n",
"sel_data_path = os.path.join('prover/sel_data.json')\n",
"# this is just dummy random value\n",
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')"
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')\n",
"data_commitment_path = os.path.join('shared/data_commitment.json')"
]
},
{
@@ -81,7 +82,7 @@
"source": [
"scales = [4]\n",
"selected_columns = ['x1', 'y']\n",
"commitment_maps = get_data_commitment_maps(data_path, scales)"
"generate_data_commitment(data_path, scales, data_commitment_path)"
]
},
{
@@ -222,7 +223,7 @@
],
"source": [
"# Verifier verifies\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, commitment_maps)\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, data_commitment_path)\n",
"print(\"Verifier gets result:\", res)"
]
},

View File

@@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, get_data_commitment_maps"
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, generate_data_commitment"
]
},
{
@@ -50,7 +50,8 @@
"# this is private to prover since it contains actual data\n",
"sel_data_path = os.path.join('prover/sel_data.json')\n",
"# this is just dummy random value\n",
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')"
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')\n",
"data_commitment_path = os.path.join('shared/data_commitment.json')"
]
},
{
@@ -80,7 +81,7 @@
"source": [
"scales = [4]\n",
"selected_columns = ['col_name']\n",
"commitment_maps = get_data_commitment_maps(data_path, scales)"
"generate_data_commitment(data_path, scales, data_commitment_path)"
]
},
{
@@ -202,7 +203,7 @@
],
"source": [
"# Verifier verifies\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, commitment_maps)\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, data_commitment_path)\n",
"print(\"Verifier gets result:\", res)"
]
},

View File

@@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, get_data_commitment_maps"
"from zkstats.core import create_dummy, verifier_define_calculation, prover_gen_settings, setup, prover_gen_proof, verifier_verify, generate_data_commitment"
]
},
{
@@ -50,7 +50,8 @@
"# this is private to prover since it contains actual data\n",
"sel_data_path = os.path.join('prover/sel_data.json')\n",
"# this is just dummy random value\n",
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')"
"sel_dummy_data_path = os.path.join('shared/sel_dummy_data.json')\n",
"data_commitment_path = os.path.join('shared/data_commitment.json')"
]
},
{
@@ -80,7 +81,7 @@
"source": [
"scales = [2]\n",
"selected_columns = ['col_name']\n",
"commitment_maps = get_data_commitment_maps(data_path, scales)"
"generate_data_commitment(data_path, scales, data_commitment_path)"
]
},
{
@@ -196,7 +197,7 @@
],
"source": [
"# Verifier verifies\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, commitment_maps)\n",
"res = verifier_verify(proof_path, settings_path, vk_path, selected_columns, data_commitment_path)\n",
"print(\"Verifier gets result:\", res)"
]
},

poetry.lock generated
View File

@@ -1843,36 +1843,36 @@ files = [
[[package]]
name = "torch"
version = "2.2.1"
version = "2.2.2"
description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration"
optional = false
python-versions = ">=3.8.0"
files = [
{file = "torch-2.2.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:8d3bad336dd2c93c6bcb3268e8e9876185bda50ebde325ef211fb565c7d15273"},
{file = "torch-2.2.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:5297f13370fdaca05959134b26a06a7f232ae254bf2e11a50eddec62525c9006"},
{file = "torch-2.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:5f5dee8433798888ca1415055f5e3faf28a3bad660e4c29e1014acd3275ab11a"},
{file = "torch-2.2.1-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:b6d78338acabf1fb2e88bf4559d837d30230cf9c3e4337261f4d83200df1fcbe"},
{file = "torch-2.2.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:6ab3ea2e29d1aac962e905142bbe50943758f55292f1b4fdfb6f4792aae3323e"},
{file = "torch-2.2.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:d86664ec85902967d902e78272e97d1aff1d331f7619d398d3ffab1c9b8e9157"},
{file = "torch-2.2.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:d6227060f268894f92c61af0a44c0d8212e19cb98d05c20141c73312d923bc0a"},
{file = "torch-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:77e990af75fb1675490deb374d36e726f84732cd5677d16f19124934b2409ce9"},
{file = "torch-2.2.1-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:46085e328d9b738c261f470231e987930f4cc9472d9ffb7087c7a1343826ac51"},
{file = "torch-2.2.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:2d9e7e5ecbb002257cf98fae13003abbd620196c35f85c9e34c2adfb961321ec"},
{file = "torch-2.2.1-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:ada53aebede1c89570e56861b08d12ba4518a1f8b82d467c32665ec4d1f4b3c8"},
{file = "torch-2.2.1-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:be21d4c41ecebed9e99430dac87de1439a8c7882faf23bba7fea3fea7b906ac1"},
{file = "torch-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:79848f46196750367dcdf1d2132b722180b9d889571e14d579ae82d2f50596c5"},
{file = "torch-2.2.1-cp312-none-macosx_10_9_x86_64.whl", hash = "sha256:7ee804847be6be0032fbd2d1e6742fea2814c92bebccb177f0d3b8e92b2d2b18"},
{file = "torch-2.2.1-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:84b2fb322ab091039fdfe74e17442ff046b258eb5e513a28093152c5b07325a7"},
{file = "torch-2.2.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:5c0c83aa7d94569997f1f474595e808072d80b04d34912ce6f1a0e1c24b0c12a"},
{file = "torch-2.2.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:91a1b598055ba06b2c386415d2e7f6ac818545e94c5def597a74754940188513"},
{file = "torch-2.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:8f93ddf3001ecec16568390b507652644a3a103baa72de3ad3b9c530e3277098"},
{file = "torch-2.2.1-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:0e8bdd4c77ac2584f33ee14c6cd3b12767b4da508ec4eed109520be7212d1069"},
{file = "torch-2.2.1-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:6a21bcd7076677c97ca7db7506d683e4e9db137e8420eb4a68fb67c3668232a7"},
{file = "torch-2.2.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:f1b90ac61f862634039265cd0f746cc9879feee03ff962c803486301b778714b"},
{file = "torch-2.2.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:ed9e29eb94cd493b36bca9cb0b1fd7f06a0688215ad1e4b3ab4931726e0ec092"},
{file = "torch-2.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:c47bc25744c743f3835831a20efdcfd60aeb7c3f9804a213f61e45803d16c2a5"},
{file = "torch-2.2.1-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:0952549bcb43448c8d860d5e3e947dd18cbab491b14638e21750cb3090d5ad3e"},
{file = "torch-2.2.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:26bd2272ec46fc62dcf7d24b2fb284d44fcb7be9d529ebf336b9860350d674ed"},
{file = "torch-2.2.2-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:bc889d311a855dd2dfd164daf8cc903a6b7273a747189cebafdd89106e4ad585"},
{file = "torch-2.2.2-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:15dffa4cc3261fa73d02f0ed25f5fa49ecc9e12bf1ae0a4c1e7a88bbfaad9030"},
{file = "torch-2.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:11e8fe261233aeabd67696d6b993eeb0896faa175c6b41b9a6c9f0334bdad1c5"},
{file = "torch-2.2.2-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:b2e2200b245bd9f263a0d41b6a2dab69c4aca635a01b30cca78064b0ef5b109e"},
{file = "torch-2.2.2-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:877b3e6593b5e00b35bbe111b7057464e76a7dd186a287280d941b564b0563c2"},
{file = "torch-2.2.2-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:ad4c03b786e074f46606f4151c0a1e3740268bcf29fbd2fdf6666d66341c1dcb"},
{file = "torch-2.2.2-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:32827fa1fbe5da8851686256b4cd94cc7b11be962862c2293811c94eea9457bf"},
{file = "torch-2.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:f9ef0a648310435511e76905f9b89612e45ef2c8b023bee294f5e6f7e73a3e7c"},
{file = "torch-2.2.2-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:95b9b44f3bcebd8b6cd8d37ec802048c872d9c567ba52c894bba90863a439059"},
{file = "torch-2.2.2-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:49aa4126ede714c5aeef7ae92969b4b0bbe67f19665106463c39f22e0a1860d1"},
{file = "torch-2.2.2-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:cf12cdb66c9c940227ad647bc9cf5dba7e8640772ae10dfe7569a0c1e2a28aca"},
{file = "torch-2.2.2-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:89ddac2a8c1fb6569b90890955de0c34e1724f87431cacff4c1979b5f769203c"},
{file = "torch-2.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:451331406b760f4b1ab298ddd536486ab3cfb1312614cfe0532133535be60bea"},
{file = "torch-2.2.2-cp312-none-macosx_10_9_x86_64.whl", hash = "sha256:eb4d6e9d3663e26cd27dc3ad266b34445a16b54908e74725adb241aa56987533"},
{file = "torch-2.2.2-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:bf9558da7d2bf7463390b3b2a61a6a3dbb0b45b161ee1dd5ec640bf579d479fc"},
{file = "torch-2.2.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:cd2bf7697c9e95fb5d97cc1d525486d8cf11a084c6af1345c2c2c22a6b0029d0"},
{file = "torch-2.2.2-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:b421448d194496e1114d87a8b8d6506bce949544e513742b097e2ab8f7efef32"},
{file = "torch-2.2.2-cp38-cp38-win_amd64.whl", hash = "sha256:3dbcd563a9b792161640c0cffe17e3270d85e8f4243b1f1ed19cca43d28d235b"},
{file = "torch-2.2.2-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:31f4310210e7dda49f1fb52b0ec9e59382cfcb938693f6d5378f25b43d7c1d29"},
{file = "torch-2.2.2-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:c795feb7e8ce2e0ef63f75f8e1ab52e7fd5e1a4d7d0c31367ade1e3de35c9e95"},
{file = "torch-2.2.2-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:a6e5770d68158d07456bfcb5318b173886f579fdfbf747543901ce718ea94782"},
{file = "torch-2.2.2-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:67dcd726edff108e2cd6c51ff0e416fd260c869904de95750e80051358680d24"},
{file = "torch-2.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:539d5ef6c4ce15bd3bd47a7b4a6e7c10d49d4d21c0baaa87c7d2ef8698632dfb"},
{file = "torch-2.2.2-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:dff696de90d6f6d1e8200e9892861fd4677306d0ef604cb18f2134186f719f82"},
{file = "torch-2.2.2-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:3a4dd910663fd7a124c056c878a52c2b0be4a5a424188058fe97109d4436ee42"},
]
[package.dependencies]

View File

@@ -4,7 +4,7 @@ from pathlib import Path
import torch
from zkstats.core import prover_gen_settings, setup, prover_gen_proof, verifier_verify, get_data_commitment_maps
from zkstats.core import prover_gen_settings, setup, prover_gen_proof, verifier_verify, generate_data_commitment
from zkstats.computation import IModel
@@ -43,6 +43,7 @@ def compute(
pk_path = basepath / "model.pk"
vk_path = basepath / "model.vk"
data_path = basepath / "data.json"
data_commitment_path = basepath / "commitments.json"
column_to_data = data_to_file(data_path, data)
# If selected_columns_params is None, select all columns
@@ -60,7 +61,7 @@ def compute(
scales = scales_params
scales_for_commitments = scales_params
commitment_maps = get_data_commitment_maps(data_path, scales_for_commitments)
generate_data_commitment(data_path, scales_for_commitments, data_commitment_path)
prover_gen_settings(
data_path=data_path,
@@ -94,7 +95,7 @@ def compute(
str(settings_path),
str(vk_path),
selected_columns,
commitment_maps,
data_commitment_path,
)

View File

@@ -1,6 +1,8 @@
import json
import torch
from zkstats.core import get_data_commitment_maps
from zkstats.core import generate_data_commitment
from zkstats.computation import computation_to_model
from .helpers import data_to_file, compute
@@ -8,13 +10,14 @@ from .helpers import data_to_file, compute
def test_get_data_commitment_maps(tmp_path, column_0, column_1, scales):
data_path = tmp_path / "data.json"
data_commitment_path = tmp_path / "commitments.json"
# data_json is a mapping[column_name, column_data]
# {
# "columns_0": [1, 2, 3, 4, 5],
# "columns_1": [6, 7, 8, 9, 10],
# }
data_json = data_to_file(data_path, [column_0, column_1])
# commitment_maps is a mapping[scale -> mapping[column_name, commitment_hex]]
# data_commitment is a mapping[scale -> mapping[column_name, commitment_hex]]
# {
# scale_0: {
# "columns_0": "0x...",
@@ -25,10 +28,13 @@ def test_get_data_commitment_maps(tmp_path, column_0, column_1, scales):
# "columns_1": "0x...",
# }
# }
commitment_maps = get_data_commitment_maps(data_path, scales)
assert len(commitment_maps) == len(scales)
for scale, commitment_map in commitment_maps.items():
generate_data_commitment(data_path, scales, data_commitment_path)
with open(data_commitment_path, "r") as f:
data_commitment = json.load(f)
assert len(data_commitment) == len(scales)
for scale, commitment_map in data_commitment.items():
assert int(scale) in scales
assert len(commitment_map) == len(data_json)
for column_name, commitment_hex in commitment_map.items():
@@ -42,14 +48,17 @@ def test_get_data_commitment_maps_hardcoded(tmp_path):
This test is to check if the data commitment scheme doesn't change
"""
data_path = tmp_path / "data.json"
data_commitment_path = tmp_path / "commitments.json"
column_0 = torch.tensor([3.0, 4.5, 1.0, 2.0, 7.5, 6.4, 5.5])
column_1 = torch.tensor([2.7, 3.3, 1.1, 2.2, 3.8, 8.2, 4.4])
data_to_file(data_path, [column_0, column_1])
scales = [2, 3]
commitment_maps = get_data_commitment_maps(data_path, scales)
generate_data_commitment(data_path, scales, data_commitment_path)
with open(data_commitment_path, "r") as f:
data_commitment = json.load(f)
# expected = {"2": {'columns_0': '0x28b5eeb5aeee399c8c50c5b323def9a1aec1deee5b9ae193463d4f9b8893a9a3', 'columns_1': '0x0523c85a86dddd810418e8376ce6d9d21b1b7363764c9c31b575b8ffbad82987'}, "3": {'columns_0': '0x0a2906522d3f902ff4a63ee8aed4d2eaec0b14f71c51eb9557bd693a4e7d77ad', 'columns_1': '0x2dac7fee1efb9eb955f52494a26a3fba6d1fa28cc819e598cb0af31a47b29d08'}}
expected = {"2": {'columns_0': 'a3a993889b4f3d4693e19a5beedec1aea1f9de23b3c5508c9c39eeaeb5eeb528', 'columns_1': '8729d8baffb875b5319c4c7663731b1bd2d9e66c37e8180481dddd865ac82305'}, "3": {'columns_0': 'ad777d4e3a69bd5795eb511cf7140becead2d4aee83ea6f42f903f2d5206290a', 'columns_1': '089db2471af30acb98e519c88ca21f6dba3f6aa29424f555b99efb1eee7fac2d'}}
assert commitment_maps == expected
assert data_commitment == expected
def test_integration_select_partial_columns(tmp_path, column_0, column_1, error, scales):

View File

@@ -7,7 +7,7 @@ import importlib.util
import click
import torch
from .core import prover_gen_proof, prover_gen_settings, setup, verifier_verify, get_data_commitment_maps
from .core import prover_gen_proof, prover_gen_settings, setup, verifier_verify, generate_data_commitment
from .computation import computation_to_model
cwd = os.getcwd()
@@ -22,7 +22,7 @@ proof_path = f"{output_dir}/model.pf"
settings_path = f"{output_dir}/settings.json"
witness_path = f"{output_dir}/witness.json"
comb_data_path = f"{output_dir}/comb_data.json"
commitment_maps_path = f"{output_dir}/commitment_maps.json"
data_commitment_path = f"{output_dir}/data_commitment.json"
default_possible_scales = list(range(20))
@@ -38,11 +38,11 @@ def cli():
def prove(computation_path: str, data_path: str):
computation = load_computation(computation_path)
_, model = computation_to_model(computation)
commitment_maps = get_data_commitment_maps(data_path, default_possible_scales)
with open(commitment_maps_path, "w") as f:
json.dump(commitment_maps, f)
generate_data_commitment(data_path, default_possible_scales, data_commitment_path)
with open(data_commitment_path) as f:
data_commitment = json.load(f)
# By default select all columns
selected_columns = list(commitment_maps[str(default_possible_scales[0])].keys())
selected_columns = list(data_commitment[str(default_possible_scales[0])].keys())
prover_gen_settings(
data_path,
selected_columns,
@@ -71,21 +71,21 @@ def prove(computation_path: str, data_path: str):
pk_path,
)
print("Finished generating proof")
verifier_verify(proof_path, settings_path, vk_path, selected_columns, commitment_maps)
verifier_verify(proof_path, settings_path, vk_path, selected_columns, data_commitment_path)
print("Proof path:", proof_path)
print("Settings path:", settings_path)
print("Verification key path:", vk_path)
print("Commitment maps path:", commitment_maps_path)
print("Commitment maps path:", data_commitment_path)
@click.command()
def verify():
# Load commitment maps
with open(commitment_maps_path, "r") as f:
commitment_maps = json.load(f)
with open(data_commitment_path, "r") as f:
data_commitment = json.load(f)
# By default select all columns
selected_columns = list(commitment_maps[str(default_possible_scales[0])].keys())
verifier_verify(proof_path, settings_path, vk_path, selected_columns, commitment_maps)
selected_columns = list(data_commitment[str(default_possible_scales[0])].keys())
verifier_verify(proof_path, settings_path, vk_path, selected_columns, data_commitment_path)
@click.command()
@@ -96,8 +96,10 @@ def commit(data_path: str, scale_str: str):
Now we just assume the data is a list of floats. We should be able to
"""
scale = int(scale_str)
commitment_maps = get_data_commitment_maps(data_path, [scale])
print("Commitment maps:", commitment_maps)
generate_data_commitment(data_path, [scale], data_commitment_path)
with open(data_commitment_path) as f:
data_commitment = json.load(f)
print("Commitment maps:", data_commitment)
def main():

View File

@@ -191,7 +191,7 @@ def prover_gen_proof(
# ...
# }
TCommitmentMap = Mapping[str, str]
# commitment_maps is a mapping[scale, mapping[column_name, commitment_hex]]
# data_commitment is a mapping[scale, mapping[column_name, commitment_hex]]
# E.g. {
# scale_0: {
# "columns_0": "0x...",
@@ -201,7 +201,7 @@ TCommitmentMap = Mapping[str, str]
# }
TCommitmentMaps = Mapping[str, TCommitmentMap]
def verifier_verify(proof_path: str, settings_path: str, vk_path: str, selected_columns: Sequence[str], commitment_maps: TCommitmentMaps) -> torch.Tensor:
def verifier_verify(proof_path: str, settings_path: str, vk_path: str, selected_columns: Sequence[str], data_commitment_path: str) -> torch.Tensor:
"""
Verify the proof and return the result.
@@ -237,6 +237,8 @@ def verifier_verify(proof_path: str, settings_path: str, vk_path: str, selected_
assert len(proof_instance) == len_inputs + len_outputs, f"lengths mismatch: {len(proof_instance)=}, {len_inputs=}, {len_outputs=}"
# 2.1 Check input commitments
with open(data_commitment_path) as f:
data_commitment = json.load(f)
# All inputs are hashed so are commitments
assert len_inputs == len(selected_columns), f"lengths mismatch: {len_inputs=}, {len(selected_columns)=}"
# Sanity check
@@ -245,7 +247,7 @@ def verifier_verify(proof_path: str, settings_path: str, vk_path: str, selected_
# actual_commitment_str = ezkl.vecu64_to_felt(actual_commitment)
actual_commitment_str = (actual_commitment)
input_scale = input_scales[i]
expected_commitment = commitment_maps[str(input_scale)][column_name]
expected_commitment = data_commitment[str(input_scale)][column_name]
assert actual_commitment_str == expected_commitment, f"commitment mismatch: {i=}, {actual_commitment_str=}, {expected_commitment=}"
# 2.2 Check output is correct
@@ -263,26 +265,29 @@ def verifier_verify(proof_path: str, settings_path: str, vk_path: str, selected_
# ===================================================================================================
# ===================================================================================================
def get_data_commitment_maps(data_path: str, scales: Sequence[int]) -> TCommitmentMaps:
def generate_data_commitment(data_path: str, scales: Sequence[int], data_commitment_path: str) -> None:
"""
Generate a data commitment map for each scale. Commitments for different scales are required
so that verifiers can verify proofs with different scales.
Generate and store data commitment maps for different scales so that verifiers can verify
proofs with different scales.
:param data_path: path to the data file. The data file should be a JSON file with the following format:
{
"column_0": [number_0, number_1, ...],
"column_1": [number_0, number_1, ...],
}
:param scales: a list of scales to use for the commitments.
:return: a map from scale to column name to commitment.
:param scales: a list of scales to use for the commitments
:param data_commitment_path: path to store the generated data commitment maps
"""
with open(data_path) as f:
data_json = json.load(f)
return {
data_commitments = {
str(scale): {
k: _get_commitment_for_column(v, scale) for k, v in data_json.items()
} for scale in scales
}
with open(data_commitment_path, "w") as f:
json.dump(data_commitments, f)
# ===================================================================================================
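
For reference, a minimal usage sketch of the new file-based API introduced in this PR (file names are placeholders; the JSON written by `generate_data_commitment` follows the mapping[scale -> mapping[column_name, commitment_hex]] layout documented above):
```python
import json

from zkstats.core import generate_data_commitment

# Placeholder paths for illustration only.
data_path = "data.json"                       # {"column_0": [...], "column_1": [...]}
data_commitment_path = "data_commitment.json"

# Data provider: commit to every column at each scale and write the result to a file
# that can be shared with verifiers.
generate_data_commitment(data_path, [2, 3], data_commitment_path)

# Verifier side: the file maps each scale (as a string key) to per-column commitments.
with open(data_commitment_path) as f:
    data_commitment = json.load(f)
print(list(data_commitment["2"].keys()))  # column names committed at scale 2
```
The verifier then passes `data_commitment_path` (rather than a loaded dict) to `verifier_verify`, as shown in the README hunk above.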