diff --git a/.github/workflows/pep8.yml b/.github/workflows/pep8.yml
index d5d0aea9e..4d26a4458 100644
--- a/.github/workflows/pep8.yml
+++ b/.github/workflows/pep8.yml
@@ -18,6 +18,5 @@ jobs:
         uses: actions/checkout@v2
       - name: PEP8
         run: |
-          pip install --upgrade pyproject-flake8
-          flake8 generate_data.py
-          flake8 verify_curves.py
+          pip install --upgrade ruff
+          ruff .
diff --git a/lattice-scripts/compare_curves_and_estimator.py b/lattice-scripts/compare_curves_and_estimator.py
index 6e965f9f7..598d74335 100644
--- a/lattice-scripts/compare_curves_and_estimator.py
+++ b/lattice-scripts/compare_curves_and_estimator.py
@@ -1,10 +1,10 @@
-import sys
-sys.path.insert(1, 'lattice-estimator')
-from estimator import *
-from sage.all import oo, save, load, ceil, floor
+from estimator import LWE, ND
+from sage.all import oo, load, floor
 from generate_data import estimate, get_security_level
 import argparse
 import os
+import sys
+sys.path.insert(1, 'lattice-estimator')
 
 
 LOG_N_MAX = 17 + 1
@@ -98,13 +98,13 @@ def compare_curve_and_estimator(security_level, log_q, curves_dir):
 
     # step 2. check security of those points
     for lwe_dim in lwe_dimensions:
-        print(f"-------------------------")
+        print("-------------------------")
         # (i) get stddev with current curves
         predicted_stddev = estimate_stddev_with_current_curve(curve, lwe_dim, log_q)
 
         # (ii) estimate up-to-date security
         predicted_security = estimate_security_with_lattice_estimator(lwe_dim, predicted_stddev, log_q)
-        print(f"-------------------------")
+        print("-------------------------")
         print(f"lwe dim: {lwe_dim}")
         print(f"stddev: {predicted_stddev}")
         print(f"Security: {predicted_security}")
diff --git a/lattice-scripts/generate_data.py b/lattice-scripts/generate_data.py
index 735213300..5d2693d2f 100644
--- a/lattice-scripts/generate_data.py
+++ b/lattice-scripts/generate_data.py
@@ -1,11 +1,12 @@
-import sys
-sys.path.insert(1, 'lattice-estimator')
-from estimator import *
-from sage.all import oo, save, load, ceil
+from estimator import RC, LWE, ND
+from sage.all import oo, save, load
 from math import log2
 import multiprocessing
 import argparse
 import os
+import sys
+sys.path.insert(1, 'lattice-estimator')
+
 
 old_models_sobj = ""
 
@@ -149,8 +150,8 @@ def generate_parameter_matrix(
     (sd_min, sd_max) = sd_range
     for lam in target_security_levels:
         for sd in range(sd_min, sd_max + 1):
-            print("run for {}".format(lam, sd))
-            Xe_new = nd.NoiseDistribution.DiscreteGaussian(2 ** sd)
+            print(f"run for {lam} {sd}")
+            Xe_new = ND.NoiseDistribution.DiscreteGaussian(2 ** sd)
             (params_out, sec) = automated_param_select_n(
                 params_in.updated(Xe=Xe_new), target_security=lam
             )
@@ -190,7 +191,7 @@ def generate_zama_curves64(
     inputs = [
         (init_params, (val, val), target_security_levels, name) for val in vals
     ]
-    res = pool.starmap(generate_parameter_matrix, inputs)
+    _res = pool.starmap(generate_parameter_matrix, inputs)
 
     return "done"
 
diff --git a/lattice-scripts/verify_curves.py b/lattice-scripts/verify_curves.py
index 85af038c2..d63824511 100644
--- a/lattice-scripts/verify_curves.py
+++ b/lattice-scripts/verify_curves.py
@@ -95,7 +95,7 @@ def generate_and_verify(security_levels, log_q, curves_dir, name="verified_curve
             json.append({"slope": a_sec, "bias": b_sec - log_q, "security_level": sec, "minimal_lwe_dimension": n_alpha})
             success.append((a_sec, b_sec - log_q, sec, a_sec, b_sec))
         else:
-            fail.append(x)
+            fail.append(sec)
 
     save(success, os.path.join(curves_dir, name))
 
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 000000000..54825080d
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,2 @@
+[tool.ruff]
+line-length = 169