Mirror of https://github.com/zama-ai/concrete.git, synced 2026-02-15 07:05:09 -05:00
chore: remove old files
@@ -1,19 +0,0 @@
# -*- coding: utf-8 -*-
from .nd import NoiseDistribution as ND  # noqa
from .io import Logging  # noqa
from . import reduction as RC  # noqa
from . import simulator as Simulator  # noqa
from . import lwe as LWE  # noqa

from .schemes import (  # noqa
    Kyber512,
    Kyber768,
    Kyber1024,
    LightSaber,
    Saber,
    FireSaber,
    NTRUHPS2048509Enc,
    NTRUHPS2048677Enc,
    NTRUHPS4096821Enc,
    NTRUHRSS701Enc,
)
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,13 +0,0 @@
# -*- coding: utf-8 -*-
"""
Default values.
"""

from .reduction import Kyber, ABLR21
from .simulator import GSA

red_cost_model = Kyber
red_cost_model_classical_poly_space = ABLR21
red_shape_model = "gsa"
red_simulator = GSA
mitm_opt = "analytical"
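
# A hypothetical usage sketch (for illustration only): these module-level
# defaults can be overridden before running estimates, e.g.
#
#   import estimator.conf
#   estimator.conf.mitm_opt = "numerical"
#
# Note that modules binding these names at import time (via ``from .conf
# import ...`` at module level) will not see later changes.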
@@ -1,258 +0,0 @@
# -*- coding: utf-8 -*-
from sage.all import round, log, oo
from dataclasses import dataclass


@dataclass
class Cost:
    """
    Algorithm costs.
    """

    rop: float = oo
    tag: str = None

    # An entry is "impermanent" if it grows when we run the algorithm again. For example, `δ`
    # would not scale with the number of operations but `rop` would. This check is strict such that
    # unknown entries raise an error. This is to enforce a decision on whether an entry should be
    # scaled.

    impermanents = {
        "rop": True,
        "repetitions": False,
        "tag": False,
        "problem": False,
    }

    @classmethod
    def register_impermanent(cls, data=None, **kwds):
        if data is not None:
            for k, v in data.items():
                if cls.impermanents.get(k, v) != v:
                    raise ValueError(f"Attempting to overwrite {k}:{cls.impermanents[k]} with {v}")
                cls.impermanents[k] = v

        for k, v in kwds.items():
            if cls.impermanents.get(k, v) != v:
                raise ValueError(f"Attempting to overwrite {k}:{cls.impermanents[k]} with {v}")
            cls.impermanents[k] = v

    key_map = {
        "delta": "δ",
        "beta": "β",
        "eta": "η",
        "epsilon": "ε",
        "zeta": "ζ",
        "ell": "ℓ",
        "repetitions": "↻",
    }
    val_map = {"beta": "%8d", "d": "%8d", "delta": "%8.6f"}

    def __init__(self, **kwds):
        for k, v in kwds.items():
            setattr(self, k, v)

    def str(self, keyword_width=None, newline=None, round_bound=2048, compact=False):  # noqa C901
        """
        Return a string representation of this cost object.

        :param keyword_width: keys are printed with this width
        :param newline: insert a newline
        :param round_bound: values beyond this bound are represented as powers of two
        :param compact: do not add extra whitespace to align entries

        EXAMPLE::

            >>> from estimator.cost import Cost
            >>> s = Cost(delta=5, bar=2)
            >>> s
            δ: 5.000000, bar: 2

        """

        def wfmtf(k):
            if keyword_width:
                fmt = "%%%ss" % keyword_width
            else:
                fmt = "%s"
            return fmt % k

        d = self.__dict__
        s = []
        for k, v in d.items():
            if k == "problem":  # we store the problem instance in a cost object for reference
                continue
            kk = wfmtf(self.key_map.get(k, k))
            try:
                if (1 / round_bound < abs(v) < round_bound) or (not v) or (k in self.val_map):
                    if abs(v % 1) < 0.0000001:
                        vv = self.val_map.get(k, "%8d") % round(v)
                    else:
                        vv = self.val_map.get(k, "%8.3f") % v
                else:
                    vv = "%7s" % ("≈2^%.1f" % log(v, 2))
            except TypeError:  # strings and such
                vv = "%8s" % v
            if compact:
                kk = kk.strip()
                vv = vv.strip()
            s.append(f"{kk}: {vv}")

        if not newline:
            return ", ".join(s)
        else:
            return "\n".join(s)

    def reorder(self, *args):
        """
        Return a new ordered dict from the key:value pairs in the dictionary, reordered such that
        the keys given to this function come first.

        :param args: keys which should come first (in order)

        EXAMPLE::

            >>> from estimator.cost import Cost
            >>> d = Cost(a=1, b=2, c=3); d
            a: 1, b: 2, c: 3

            >>> d.reorder("b", "c", "a")
            b: 2, c: 3, a: 1

        """
        keys = list(self.__dict__.keys())
        for key in args:
            keys.pop(keys.index(key))
        keys = list(args) + keys
        r = dict()
        for key in keys:
            r[key] = self.__dict__[key]
        return Cost(**r)

    def filter(self, **keys):
        """
        Return a new ordered dictionary from the dictionary restricted to the given keys.

        :param keys: keys which should be copied (ordered)
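
        A doctest sketch (behavior read off the implementation below; note that
        only the keyword names passed matter, their values are ignored):

        EXAMPLE::

            >>> from estimator.cost import Cost
            >>> Cost(a=1, b=2, c=3).filter(a=True, c=True)
            a: 1, c: 3
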
        """
        r = dict()
        for key in keys:
            r[key] = self.__dict__[key]
        return Cost(**r)

    def repeat(self, times, select=None):
        """
        Return a report with all costs multiplied by ``times``.

        :param times: the number of times it should be run
        :param select: toggle which fields ought to be repeated and which should not
        :returns: a new cost estimate

        EXAMPLE::

            >>> from estimator.cost import Cost
            >>> c0 = Cost(a=1, b=2)
            >>> c0.register_impermanent(a=True, b=False)
            >>> c0.repeat(1000)
            a: 1000, b: 2, ↻: 1000

        TESTS::

            >>> from estimator.cost import Cost
            >>> Cost(rop=1).repeat(1000).repeat(1000)
            rop: ≈2^19.9, ↻: ≈2^19.9

        """
        impermanents = dict(self.impermanents)

        if select is not None:
            for key in select:
                impermanents[key] = select[key]

        ret = dict()
        for key in self.__dict__:
            try:
                if impermanents[key]:
                    ret[key] = times * self.__dict__[key]
                else:
                    ret[key] = self.__dict__[key]
            except KeyError:
                raise NotImplementedError(
                    f"You found a bug, this function does not know about '{key}' but should."
                )
        ret["repetitions"] = times * ret.get("repetitions", 1)
        return Cost(**ret)

    def __rmul__(self, times):
        return self.repeat(times)

    def combine(self, right, base=None):
        """Combine ``self`` and ``right``.

        :param right: cost dictionary
        :param base: add entries to ``base``

        EXAMPLE::

            >>> from estimator.cost import Cost
            >>> c0 = Cost(a=1)
            >>> c1 = Cost(b=2)
            >>> c2 = Cost(c=3)
            >>> c0.combine(c1)
            a: 1, b: 2
            >>> c0.combine(c1, base=c2)
            c: 3, a: 1, b: 2

        """
        if base is None:
            cost = dict()
        else:
            cost = base.__dict__
        for key in self.__dict__:
            cost[key] = self.__dict__[key]
        for key in right:
            cost[key] = right.__dict__[key]
        return Cost(**cost)

    def __bool__(self):
        return self.__dict__.get("rop", oo) < oo

    def __add__(self, other):
        return self.combine(self, other)

    def __getitem__(self, key):
        return self.__dict__[key]

    def __delitem__(self, key):
        del self.__dict__[key]

    def get(self, key, default):
        return self.__dict__.get(key, default)

    def __setitem__(self, key, value):
        self.__dict__[key] = value

    def __iter__(self):
        return iter(self.__dict__)

    def values(self):
        return self.__dict__.values()

    def __repr__(self):
        return self.str(compact=True)

    def __str__(self):
        return self.str(newline=True, keyword_width=12)

    def __lt__(self, other):
        try:
            return self["rop"] < other["rop"]
        except AttributeError:
            return self["rop"] < other

    def __le__(self, other):
        try:
            return self["rop"] <= other["rop"]
        except AttributeError:
            return self["rop"] <= other
@@ -1,15 +0,0 @@
# -*- coding: utf-8 -*-
class OutOfBoundsError(ValueError):
    """
    Used to indicate a wrong value, for example δ < 1.
    """

    pass


class InsufficientSamplesError(ValueError):
    """
    Used to indicate that the number of samples given is too small.
    """

    pass
@@ -1,252 +0,0 @@
# -*- coding: utf-8 -*-
"""
Estimate cost of solving LWE using Gröbner bases.

See :ref:`Arora-GB` for an overview.

"""
from sage.all import (
    PowerSeriesRing,
    QQ,
    RR,
    oo,
    binomial,
    sqrt,
    ceil,
    floor,
    exp,
    log,
    pi,
    RealField,
)
from .cost import Cost
from .lwe_parameters import LWEParameters
from .io import Logging


def gb_cost(n, D, omega=2, prec=None):
    """
    Estimate the complexity of computing a Gröbner basis.

    :param n: Number of variables n > 0.
    :param D: List of `(d, m)` pairs where `m` is the number of polynomials and `d` is a degree.
    :param omega: Linear algebra exponent, i.e. matrix multiplication costs `O(n^ω)` operations.
    :param prec: Compute power series up to this precision (default: `2n`).

    EXAMPLE::

        >>> from estimator.gb import gb_cost
        >>> gb_cost(128, [(2, 256)])
        rop: ≈2^144.6, dreg: 17, mem: ≈2^144.6

    """
    prec = 2 * n if prec is None else prec

    R = PowerSeriesRing(QQ, "z", prec)
    z = R.gen()
    z = z.add_bigoh(prec)
    s = R(1)
    s = s.add_bigoh(prec)

    for d, m in D:
        s *= (1 - z ** d) ** m
    s /= (1 - z) ** n
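
    # A standard heuristic for semi-regular systems (cf. Bardet et al.): the degree
    # of regularity is the index of the first negative coefficient of the series
    # prod_{(d, m) in D} (1 - z^d)^m / (1 - z)^n computed above; the loop below finds it.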

    retval = Cost(rop=oo, dreg=oo)
    retval.register_impermanent({"rop": True, "dreg": False, "mem": False})

    for dreg in range(prec):
        if s[dreg] < 0:
            break
    else:
        return retval

    retval["dreg"] = dreg
    retval["rop"] = binomial(n + dreg, dreg) ** omega
    retval["mem"] = binomial(n + dreg, dreg) ** 2

    return retval


class AroraGB:
    @staticmethod
    def ps_single(C):
        """
        Probability that a Gaussian is within `C` standard deviations.
        """
        RR = RealField(256)
        C = RR(C)
        return RR(1 - (RR(2) / (C * RR(sqrt(2 * pi))) * exp(-(C ** 2) / RR(2))))  # noqa

    @classmethod
    def cost_bounded(cls, params, success_probability=0.99, omega=2, log_level=1, **kwds):
        """
        Estimate cost using absolute bounds for secrets and noise.

        :param params: LWE parameters.
        :param success_probability: target success probability
        :param omega: linear algebra constant.

        """
        d = params.Xe.bounds[1] - params.Xe.bounds[0] + 1
        dn = cls.equations_for_secret(params)
        cost = gb_cost(params.n, [(d, params.m)] + dn)
        cost["t"] = (d - 1) // 2
        if cost["dreg"] < oo and binomial(params.n + cost["dreg"], cost["dreg"]) < params.m:
            cost["m"] = binomial(params.n + cost["dreg"], cost["dreg"])
        else:
            cost["m"] = params.m
        cost.register_impermanent(t=False, m=True)
        return cost

    @classmethod
    def cost_Gaussian_like(cls, params, success_probability=0.99, omega=2, log_level=1, **kwds):
        """
        Estimate cost using absolute bounds for secrets and Gaussian tail bounds for noise.

        :param params: LWE parameters.
        :param success_probability: target success probability
        :param omega: linear algebra constant.

        """
        dn = cls.equations_for_secret(params)

        best, stuck = None, 0
        for t in range(ceil(params.Xe.stddev), params.n):
            d = 2 * t + 1
            C = RR(t / params.Xe.stddev)
            assert C >= 1  # if C is too small, we ignore it
            # Pr[success]^m = Pr[overall success]
            single_prob = AroraGB.ps_single(C)
            m_req = log(success_probability, 2) / log(single_prob, 2)
            m_req = floor(m_req)

            if m_req > params.m:
                break

            current = gb_cost(params.n, [(d, m_req)] + dn, omega)

            if current["dreg"] == oo:
                continue

            current["t"] = t
            current["m"] = m_req
            current.register_impermanent(t=False, m=True)
            current = current.reorder("rop", "m", "dreg", "t")

            Logging.log("repeat", log_level + 1, f"{repr(current)}")

            if best is None:
                best = current
            else:
                if best > current:
                    best = current
                    stuck = 0
                else:
                    stuck += 1
                    if stuck >= 5:
                        break

        if best is None:
            best = Cost(rop=oo, dreg=oo)
        return best

    @classmethod
    def equations_for_secret(cls, params):
        """
        Return a list containing a ``(d, n)`` pair to encode that `n` equations of degree `d` are
        available from the LWE secret (the list is empty if no such equations are available).

        :param params: LWE parameters.

        """
        if params.Xs <= params.Xe:
            a, b = params.Xs.bounds
            if b - a < oo:
                d = b - a + 1
            elif params.Xs.is_Gaussian_like:
                d = 2 * ceil(3 * params.Xs.stddev) + 1
            else:
                raise NotImplementedError(f"Do not know how to handle {params.Xs}.")
            dn = [(d, params.n)]
        else:
            dn = []
        return dn

    def __call__(
        self, params: LWEParameters, success_probability=0.99, omega=2, log_level=1, **kwds
    ):
        """
        Arora-GB as described in [ICALP:AroGe11]_, [EPRINT:ACFP14]_.

        :param params: LWE parameters.
        :param success_probability: targeted success probability < 1.
        :param omega: linear algebra constant.
        :return: A cost dictionary

        The returned cost dictionary has the following entries:

        - ``rop``: Total number of word operations (≈ CPU cycles).
        - ``m``: Number of samples consumed.
        - ``dreg``: The degree of regularity or "solving degree".
        - ``t``: Polynomials of degree 2t + 1 are considered.
        - ``mem``: Total memory usage.

        EXAMPLE::

            >>> from estimator import *
            >>> params = LWE.Parameters(n=64, q=7681, Xs=ND.DiscreteGaussian(3.0), Xe=ND.DiscreteGaussian(3.0), m=2**50)
            >>> LWE.arora_gb(params)
            rop: ≈2^307.1, m: ≈2^46.8, dreg: 99, t: 25, mem: ≈2^307.1, tag: arora-gb

        TESTS::

            >>> LWE.arora_gb(params.updated(m=2**120))
            rop: ≈2^282.6, m: ≈2^101.1, dreg: 83, t: 36, mem: ≈2^282.6, tag: arora-gb
            >>> LWE.arora_gb(params.updated(Xe=ND.UniformMod(7)))
            rop: ≈2^60.6, dreg: 7, mem: ≈2^60.6, t: 3, m: ≈2^30.3, tag: arora-gb
            >>> LWE.arora_gb(params.updated(Xe=ND.CenteredBinomial(8)))
            rop: ≈2^122.3, dreg: 19, mem: ≈2^122.3, t: 8, m: ≈2^50.0, tag: arora-gb
            >>> LWE.arora_gb(params.updated(Xs=ND.UniformMod(5), Xe=ND.CenteredBinomial(4), m=1024))
            rop: ≈2^227.2, dreg: 54, mem: ≈2^227.2, t: 4, m: 1024, tag: arora-gb
            >>> LWE.arora_gb(params.updated(Xs=ND.UniformMod(3), Xe=ND.CenteredBinomial(4), m=1024))
            rop: ≈2^189.9, dreg: 39, mem: ≈2^189.9, t: 4, m: 1024, tag: arora-gb

        .. [EPRINT:ACFP14] Martin R. Albrecht, Carlos Cid, Jean-Charles Faugère & Ludovic Perret. (2014).
           Algebraic algorithms for LWE. https://eprint.iacr.org/2014/1018

        .. [ICALP:AroGe11] Sanjeev Arora & Rong Ge. (2011). New algorithms for learning in presence of
           errors. In L. Aceto, M. Henzinger, & J. Sgall, ICALP 2011, Part I (pp. 403–415).
           Springer, Heidelberg.
        """
        params = params.normalize()

        best = Cost(rop=oo, dreg=oo)

        if params.Xe.is_bounded:
            cost = self.cost_bounded(
                params,
                success_probability=success_probability,
                omega=omega,
                log_level=log_level,
            )
            Logging.log("gb", log_level, f"b: {repr(cost)}")
            best = min(best, cost, key=lambda x: x["dreg"])

        if params.Xe.is_Gaussian_like:
            cost = self.cost_Gaussian_like(
                params,
                success_probability=success_probability,
                omega=omega,
                log_level=log_level,
            )
            Logging.log("gb", log_level, f"G: {repr(cost)}")
            best = min(best, cost, key=lambda x: x["dreg"])

        best["tag"] = "arora-gb"
        best["problem"] = params
        return best

    __name__ = "arora_gb"


arora_gb = AroraGB()
@@ -1,57 +0,0 @@
# -*- coding: utf-8 -*-
import logging


class Logging:
    """
    Control the level of detail being printed.
    """

    plain_logger = logging.StreamHandler()
    plain_logger.setFormatter(logging.Formatter("%(message)s"))

    detail_logger = logging.StreamHandler()
    detail_logger.setFormatter(logging.Formatter("[%(name)8s] %(message)s"))

    logging.getLogger("estimator").handlers = [plain_logger]
    logging.getLogger("estimator").setLevel(logging.INFO)

    loggers = ("batch", "bdd", "usvp", "bkw", "gb", "repeat", "guess", "bins", "dual")

    CRITICAL = logging.CRITICAL
    ERROR = logging.ERROR
    WARNING = logging.WARNING
    INFO = logging.INFO
    LEVEL0 = logging.INFO
    LEVEL1 = logging.INFO - 2
    LEVEL2 = logging.INFO - 4
    LEVEL3 = logging.INFO - 6
    LEVEL4 = logging.INFO - 8
    LEVEL5 = logging.DEBUG
    DEBUG = logging.DEBUG
    NOTSET = logging.NOTSET

    for logger in loggers:
        logging.getLogger(logger).handlers = [detail_logger]
        logging.getLogger(logger).setLevel(logging.INFO)

    @staticmethod
    def set_level(lvl, loggers=None):
        """Set the logging level.

        :param lvl: one of `CRITICAL`, `ERROR`, `WARNING`, `INFO`, `LEVELX`, `DEBUG`, `NOTSET` with `X` ∈ [0,5]
        :param loggers: a subset of `Logging.loggers`; if `None`, all loggers are used.
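
        EXAMPLE (a minimal usage sketch; the call itself produces no output)::

            >>> from estimator.io import Logging
            >>> Logging.set_level(Logging.LEVEL1, ["dual"])
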
        """
        if loggers is None:
            loggers = Logging.loggers

        for logger in loggers:
            logging.getLogger(logger).setLevel(lvl)

    @classmethod
    def log(cls, logger, level, msg, *args, **kwds):
        level = int(level)
        return logging.getLogger(logger).log(
            cls.INFO - 2 * level, f"{{{level}}} " + msg, *args, **kwds
        )
@@ -1,169 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
High-level LWE interface
|
||||
"""
|
||||
|
||||
from .lwe_primal import primal_usvp, primal_bdd, primal_hybrid
|
||||
from .lwe_bkw import coded_bkw
|
||||
from .lwe_guess import exhaustive_search, mitm, distinguish # noqa
|
||||
from .lwe_dual import dual, dual_hybrid
|
||||
from .lwe_guess import guess_composition
|
||||
from .gb import arora_gb # noqa
|
||||
from .lwe_parameters import LWEParameters as Parameters # noqa
|
||||
|
||||
|
||||
class Estimate:
|
||||
@classmethod
|
||||
def rough(cls, params, jobs=1):
|
||||
"""
|
||||
This function makes the following somewhat routine assumptions:
|
||||
|
||||
- The GSA holds.
|
||||
- The Core-SVP model holds.
|
||||
|
||||
This function furthermore assumes the following heuristics:
|
||||
|
||||
- The primal hybrid attack only applies to sparse secrets.
|
||||
- The dual hybrid MITM attack only applies to sparse secrets.
|
||||
- Arora-GB only applies to bounded noise with at least `n^2` samples.
|
||||
- BKW is not competitive.
|
||||
|
||||
:param params: LWE parameters.
|
||||
:param jobs: Use multiple threads in parallel.
|
||||
|
||||
EXAMPLE ::
|
||||
|
||||
>>> from estimator import *
|
||||
>>> _ = lwe.estimate.rough(Kyber512)
|
||||
usvp :: rop: ≈2^118.6, red: ≈2^118.6, δ: 1.003941, β: 406, d: 998, tag: usvp
|
||||
dual_hybrid :: rop: ≈2^127.2, mem: ≈2^123.3, m: 512, red: ≈2^127.0, δ: 1.003756, β: 435, ...
|
||||
|
||||
|
||||
"""
|
||||
# NOTE: Don't import these at the top-level to avoid circular imports
|
||||
from functools import partial
|
||||
from .reduction import ADPS16
|
||||
from .util import batch_estimate, f_name
|
||||
|
||||
algorithms = {}
|
||||
|
||||
algorithms["usvp"] = partial(primal_usvp, red_cost_model=ADPS16, red_shape_model="gsa")
|
||||
|
||||
if params.Xs.is_sparse:
|
||||
algorithms["hybrid"] = partial(
|
||||
primal_hybrid, red_cost_model=ADPS16, red_shape_model="gsa"
|
||||
)
|
||||
|
||||
if params.Xs.is_sparse:
|
||||
algorithms["dual_mitm_hybrid"] = partial(
|
||||
dual_hybrid, red_cost_model=ADPS16, mitm_optimization=True
|
||||
)
|
||||
else:
|
||||
algorithms["dual_hybrid"] = partial(
|
||||
dual_hybrid, red_cost_model=ADPS16, mitm_optimization=False
|
||||
)
|
||||
|
||||
if params.m > params.n ** 2 and params.Xe.is_bounded:
|
||||
if params.Xs.is_sparse:
|
||||
algorithms["arora-gb"] = guess_composition(arora_gb.cost_bounded)
|
||||
else:
|
||||
algorithms["arora-gb"] = arora_gb.cost_bounded
|
||||
|
||||
res = batch_estimate(params, algorithms.values(), log_level=1, jobs=jobs)
|
||||
res = res[params]
|
||||
for algorithm in algorithms:
|
||||
for k, v in res.items():
|
||||
if f_name(algorithms[algorithm]) == k:
|
||||
print(f"{algorithm:20s} :: {repr(v)}")
|
||||
return res
|
||||
|
||||
def __call__(
|
||||
self,
|
||||
params,
|
||||
red_cost_model=None,
|
||||
red_shape_model=None,
|
||||
deny_list=("arora-gb",),
|
||||
add_list=tuple(),
|
||||
jobs=1,
|
||||
):
|
||||
"""
|
||||
Run all estimates.
|
||||
|
||||
:param params: LWE parameters.
|
||||
:param red_cost_model: How to cost lattice reduction.
|
||||
:param red_shape_model: How to model the shape of a reduced basis (applies to primal attacks)
|
||||
:param deny_list: skip these algorithms
|
||||
:param add_list: add these ``(name, function)`` pairs to the list of algorithms to estimate.a
|
||||
:param jobs: Use multiple threads in parallel.
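
        For example, ``lwe.estimate(Kyber512, deny_list=("arora-gb", "bkw"))`` would
        additionally skip BKW (a hypothetical call shown for illustration; names in
        ``deny_list`` must match the keys of the ``algorithms`` dict constructed below).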

        EXAMPLE ::

            >>> from estimator import *
            >>> _ = lwe.estimate(Kyber512)
            bkw                  :: rop: ≈2^178.8, m: ≈2^166.8, mem: ≈2^167.8, b: 14, t1: 0, t2: 16, ℓ: 13, #cod: 448...
            usvp                 :: rop: ≈2^148.0, red: ≈2^148.0, δ: 1.003941, β: 406, d: 998, tag: usvp
            bdd                  :: rop: ≈2^144.5, red: ≈2^143.8, svp: ≈2^143.0, β: 391, η: 421, d: 1013, tag: bdd
            dual                 :: rop: ≈2^169.9, mem: ≈2^130.0, m: 512, red: ≈2^169.7, δ: 1.003484, β: 484, d: 1024...
            dual_hybrid          :: rop: ≈2^166.8, mem: ≈2^161.9, m: 512, red: ≈2^166.6, δ: 1.003541, β: 473, d: 1011...

        """
        from sage.all import oo
        from functools import partial
        from .conf import red_cost_model as red_cost_model_default
        from .conf import red_shape_model as red_shape_model_default
        from .util import batch_estimate, f_name

        if red_cost_model is None:
            red_cost_model = red_cost_model_default
        if red_shape_model is None:
            red_shape_model = red_shape_model_default

        algorithms = {}

        algorithms["arora-gb"] = guess_composition(arora_gb)
        algorithms["bkw"] = coded_bkw

        algorithms["usvp"] = partial(
            primal_usvp, red_cost_model=red_cost_model, red_shape_model=red_shape_model
        )
        algorithms["bdd"] = partial(
            primal_bdd, red_cost_model=red_cost_model, red_shape_model=red_shape_model
        )
        algorithms["hybrid"] = partial(
            primal_hybrid, red_cost_model=red_cost_model, red_shape_model=red_shape_model
        )
        algorithms["dual"] = partial(dual, red_cost_model=red_cost_model)
        algorithms["dual_hybrid"] = partial(
            dual_hybrid, red_cost_model=red_cost_model, mitm_optimization=False
        )
        algorithms["dual_mitm_hybrid"] = partial(
            dual_hybrid, red_cost_model=red_cost_model, mitm_optimization=True
        )

        for k in deny_list:
            del algorithms[k]
        for k, v in add_list:
            algorithms[k] = v

        res_raw = batch_estimate(params, algorithms.values(), log_level=1, jobs=jobs)
        res_raw = res_raw[params]
        res = {}
        for algorithm in algorithms:
            for k, v in res_raw.items():
                if f_name(algorithms[algorithm]) == k:
                    res[algorithm] = v

        for algorithm in algorithms:
            for k, v in res.items():
                if algorithm == k:
                    if v["rop"] == oo:
                        continue
                    if k == "hybrid" and res["bdd"]["rop"] < v["rop"]:
                        continue
                    if k == "dual_mitm_hybrid" and res["dual_hybrid"]["rop"] < v["rop"]:
                        continue
                    print(f"{algorithm:20s} :: {repr(v)}")
        return res


estimate = Estimate()
@@ -1,304 +0,0 @@
# -*- coding: utf-8 -*-
"""
See :ref:`Coded-BKW for LWE` for what is available.
"""
from sage.all import ceil, log, floor, sqrt, var, find_root, erf, oo
from .lwe_parameters import LWEParameters
from .util import local_minimum
from .cost import Cost
from .errors import InsufficientSamplesError
from .prob import amplify_sigma
from .nd import sigmaf
from .io import Logging

cfft = 1  # convolutions mod q


class CodedBKW:
    @staticmethod
    def N(i, sigma_set, b, q):
        """
        Return `N_i` for the `i`-th `[N_i, b]` linear code.

        :param i: index
        :param sigma_set: target noise level
        """
        return floor(b / (1 - log(12 * sigma_set ** 2 / 2 ** i, q) / 2))

    @staticmethod
    def ntest(n, ell, t1, t2, b, q):  # noqa
        """
        If the parameter ``ntest`` is not provided, we use this function to estimate it.

        :param n: LWE dimension > 0.
        :param ell: Table size for hypothesis testing.
        :param t1: Number of normal BKW steps.
        :param t2: Number of coded BKW steps.
        :param b: Table size for BKW steps.

        """

        # there is no hypothesis testing because we have enough normal BKW
        # tables to cover all of n
        if t1 * b >= n:
            return 0

        # solve for ntest by aiming for ntop == 0
        ntest = var("ntest")
        sigma_set = sqrt(q ** (2 * (1 - ell / ntest)) / 12)
        ncod = sum([CodedBKW.N(i, sigma_set, b, q) for i in range(1, t2 + 1)])
        ntop = n - ncod - ntest - t1 * b

        try:
            start = max(int(round(find_root(ntop, 2, n - t1 * b + 1, rtol=0.1))) - 1, 2)
        except RuntimeError:
            start = 2
        ntest_min = 1
        for ntest in range(start, n - t1 * b + 1):
            if abs(ntop(ntest=ntest).n()) < abs(ntop(ntest=ntest_min).n()):
                ntest_min = ntest
            else:
                break
        return int(ntest_min)

    @staticmethod
    def t1(params: LWEParameters, ell, t2, b, ntest=None):
        """
        We compute t1 from N_i by observing that any N_i ≤ b gives no advantage over vanilla
        BKW, but the estimates for coded BKW always assume quantisation noise, which is too
        pessimistic for N_i ≤ b.
        """
        t1 = 0
        if ntest is None:
            ntest = CodedBKW.ntest(params.n, ell, t1, t2, b, params.q)
        sigma_set = sqrt(params.q ** (2 * (1 - ell / ntest)) / 12)
        Ni = [CodedBKW.N(i, sigma_set, b, params.q) for i in range(1, t2 + 1)]
        t1 = len([e for e in Ni if e <= b])
        # there is no point in having more tables than needed to cover n
        if b * t1 > params.n:
            t1 = params.n // b
        return t1

    @staticmethod
    def cost(
        t2: int,
        b: int,
        ntest: int,
        params: LWEParameters,
        success_probability=0.99,
        log_level=1,
    ):
        """
        Coded-BKW cost.

        :param t2: Number of coded BKW steps (≥ 0).
        :param b: Table size (≥ 1).
        :param success_probability: Targeted success probability < 1.
        :param ntest: Number of coordinates to hypothesis test.

        """
        cost = Cost()

        # Our cost is mainly determined by q^b, on the other hand there are expressions in q^(ℓ+1)
        # below, hence, we set ℓ = b - 1. This allows to achieve the performance reported in
        # [C:GuoJohSta15].

        cost["b"] = b
        ell = b - 1  # noqa
        cost["ell"] = ell

        secret_bounds = params.Xs.bounds
        if params.Xs.is_Gaussian_like and params.Xs.mean == 0:
            secret_bounds = (
                max(secret_bounds[0], -3 * params.Xs.stddev),
                min(secret_bounds[1], 3 * params.Xs.stddev),
            )

        # base of the table size.
        zeta = secret_bounds[1] - secret_bounds[0] + 1

        t1 = CodedBKW.t1(params, ell, t2, b, ntest)
        t2 -= t1

        cost["t1"] = t1
        cost["t2"] = t2

        cost.register_impermanent(t1=False, t2=False)

        # compute ntest with the t1 just computed
        if ntest is None:
            ntest = CodedBKW.ntest(params.n, ell, t1, t2, b, params.q)

        # if there's no ntest then there's no `σ_set` and hence no ncod
        if ntest:
            sigma_set = sqrt(params.q ** (2 * (1 - ell / ntest)) / 12)
            ncod = sum([CodedBKW.N(i, sigma_set, b, params.q) for i in range(1, t2 + 1)])
        else:
            ncod = 0

        ntot = ncod + ntest
        ntop = max(params.n - ncod - ntest - t1 * b, 0)
        cost["#cod"] = ncod  # coding step
        cost["#top"] = ntop  # guessing step, typically zero
        cost["#test"] = ntest  # hypothesis testing

        cost.register_impermanent({"#cod": False, "#top": False, "#test": False})

        # Theorem 1: quantization noise + addition noise
        coding_variance = params.Xs.stddev ** 2 * sigma_set ** 2 * ntot
        sigma_final = float(sqrt(2 ** (t1 + t2) * params.Xe.stddev ** 2 + coding_variance))

        M = amplify_sigma(success_probability, sigmaf(sigma_final), params.q)
        if M is oo:
            cost["rop"] = oo
            cost["m"] = oo
            return cost
        m = (t1 + t2) * (params.q ** b - 1) / 2 + M
        cost["m"] = float(m)
        cost.register_impermanent(m=True)

        if not params.Xs <= params.Xe:
            # Equation (7)
            n = params.n - t1 * b
            C0 = (m - n) * (params.n + 1) * ceil(n / (b - 1))
            assert C0 >= 0
        else:
            C0 = 0

        # Equation (8)
        C1 = sum(
            [(params.n + 1 - i * b) * (m - i * (params.q ** b - 1) / 2) for i in range(1, t1 + 1)]
        )
        assert C1 >= 0

        # Equation (9)
        C2_ = sum(
            [
                4 * (M + i * (params.q ** b - 1) / 2) * CodedBKW.N(i, sigma_set, b, params.q)
                for i in range(1, t2 + 1)
            ]
        )
        C2 = float(C2_)
        for i in range(1, t2 + 1):
            C2 += float(
                ntop + ntest + sum([CodedBKW.N(j, sigma_set, b, params.q) for j in range(1, i + 1)])
            ) * (M + (i - 1) * (params.q ** b - 1) / 2)
        assert C2 >= 0

        # Equation (10)
        C3 = M * ntop * (2 * zeta + 1) ** ntop
        assert C3 >= 0

        # Equation (11)
        C4_ = 4 * M * ntest
        C4 = C4_ + (2 * zeta + 1) ** ntop * (
            cfft * params.q ** (ell + 1) * (ell + 1) * log(params.q, 2) + params.q ** (ell + 1)
        )
        assert C4 >= 0

        C = (C0 + C1 + C2 + C3 + C4) / (
            erf(zeta / sqrt(2 * params.Xe.stddev)) ** ntop
        )  # TODO don't ignore success probability
        cost["rop"] = float(C)
        cost["mem"] = (t1 + t2) * params.q ** b

        cost = cost.reorder("rop", "m", "mem", "b", "t1", "t2")
        cost["tag"] = "coded-bkw"
        cost["problem"] = params
        Logging.log("bkw", log_level + 1, f"{repr(cost)}")

        return cost

    @classmethod
    def b(
        cls,
        params: LWEParameters,
        ntest=None,
        log_level=1,
    ):
        def sf(x, best):
            return (x["rop"] <= best["rop"]) and (best["m"] > params.m or x["m"] <= params.m)

        # the outer search is over b, which determines the size of the tables: q^b
        b_max = 3 * ceil(log(params.q, 2))
        with local_minimum(2, b_max, smallerf=sf) as it_b:
            for b in it_b:
                # the inner search is over t2, the number of coded steps
                t2_max = min(params.n // b, ceil(3 * log(params.q, 2)))
                with local_minimum(2, t2_max, smallerf=sf) as it_t2:
                    for t2 in it_t2:
                        y = cls.cost(b=b, t2=t2, ntest=ntest, params=params)
                        it_t2.update(y)
                    it_b.update(it_t2.y)
            best = it_b.y

        # the search cannot fail: it just outputs some X with X["m"] > params.m
        if best["m"] > params.m:
            raise InsufficientSamplesError(
                f"Got m≈2^{float(log(params.m, 2.0)):.1f} samples, but require ≈2^{float(log(best['m'], 2.0)):.1f}.",
                best["m"],
            )
        return best

    def __call__(
        self,
        params: LWEParameters,
        ntest=None,
        log_level=1,
    ):
        """
        Coded-BKW as described in [C:GuoJohSta15]_.

        :param params: LWE parameters
        :param ntest: Number of coordinates to hypothesis test.
        :return: A cost dictionary

        The returned cost dictionary has the following entries:

        - ``rop``: Total number of word operations (≈ CPU cycles).
        - ``b``: BKW tables have size `q^b`.
        - ``t1``: Number of plain BKW tables.
        - ``t2``: Number of Coded-BKW tables.
        - ``ℓ``: Hypothesis testing has tables of size `q^{ℓ+1}`.
        - ``#cod``: Number of coding steps.
        - ``#top``: Number of guessing steps (typically zero).
        - ``#test``: Number of coordinates to do hypothesis testing on.

        EXAMPLE::

            >>> from sage.all import oo
            >>> from estimator import *
            >>> LWE.coded_bkw(LightSaber.updated(m=oo))
            rop: ≈2^171.7, m: ≈2^159.4, mem: ≈2^160.4, b: 12, t1: 3, t2: 18, ℓ: 11, #cod: 423, #top: 1...

        We may need to amplify the number of samples, which modifies the noise distribution::

            >>> from sage.all import oo
            >>> from estimator import *
            >>> LightSaber
            LWEParameters(n=512, q=8192, Xs=D(σ=1.58), Xe=D(σ=2.00), m=512, tag='LightSaber')
            >>> cost = LWE.coded_bkw(LightSaber); cost
            rop: ≈2^184.3, m: ≈2^172.2, mem: ≈2^173.2, b: 13, t1: 0, t2: 18, ℓ: 12, #cod: 456, #top: 0...
            >>> cost["problem"]
            LWEParameters(n=512, q=8192, Xs=D(σ=1.58), Xe=D(σ=10.39), m=..., tag='LightSaber')

        .. note :: See also [C:KirFou15]_.

        """
        params = LWEParameters.normalize(params)
        try:
            cost = self.b(params, ntest=ntest, log_level=log_level)
        except InsufficientSamplesError as e:
            m = e.args[1]
            while True:
                params_ = params.amplify_m(m)
                try:
                    cost = self.b(params_, ntest=ntest, log_level=log_level)
                    break
                except InsufficientSamplesError as e:
                    m = e.args[1]

        return cost


coded_bkw = CodedBKW()
@@ -1,526 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Estimate cost of solving LWE using dial attacks.
|
||||
|
||||
See :ref:`LWE Dual Attacks` for an introduction what is available.
|
||||
|
||||
"""
|
||||
|
||||
from functools import partial
|
||||
from dataclasses import replace
|
||||
|
||||
from sage.all import oo, ceil, sqrt, log, cached_function, exp
|
||||
from .reduction import delta as deltaf
|
||||
from .reduction import cost as costf
|
||||
from .reduction import ADPS16, BDGL16
|
||||
from .reduction import LLL
|
||||
from .util import local_minimum
|
||||
from .cost import Cost
|
||||
from .lwe_parameters import LWEParameters
|
||||
from .prob import drop as prob_drop
|
||||
from .prob import amplify as prob_amplify
|
||||
from .io import Logging
|
||||
from .conf import red_cost_model as red_cost_model_default
|
||||
from .conf import mitm_opt as mitm_opt_default
|
||||
from .errors import OutOfBoundsError
|
||||
from .nd import NoiseDistribution
|
||||
from .lwe_guess import exhaustive_search, mitm, distinguish
|
||||
|
||||
|
||||
class DualHybrid:
|
||||
"""
|
||||
Estimate cost of solving LWE using dual attacks.
|
||||
"""
|
||||
|
||||
full_sieves = [ADPS16.__name__, BDGL16.__name__]
|
||||
|
||||
@staticmethod
|
||||
@cached_function
|
||||
def dual_reduce(
|
||||
delta: float,
|
||||
params: LWEParameters,
|
||||
zeta: int = 0,
|
||||
h1: int = 0,
|
||||
rho: float = 1.0,
|
||||
log_level=None,
|
||||
):
|
||||
"""
|
||||
Produce new LWE sample using a dual vector on first `n-ζ` coordinates of the secret. The
|
||||
length of the dual vector is given by `δ` in root Hermite form and using a possible
|
||||
scaling factor, i.e. `|v| = ρ ⋅ δ^d * q^((n-ζ)/d)`.
|
||||
|
||||
:param delta: Length of the vector in root Hermite form
|
||||
:param params: LWE parameters
|
||||
:param zeta: Dimension ζ ≥ 0 of new LWE instance
|
||||
:param h1: Number of non-zero components of the secret of the new LWE instance
|
||||
:param rho: Factor introduced by obtaining multiple dual vectors
|
||||
:returns: new ``LWEParameters`` and ``m``
|
||||
|
||||
.. note :: This function assumes that the instance is normalized.
|
||||
|
||||
"""
|
||||
if not 0 <= zeta <= params.n:
|
||||
raise OutOfBoundsError(
|
||||
f"Splitting dimension {zeta} must be between 0 and n={params.n}."
|
||||
)
|
||||
|
||||
# Compute new secret distribution
|
||||
|
||||
if params.Xs.is_sparse:
|
||||
h = params.Xs.get_hamming_weight(params.n)
|
||||
if not 0 <= h1 <= h:
|
||||
raise OutOfBoundsError(f"Splitting weight {h1} must be between 0 and h={h}.")
|
||||
# assuming the non-zero entries are uniform
|
||||
p = h1 / 2
|
||||
red_Xs = NoiseDistribution.SparseTernary(params.n - zeta, h / 2 - p)
|
||||
slv_Xs = NoiseDistribution.SparseTernary(zeta, p)
|
||||
|
||||
if h1 == h:
|
||||
# no reason to do lattice reduction if we assume
|
||||
# that the hw on the reduction part is 0
|
||||
return replace(params, Xs=slv_Xs, m=oo), 1
|
||||
else:
|
||||
# distribution is i.i.d. for each coordinate
|
||||
red_Xs = replace(params.Xs, n=params.n - zeta)
|
||||
slv_Xs = replace(params.Xs, n=zeta)
|
||||
|
||||
c = red_Xs.stddev * params.q / params.Xe.stddev
|
||||
|
||||
# see if we have optimally many samples (as in [INDOCRYPT:EspJouKha20]) available
|
||||
m_ = max(1, ceil(sqrt(red_Xs.n * log(c) / log(delta))) - red_Xs.n)
|
||||
m_ = min(params.m, m_)
|
||||
|
||||
# Compute new noise as in [INDOCRYPT:EspJouKha20]
|
||||
sigma_ = rho * red_Xs.stddev * delta ** (m_ + red_Xs.n) / c ** (m_ / (m_ + red_Xs.n))
|
||||
slv_Xe = NoiseDistribution.DiscreteGaussian(params.q * sigma_)
|
||||
|
||||
slv_params = LWEParameters(
|
||||
n=zeta,
|
||||
q=params.q,
|
||||
Xs=slv_Xs,
|
||||
Xe=slv_Xe,
|
||||
)
|
||||
|
||||
# The m_ we compute there is the optimal number of samples that we pick from the input LWE
|
||||
# instance. We then need to return it because it determines the lattice dimension for the
|
||||
# reduction.
|
||||
|
||||
return slv_params, m_
|
||||
|
||||
@staticmethod
|
||||
@cached_function
|
||||
def cost(
|
||||
solver,
|
||||
params: LWEParameters,
|
||||
beta: int,
|
||||
zeta: int = 0,
|
||||
h1: int = 0,
|
||||
success_probability: float = 0.99,
|
||||
red_cost_model=red_cost_model_default,
|
||||
use_lll=True,
|
||||
log_level=None,
|
||||
):
|
||||
"""
|
||||
Computes the cost of the dual hybrid attack that dual reduces the LWE instance and then
|
||||
uses the given solver to solve the reduced instance.
|
||||
|
||||
:param solver: Algorithm for solving the reduced instance
|
||||
:param params: LWE parameters
|
||||
:param beta: Block size used to produce short dual vectors for dual reduction
|
||||
:param zeta: Dimension ζ ≥ 0 of new LWE instance
|
||||
:param h1: Number of non-zero components of the secret of the new LWE instance
|
||||
:param success_probability: The success probability to target
|
||||
:param red_cost_model: How to cost lattice reduction
|
||||
:param use_lll: Use LLL calls to produce more small vectors
|
||||
|
||||
.. note :: This function assumes that the instance is normalized. It runs no optimization,
|
||||
it merely reports costs.
|
||||
|
||||
"""
|
||||
Logging.log("dual", log_level, f"β={beta}, ζ={zeta}, h1={h1}")
|
||||
|
||||
delta = deltaf(beta)
|
||||
|
||||
if red_cost_model.__name__ in DualHybrid.full_sieves:
|
||||
rho = 4.0 / 3
|
||||
elif use_lll:
|
||||
rho = 2.0
|
||||
else:
|
||||
rho = 1.0
|
||||
|
||||
params_slv, m_ = DualHybrid.dual_reduce(
|
||||
delta, params, zeta, h1, rho, log_level=log_level + 1
|
||||
)
|
||||
Logging.log("dual", log_level + 1, f"red LWE instance: {repr(params_slv)}")
|
||||
|
||||
cost_slv = solver(params_slv, success_probability)
|
||||
Logging.log("dual", log_level + 2, f"solve: {repr(cost_slv)}")
|
||||
|
||||
d = m_ + params.n - zeta
|
||||
cost_red = costf(red_cost_model, beta, d)
|
||||
if red_cost_model.__name__ in DualHybrid.full_sieves:
|
||||
# if we use full sieving, we get many short vectors
|
||||
# we compute in logs to avoid overflows in case m
|
||||
# or beta happen to be large
|
||||
try:
|
||||
log_rep = max(0, log(cost_slv["m"]) - (beta / 2) * log(4 / 3))
|
||||
if log_rep > 10 ** 10:
|
||||
# sage's ceil function seems to completely freak out for large
|
||||
# inputs, but then m is huge, so unlikely to be relevant
|
||||
raise OverflowError()
|
||||
cost_red = cost_red.repeat(ceil(exp(log_rep)))
|
||||
except OverflowError:
|
||||
# if we get an overflow, m must be huge
|
||||
# so we can probably approximate the cost with
|
||||
# oo for our purposes
|
||||
return Cost(rop=oo)
|
||||
elif use_lll:
|
||||
cost_red["rop"] += cost_slv["m"] * LLL(d, log(params.q, 2))
|
||||
cost_red["repetitions"] = cost_slv["m"]
|
||||
else:
|
||||
cost_red = cost_red.repeat(cost_slv["m"])
|
||||
|
||||
Logging.log("dual", log_level + 2, f"red: {repr(cost_red)}")
|
||||
|
||||
total_cost = cost_slv.combine(cost_red)
|
||||
total_cost["m"] = m_
|
||||
total_cost["rop"] = cost_red["rop"] + cost_slv["rop"]
|
||||
total_cost["mem"] = cost_slv["mem"]
|
||||
|
||||
if d < params.n - zeta:
|
||||
raise RuntimeError(f"{d} < {params.n - zeta}, {params.n}, {zeta}, {m_}")
|
||||
total_cost["d"] = d
|
||||
|
||||
Logging.log("dual", log_level, f"{repr(total_cost)}")
|
||||
|
||||
rep = 1
|
||||
if params.Xs.is_sparse:
|
||||
h = params.Xs.get_hamming_weight(params.n)
|
||||
probability = prob_drop(params.n, h, zeta, h1)
|
||||
rep = prob_amplify(success_probability, probability)
|
||||
# don't need more samples to re-run attack, since we may
|
||||
# just guess different components of the secret
|
||||
return total_cost.repeat(times=rep, select={"m": False})
|
||||
|
||||
@staticmethod
|
||||
def optimize_blocksize(
|
||||
solver,
|
||||
params: LWEParameters,
|
||||
zeta: int = 0,
|
||||
h1: int = 0,
|
||||
success_probability: float = 0.99,
|
||||
red_cost_model=red_cost_model_default,
|
||||
use_lll=True,
|
||||
log_level=5,
|
||||
opt_step=2,
|
||||
):
|
||||
"""
|
||||
Optimizes the cost of the dual hybrid attack over the block size β.
|
||||
|
||||
:param solver: Algorithm for solving the reduced instance
|
||||
:param params: LWE parameters
|
||||
:param zeta: Dimension ζ ≥ 0 of new LWE instance
|
||||
:param h1: Number of non-zero components of the secret of the new LWE instance
|
||||
:param success_probability: The success probability to target
|
||||
:param red_cost_model: How to cost lattice reduction
|
||||
:param use_lll: Use LLL calls to produce more small vectors
|
||||
:param opt_step: control robustness of optimizer
|
||||
|
||||
.. note :: This function assumes that the instance is normalized. ζ and h1 are fixed.
|
||||
|
||||
"""
|
||||
|
||||
f = partial(
|
||||
DualHybrid.cost,
|
||||
solver=solver,
|
||||
params=params,
|
||||
zeta=zeta,
|
||||
h1=h1,
|
||||
success_probability=success_probability,
|
||||
red_cost_model=red_cost_model,
|
||||
use_lll=use_lll,
|
||||
log_level=log_level,
|
||||
)
|
||||
# don't have a reliable upper bound for beta
|
||||
# we choose n - k arbitrarily and adjust later if
|
||||
# necessary
|
||||
beta_upper = max(params.n - zeta, 40)
|
||||
beta = beta_upper
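        # if the optimum found below sits exactly at the current upper bound, the
        # bound was likely too tight: double beta_upper and search again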
        while beta == beta_upper:
            beta_upper *= 2
            with local_minimum(2, beta_upper, opt_step) as it:
                for beta in it:
                    it.update(f(beta=beta))
                for beta in it.neighborhood:
                    it.update(f(beta=beta))
                cost = it.y
            beta = cost["beta"]

        cost["zeta"] = zeta
        if params.Xs.is_sparse:
            cost["h1"] = h1
        return cost

    def __call__(
        self,
        solver,
        params: LWEParameters,
        success_probability: float = 0.99,
        red_cost_model=red_cost_model_default,
        use_lll=True,
        opt_step=2,
        log_level=1,
    ):
        """
        Optimize the cost of the dual hybrid attack (using the given solver) over
        all attack parameters: block size β, splitting dimension ζ, and
        splitting weight h1 (in case the secret distribution is sparse). Since
        the cost function for the dual hybrid might only be convex in an approximate
        sense, the parameter ``opt_step`` allows to make the optimization procedure more
        robust against local irregularities (higher value) at the cost of a longer
        running time. In a nutshell, if the cost of the dual hybrid seems suspiciously
        high, try a larger ``opt_step`` (e.g. 4 or 8).

        :param solver: Algorithm for solving the reduced instance
        :param params: LWE parameters
        :param success_probability: The success probability to target
        :param red_cost_model: How to cost lattice reduction
        :param use_lll: Use LLL calls to produce more small vectors
        :param opt_step: control robustness of optimizer

        The returned cost dictionary has the following entries:

        - ``rop``: Total number of word operations (≈ CPU cycles).
        - ``mem``: Total amount of memory used by solver (in elements mod q).
        - ``red``: Number of word operations in lattice reduction.
        - ``δ``: Root-Hermite factor targeted by lattice reduction.
        - ``β``: BKZ block size.
        - ``ζ``: Number of guessed coordinates.
        - ``h1``: Number of non-zero components among guessed coordinates (if secret distribution is sparse)
        - ``prob``: Probability of success in guessing.
        - ``repetitions``: How often we are required to repeat the attack.
        - ``d``: Lattice dimension.

        - When ζ = 1 this function essentially estimates the dual attack.
        - When ζ > 1 and ``solver`` is ``exhaustive_search`` this function estimates
          the hybrid attack as given in [INDOCRYPT:EspJouKha20]_.
        - When ζ > 1 and ``solver`` is ``mitm`` this function estimates the dual MITM
          hybrid attack roughly following [EPRINT:CHHS19]_.

        EXAMPLES::

            >>> from estimator import *
            >>> params = LWE.Parameters(n=1024, q = 2**32, Xs=ND.Uniform(0,1), Xe=ND.DiscreteGaussian(3.0))
            >>> LWE.dual(params)
            rop: ≈2^115.5, mem: ≈2^68.0, m: 1018, red: ≈2^115.4, δ: 1.005021, β: 284, d: 2042, ↻: ≈2^68.0, tag: dual
            >>> LWE.dual_hybrid(params)
            rop: ≈2^111.3, mem: ≈2^106.4, m: 983, red: ≈2^111.2, δ: 1.005204, β: 269, d: 1957, ↻: ≈2^56.4, ζ: 50...
            >>> LWE.dual_hybrid(params, mitm_optimization=True)
            rop: ≈2^141.1, mem: ≈2^139.1, m: 1189, k: 132, ↻: 139, red: ≈2^140.8, δ: 1.004164, β: 375, d: 2021...
            >>> LWE.dual_hybrid(params, mitm_optimization="numerical")
            rop: ≈2^140.6, m: 1191, k: 128, mem: ≈2^136.0, ↻: 133, red: ≈2^140.2, δ: 1.004179, β: 373, d: 2052...

            >>> params = params.updated(Xs=ND.SparseTernary(params.n, 32))
            >>> LWE.dual(params)
            rop: ≈2^111.7, mem: ≈2^66.0, m: 950, red: ≈2^111.5, δ: 1.005191, β: 270, d: 1974, ↻: ≈2^66.0, tag: dual
            >>> LWE.dual_hybrid(params)
            rop: ≈2^97.8, mem: ≈2^81.9, m: 730, red: ≈2^97.4, δ: 1.006813, β: 175, d: 1453, ↻: ≈2^36.3, ζ: 301...
            >>> LWE.dual_hybrid(params, mitm_optimization=True)
            rop: ≈2^103.4, mem: ≈2^81.5, m: 724, k: 310, ↻: ≈2^27.3, red: ≈2^102.7, δ: 1.006655, β: 182...

            >>> params = params.updated(Xs=ND.CenteredBinomial(8))
            >>> LWE.dual(params)
            rop: ≈2^123.1, mem: ≈2^75.4, m: 1151, red: ≈2^123.0, δ: 1.004727, β: 311, d: 2175, ↻: ≈2^75.4, tag: dual
            >>> LWE.dual_hybrid(params)
            rop: ≈2^122.4, mem: ≈2^116.6, m: 1143, red: ≈2^122.2, δ: 1.004758, β: 308, d: 2157, ↻: ≈2^75.8, ζ: 10...
            >>> LWE.dual_hybrid(params, mitm_optimization=True)
            rop: ≈2^181.7, mem: ≈2^179.2, m: 1554, k: 42, ↻: 179, red: ≈2^181.4, δ: 1.003315, β: 519, d: 2513...

            >>> params = params.updated(Xs=ND.DiscreteGaussian(3.0))
            >>> LWE.dual(params)
            rop: ≈2^125.4, mem: ≈2^78.0, m: 1190, red: ≈2^125.3, δ: 1.004648, β: 319, d: 2214, ↻: ≈2^78.0, tag: dual
            >>> LWE.dual_hybrid(params)
            rop: ≈2^125.1, mem: ≈2^117.7, m: 1187, red: ≈2^125.0, δ: 1.004657, β: 318, d: 2204, ↻: ≈2^75.9, ζ: 7...
            >>> LWE.dual_hybrid(params, mitm_optimization=True)
            rop: ≈2^175.0, mem: ≈2^168.9, m: 1547, k: 27, ↻: 169, red: ≈2^175.0, δ: 1.003424, β: 496, d: 2544, ζ: 27...
        """

        Cost.register_impermanent(
            rop=True,
            mem=False,
            red=True,
            beta=False,
            delta=False,
            m=True,
            d=False,
            zeta=False,
        )
        Logging.log("dual", log_level, f"costing LWE instance: {repr(params)}")

        params = params.normalize()

        if params.Xs.is_sparse:
            Cost.register_impermanent(h1=False)

            def _optimize_blocksize(
                solver,
                params: LWEParameters,
                zeta: int = 0,
                success_probability: float = 0.99,
                red_cost_model=red_cost_model_default,
                use_lll=True,
                log_level=None,
            ):
                h = params.Xs.get_hamming_weight(params.n)
                h1_min = max(0, h - (params.n - zeta))
                h1_max = min(zeta, h)
                Logging.log("dual", log_level, f"h1 ∈ [{h1_min},{h1_max}] (zeta={zeta})")
                with local_minimum(h1_min, h1_max, log_level=log_level + 1) as it:
                    for h1 in it:
                        cost = self.optimize_blocksize(
                            h1=h1,
                            solver=solver,
                            params=params,
                            zeta=zeta,
                            success_probability=success_probability,
                            red_cost_model=red_cost_model,
                            use_lll=use_lll,
                            log_level=log_level + 2,
                        )
                        it.update(cost)
                    return it.y

        else:
            _optimize_blocksize = self.optimize_blocksize

        f = partial(
            _optimize_blocksize,
            solver=solver,
            params=params,
            success_probability=success_probability,
            red_cost_model=red_cost_model,
            use_lll=use_lll,
            log_level=log_level + 1,
        )

        with local_minimum(1, params.n - 1, opt_step) as it:
            for zeta in it:
                it.update(f(zeta=zeta))
            for zeta in it.neighborhood:
                it.update(f(zeta=zeta))
            cost = it.y

        cost["problem"] = params
        return cost


DH = DualHybrid()


def dual(
    params: LWEParameters,
    success_probability: float = 0.99,
    red_cost_model=red_cost_model_default,
    use_lll=True,
):
    """
    Dual attack as in [PQCBook:MicReg09]_.

    :param params: LWE parameters.
    :param success_probability: The success probability to target.
    :param red_cost_model: How to cost lattice reduction.
    :param use_lll: Use LLL calls to produce more small vectors.

    The returned cost dictionary has the following entries:

    - ``rop``: Total number of word operations (≈ CPU cycles).
    - ``mem``: Total amount of memory used by solver (in elements mod q).
    - ``red``: Number of word operations in lattice reduction.
    - ``δ``: Root-Hermite factor targeted by lattice reduction.
    - ``β``: BKZ block size.
    - ``prob``: Probability of success in guessing.
    - ``repetitions``: How often we are required to repeat the attack.
    - ``d``: Lattice dimension.
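
    EXAMPLE (mirroring the first doctest in ``DualHybrid.__call__`` above)::

        >>> from estimator import *
        >>> params = LWE.Parameters(n=1024, q = 2**32, Xs=ND.Uniform(0,1), Xe=ND.DiscreteGaussian(3.0))
        >>> LWE.dual(params)
        rop: ≈2^115.5, mem: ≈2^68.0, m: 1018, red: ≈2^115.4, δ: 1.005021, β: 284, d: 2042, ↻: ≈2^68.0, tag: dual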

    """
    Cost.register_impermanent(
        rop=True,
        mem=False,
        red=True,
        beta=False,
        delta=False,
        m=True,
        d=False,
    )

    ret = DH.optimize_blocksize(
        solver=distinguish,
        params=params,
        zeta=0,
        h1=0,
        success_probability=success_probability,
        red_cost_model=red_cost_model,
        use_lll=use_lll,
        log_level=1,
    )
    del ret["zeta"]
    if hasattr(ret, "h1"):
        del ret["h1"]
    ret["tag"] = "dual"
    return ret


def dual_hybrid(
    params: LWEParameters,
    success_probability: float = 0.99,
    red_cost_model=red_cost_model_default,
    use_lll=True,
    mitm_optimization=False,
    opt_step=2,
):
    """
    Dual hybrid attack from [INDOCRYPT:EspJouKha20]_.

    :param params: LWE parameters.
    :param success_probability: The success probability to target.
    :param red_cost_model: How to cost lattice reduction.
    :param use_lll: Use LLL calls to produce more small vectors.
    :param mitm_optimization: One of "analytical" or "numerical". If ``True`` a default from the
        ``conf`` module is picked, ``False`` disables MITM.
    :param opt_step: Control robustness of optimizer.

    The returned cost dictionary has the following entries:

    - ``rop``: Total number of word operations (≈ CPU cycles).
    - ``mem``: Total amount of memory used by solver (in elements mod q).
    - ``red``: Number of word operations in lattice reduction.
    - ``δ``: Root-Hermite factor targeted by lattice reduction.
    - ``β``: BKZ block size.
    - ``ζ``: Number of guessed coordinates.
    - ``h1``: Number of non-zero components among guessed coordinates (if secret distribution is sparse)
    - ``prob``: Probability of success in guessing.
    - ``repetitions``: How often we are required to repeat the attack.
    - ``d``: Lattice dimension.
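
    EXAMPLE (mirroring a doctest in ``DualHybrid.__call__`` above)::

        >>> from estimator import *
        >>> params = LWE.Parameters(n=1024, q = 2**32, Xs=ND.Uniform(0,1), Xe=ND.DiscreteGaussian(3.0))
        >>> LWE.dual_hybrid(params)
        rop: ≈2^111.3, mem: ≈2^106.4, m: 983, red: ≈2^111.2, δ: 1.005204, β: 269, d: 1957, ↻: ≈2^56.4, ζ: 50...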
    """

    if mitm_optimization is True:
        mitm_optimization = mitm_opt_default

    if mitm_optimization:
        solver = partial(mitm, optimization=mitm_optimization)
    else:
        solver = exhaustive_search

    ret = DH(
        solver=solver,
        params=params,
        success_probability=success_probability,
        red_cost_model=red_cost_model,
        use_lll=use_lll,
        opt_step=opt_step,
    )
    if mitm_optimization:
        ret["tag"] = "dual_mitm_hybrid"
    else:
        ret["tag"] = "dual_hybrid"
    return ret
@@ -1,417 +0,0 @@
# -*- coding: utf-8 -*-
"""
Generic multiplicative composition of guessing some components of the LWE secret and some LWE
solving algorithm.

By "multiplicative" we mean that costs multiply rather than add. It is often possible to achieve
some form of additive composition, i.e. this strategy is rarely the most efficient.

"""

from sage.all import log, floor, ceil, binomial
from sage.all import sqrt, pi, exp, RR, ZZ, oo, round, e

from .conf import mitm_opt
from .cost import Cost
from .errors import InsufficientSamplesError, OutOfBoundsError
from .lwe_parameters import LWEParameters
from .prob import amplify as prob_amplify
from .prob import drop as prob_drop
from .prob import amplify_sigma
from .util import local_minimum
from .nd import sigmaf


def log2(x):
    return log(x, 2)


class guess_composition:
    def __init__(self, f):
        """
        Create a generic composition of guessing and `f`.
        """
        self.f = f
        self.__name__ = f"{f.__name__}+guessing"

    @classmethod
    def dense_solve(cls, f, params, log_level=5, **kwds):
        """
        Guess components of a dense secret then call `f`.

        :param f: Some object consuming `params` and outputting some `cost`
        :param params: LWE parameters.

        """
        base = params.Xs.bounds[1] - params.Xs.bounds[0] + 1

        baseline_cost = f(params, **kwds)

        max_zeta = min(floor(log(baseline_cost["rop"], base)), params.n)
|
||||
|
||||
with local_minimum(0, max_zeta, log_level=log_level) as it:
|
||||
for zeta in it:
|
||||
search_space = base ** zeta
|
||||
cost = f(params.updated(n=params.n - zeta), log_level=log_level + 1, **kwds)
|
||||
repeated_cost = cost.repeat(search_space)
|
||||
repeated_cost["zeta"] = zeta
|
||||
it.update(repeated_cost)
|
||||
return it.y
|
||||
|
||||
@classmethod
|
||||
def gammaf(cls, n, h, zeta, base, g=lambda x: x):
|
||||
"""
|
||||
Find optimal hamming weight for sparse guessing.
|
||||
|
||||
Let `s` be a vector of dimension `n` where we expect `h` non-zero entries. We are ignoring `η-γ`
|
||||
components and are guessing `γ`. This succeeds with some probability given by ``prob_drop(n, h,
|
||||
ζ, γ)``. Exhaustively searching the guesses takes `binomial(n, γ) ⋅ b^γ` steps where `b` is the
|
||||
number of non-zero values in a component of `s`. We call a `γ` optimal if it minimizes the
|
||||
overall number of repetitions that need to be performed to succeed with probability 99%.
|
||||
|
||||
:param n: vector dimension
|
||||
:param h: hamming weight of the vector
|
||||
:param zeta: number of ignored + guesses components
|
||||
:param base: number of possible non-zero scalars
|
||||
:param g: We do not consider search space directly by `g()` applied to it (think time-memory
|
||||
trade-offs).
|
||||
:returns: (number of repetitions, γ, size of the search space, probability of success)
|
||||
|
||||
"""
|
||||
if not zeta:
|
||||
return 1, 0, 0, 1.0
|
||||
|
||||
search_space = 0
|
||||
gamma = 0
|
||||
probability = 0
|
||||
best = None, None, None, None
|
||||
while gamma < min(h, zeta):
|
||||
probability += prob_drop(n, h, zeta, fail=gamma)
|
||||
search_space += binomial(zeta, gamma) * base ** gamma
|
||||
repeat = prob_amplify(0.99, probability) * g(search_space)
|
||||
if best[0] is None or repeat < best[0]:
|
||||
best = repeat, gamma, search_space, probability
|
||||
gamma += 1
|
||||
else:
|
||||
break
|
||||
return best
|
||||
|
||||
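# --- Illustrative sketch (not part of the original files): the search-space sum
# --- that gammaf() accumulates, written out with standard-library arithmetic.
# --- The parameter values are examples only.
from math import comb, log

n, h, zeta, base = 512, 16, 256, 2  # e.g. a sparse ternary secret, guessing half
gamma = 2
# searching all guesses with at most gamma non-zeros among the zeta dropped coordinates
search_space = sum(comb(zeta, g) * base ** g for g in range(gamma + 1))
print(f"|S| ~ 2^{log(search_space, 2):.1f}")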
    @classmethod
    def sparse_solve(cls, f, params, log_level=5, **kwds):
        """
        Guess components of a sparse secret then call `f`.

        :param f: Some object consuming `params` and outputting some `cost`
        :param params: LWE parameters.
        """
        base = params.Xs.bounds[1] - params.Xs.bounds[0]  # we exclude zero
        h = ceil(len(params.Xs) * params.Xs.density)  # number of non-zero entries

        with local_minimum(0, params.n - 40, log_level=log_level) as it:
            for zeta in it:
                single_cost = f(params.updated(n=params.n - zeta), log_level=log_level + 1, **kwds)
                repeat, gamma, search_space, probability = cls.gammaf(params.n, h, zeta, base)
                cost = single_cost.repeat(repeat)
                cost["zeta"] = zeta
                cost["|S|"] = search_space
                cost["prob"] = probability
                it.update(cost)
            return it.y

    def __call__(self, params, log_level=5, **kwds):
        """
        Guess components of a secret then call `f`.

        :param params: LWE parameters.

        EXAMPLE::

            >>> from estimator import *
            >>> from estimator.lwe_guess import guess_composition
            >>> guess_composition(LWE.primal_usvp)(Kyber512.updated(Xs=ND.SparseTernary(512, 16)))
            rop: ≈2^102.8, red: ≈2^102.8, δ: 1.008705, β: 113, d: 421, tag: usvp, ↻: ≈2^37.5, ζ: 265, |S|: 1, ...

        Compare::

            >>> LWE.primal_hybrid(Kyber512.updated(Xs=ND.SparseTernary(512, 16)))
            rop: ≈2^86.6, red: ≈2^85.7, svp: ≈2^85.6, β: 104, η: 2, ζ: 371, |S|: ≈2^91.1, d: 308, prob: ≈2^-21.3, ...

        """
        if params.Xs.is_sparse:
            return self.sparse_solve(self.f, params, log_level, **kwds)
        else:
            return self.dense_solve(self.f, params, log_level, **kwds)
class ExhaustiveSearch:
    def __call__(self, params: LWEParameters, success_probability=0.99, quantum: bool = False):
        """
        Estimate cost of solving LWE via exhaustive search.

        :param params: LWE parameters
        :param success_probability: the targeted success probability
        :param quantum: use estimate for quantum computer (we simply take the square root of the search space)
        :return: A cost dictionary

        The returned cost dictionary has the following entries:

        - ``rop``: Total number of word operations (≈ CPU cycles).
        - ``mem``: memory requirement in integers mod q.
        - ``m``: Required number of samples to distinguish the correct solution with high probability.

        EXAMPLE::

            >>> from estimator import *
            >>> params = LWE.Parameters(n=64, q=2**40, Xs=ND.UniformMod(2), Xe=ND.DiscreteGaussian(3.2))
            >>> exhaustive_search(params)
            rop: ≈2^73.6, mem: ≈2^72.6, m: 397.198
            >>> params = LWE.Parameters(n=1024, q=2**40, Xs=ND.SparseTernary(n=1024, p=32), Xe=ND.DiscreteGaussian(3.2))
            >>> exhaustive_search(params)
            rop: ≈2^417.3, mem: ≈2^416.3, m: ≈2^11.2

        """
        params = LWEParameters.normalize(params)

        # there are two stages: enumeration and distinguishing, so we split up the success_probability
        probability = sqrt(success_probability)

        try:
            size = params.Xs.support_size(n=params.n, fraction=probability)
        except NotImplementedError:
            # not achieving required probability with search space
            # given our settings that means the search space is huge
            # so we approximate the cost with oo
            return Cost(rop=oo, mem=oo, m=1)

        if quantum:
            size = size.sqrt()

        # set m according to [ia.cr/2020/515]
        sigma = params.Xe.stddev / params.q
        m_required = RR(
            8 * exp(4 * pi * pi * sigma * sigma) * (log(size) - log(log(1 / probability)))
        )

        if params.m < m_required:
            raise InsufficientSamplesError(
                f"Exhaustive search: Need {m_required} samples but only {params.m} available."
            )
        else:
            m = m_required

        # we can compute A*s for all candidate s in time 2*size*m using
        # (the generalization [ia.cr/2021/152] of) the recursive algorithm
        # from [ia.cr/2020/515]
        cost = 2 * size * m

        ret = Cost(rop=cost, mem=cost / 2, m=m)
        return ret

    __name__ = "exhaustive_search"


exhaustive_search = ExhaustiveSearch()
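# --- Illustrative sketch (not part of the original files): the [ia.cr/2020/515]
# --- sample bound used above, m ~ 8 * exp(4*pi^2*sigma^2) * (ln|S| - ln ln(1/p)),
# --- recomputed with the standard library for the first doctest's parameters.
from math import exp, log, pi, sqrt

q, stddev = 2 ** 40, 3.2
p = sqrt(0.99)      # per-stage success probability
size = 2.0 ** 64    # |S| for a binary secret of dimension 64
sigma = stddev / q
m_required = 8 * exp(4 * pi ** 2 * sigma ** 2) * (log(size) - log(log(1 / p)))
print(f"m ~ {m_required:.3f}")  # ~397.198, as in the doctest above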
class MITM:

    locality = 0.05

    def X_range(self, nd):
        if nd.is_bounded:
            a, b = nd.bounds
            return b - a + 1, 1.0
        else:
            # setting fraction=0 to ensure that support size does not
            # throw error. we'll take the probability into account later
            rng = nd.support_size(n=1, fraction=0.0)
            return rng, nd.gaussian_tail_prob

    def local_range(self, center):
        return ZZ(floor((1 - self.locality) * center)), ZZ(ceil((1 + self.locality) * center))

    def mitm_analytical(self, params: LWEParameters, success_probability=0.99):
        nd_rng, nd_p = self.X_range(params.Xe)
        delta = nd_rng / params.q  # possible error range scaled

        sd_rng, sd_p = self.X_range(params.Xs)

        # determine the number of elements in the tables depending on splitting dim
        n = params.n
        k = round(n / (2 - delta))
        # we could now call self.cost with this k, but using our model below seems
        # about 3x faster and reasonably accurate

        if params.Xs.is_sparse:
            h = params.Xs.get_hamming_weight(n=params.n)
            split_h = round(h * k / n)
            success_probability_ = (
                binomial(k, split_h) * binomial(n - k, h - split_h) / binomial(n, h)
            )

            logT = RR(h * (log2(n) - log2(h) + log2(sd_rng - 1) + log2(e))) / (2 - delta)
            logT -= RR(log2(h) / 2)
            logT -= RR(h * h * log2(e) / (2 * n * (2 - delta) ** 2))
        else:
            success_probability_ = 1.0
            logT = k * log(sd_rng, 2)

        m_ = max(1, round(logT + log(logT, 2)))
        if params.m < m_:
            raise InsufficientSamplesError(
                f"MITM: Need {m_} samples but only {params.m} available."
            )

        # since m = logT + loglogT and rop = T*m, we have rop=2^m
        ret = Cost(rop=RR(2 ** m_), mem=2 ** logT * m_, m=m_, k=ZZ(k))
        repeat = prob_amplify(success_probability, sd_p ** n * nd_p ** m_ * success_probability_)
        return ret.repeat(times=repeat)

    def cost(
        self,
        params: LWEParameters,
        k: int,
        success_probability=0.99,
    ):
        nd_rng, nd_p = self.X_range(params.Xe)
        delta = nd_rng / params.q  # possible error range scaled

        sd_rng, sd_p = self.X_range(params.Xs)
        n = params.n

        if params.Xs.is_sparse:
            h = params.Xs.get_hamming_weight(n=n)

            # we assume the hamming weight to be distributed evenly across the two parts
            # if not we can rerandomize on the coordinates and try again -> repeat
            split_h = round(h * k / n)
            size_tab = RR((sd_rng - 1) ** split_h * binomial(k, split_h))
            size_sea = RR((sd_rng - 1) ** (h - split_h) * binomial(n - k, h - split_h))
            success_probability_ = (
                binomial(k, split_h) * binomial(n - k, h - split_h) / binomial(n, h)
            )
        else:
            size_tab = sd_rng ** k
            size_sea = sd_rng ** (n - k)
            success_probability_ = 1

        # we set m such that it approximately minimizes the search cost per query as
        # a reasonable starting point and then optimize around it
        m_ = ceil(max(log2(size_tab) + log2(log2(size_tab)), 1))
        a, b = self.local_range(m_)
        with local_minimum(a, b, smallerf=lambda x, best: x[1] <= best[1]) as it:
            for m in it:
                # for search we effectively build a second table and for each entry, we expect
                # 2^(m * 4 * B / q) = 2^(delta * m) table look-ups + a l_oo computation (costing m)
                # for every hit in the table (which has probability T/2^m)
                cost = (m, size_sea * (2 * m + 2 ** (delta * m) * (1 + size_tab * m / 2 ** m)))
                it.update(cost)
            m, cost_search = it.y
        m = min(m, params.m)

        # building the table costs 2*T*m using the generalization [ia.cr/2021/152] of
        # the recursive algorithm from [ia.cr/2020/515]
        cost_table = size_tab * 2 * m

        ret = Cost(rop=(cost_table + cost_search), m=m, k=k)
        ret["mem"] = size_tab * (k + m) + size_sea * (n - k + m)
        repeat = prob_amplify(success_probability, sd_p ** n * nd_p ** m * success_probability_)
        return ret.repeat(times=repeat)

    def __call__(self, params: LWEParameters, success_probability=0.99, optimization=mitm_opt):
        """
        Estimate cost of solving LWE via Meet-In-The-Middle attack.

        :param params: LWE parameters
        :param success_probability: the targeted success probability
        :param optimization: Either "analytical" (faster, default) or "numerical" (more accurate)
        :return: A cost dictionary

        The returned cost dictionary has the following entries:

        - ``rop``: Total number of word operations (≈ CPU cycles).
        - ``mem``: memory requirement in integers mod q.
        - ``m``: Required number of samples to distinguish the correct solution with high probability.
        - ``k``: Splitting dimension.
        - ``↻``: Repetitions required to achieve targeted success probability.

        EXAMPLE::

            >>> from estimator import *
            >>> params = LWE.Parameters(n=64, q=2**40, Xs=ND.UniformMod(2), Xe=ND.DiscreteGaussian(3.2))
            >>> mitm(params)
            rop: ≈2^37.0, mem: ≈2^37.2, m: 37, k: 32, ↻: 1
            >>> mitm(params, optimization="numerical")
            rop: ≈2^39.2, m: 36, k: 32, mem: ≈2^39.1, ↻: 1
            >>> params = LWE.Parameters(n=1024, q=2**40, Xs=ND.SparseTernary(n=1024, p=32), Xe=ND.DiscreteGaussian(3.2))
            >>> mitm(params)
            rop: ≈2^215.4, mem: ≈2^210.2, m: ≈2^13.1, k: 512, ↻: 43
            >>> mitm(params, optimization="numerical")
            rop: ≈2^216.0, m: ≈2^13.1, k: 512, mem: ≈2^211.4, ↻: 43

        """
        Cost.register_impermanent(rop=True, mem=False, m=True, k=False)

        params = LWEParameters.normalize(params)

        nd_rng, _ = self.X_range(params.Xe)
        if nd_rng >= params.q:
            # MITM attacks cannot handle an error this large.
            return Cost(rop=oo, mem=oo, m=0, k=0)

        if "analytical" in optimization:
            return self.mitm_analytical(params=params, success_probability=success_probability)
        elif "numerical" in optimization:
            with local_minimum(1, params.n - 1) as it:
                for k in it:
                    cost = self.cost(k=k, params=params, success_probability=success_probability)
                    it.update(cost)
                ret = it.y
                # if the noise is large, the curve might not be convex, so the above minimum
                # is not correct. Interestingly, in these cases, it seems that k=1 might be smallest
                ret1 = self.cost(k=1, params=params, success_probability=success_probability)
                return min(ret, ret1)
        else:
            raise ValueError("Unknown optimization method for MITM.")

    __name__ = "mitm"


mitm = MITM()
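# --- Illustrative sketch (not part of the original files): the analytical splitting
# --- dimension k ~ n / (2 - delta) from mitm_analytical() above, where delta is the
# --- error range scaled by q. For small noise the split is essentially balanced.
q, n = 2 ** 40, 64
error_range = 2 * 2 * 3.2 + 1  # rough width of a tail-cut Gaussian with sigma = 3.2
delta = error_range / q        # ~0 here, so k tends to n/2
k = round(n / (2 - delta))
print(k)  # 32, matching the doctest above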
class Distinguisher:
    def __call__(self, params: LWEParameters, success_probability=0.99):
        """
        Estimate cost of distinguishing a 0-dimensional LWE instance from uniformly random,
        which is essentially the number of samples required.

        :param params: LWE parameters
        :param success_probability: the targeted success probability
        :return: A cost dictionary

        The returned cost dictionary has the following entries:

        - ``rop``: Total number of word operations (≈ CPU cycles).
        - ``mem``: memory requirement in integers mod q.
        - ``m``: Required number of samples to distinguish.

        EXAMPLE::

            >>> from estimator import *
            >>> params = LWE.Parameters(n=0, q=2 ** 32, Xs=ND.UniformMod(2), Xe=ND.DiscreteGaussian(2 ** 32))
            >>> distinguish(params)
            rop: ≈2^60.0, mem: ≈2^60.0, m: ≈2^60.0

        """

        if params.n > 0:
            raise OutOfBoundsError(
                "Secret dimension should be 0 for distinguishing. Try exhaustive search for n > 0."
            )
        m = amplify_sigma(success_probability, sigmaf(params.Xe.stddev), params.q)
        if m > params.m:
            raise InsufficientSamplesError("Not enough samples to distinguish with target advantage.")
        return Cost(rop=m, mem=m, m=m)

    __name__ = "distinguish"


distinguish = Distinguisher()
@@ -1,161 +0,0 @@
# -*- coding: utf-8 -*-
from sage.all import oo, binomial, log, sqrt, ceil
from dataclasses import dataclass
from copy import copy
from .nd import NoiseDistribution
from .errors import InsufficientSamplesError


@dataclass
class LWEParameters:
    n: int
    q: int
    Xs: NoiseDistribution
    Xe: NoiseDistribution
    m: int = oo
    tag: str = None

    def __post_init__(self, **kwds):
        self.Xs = copy(self.Xs)
        self.Xs.n = self.n
        if self.m < oo:
            self.Xe = copy(self.Xe)
            self.Xe.n = self.m

    def normalize(self):
        """
        EXAMPLES:

        We perform the normal form transformation if χ_e < χ_s and we got the samples::

            >>> from estimator import *
            >>> Xs=ND.DiscreteGaussian(2.0)
            >>> Xe=ND.DiscreteGaussian(1.58)
            >>> LWE.Parameters(n=512, q=8192, Xs=Xs, Xe=Xe).normalize()
            LWEParameters(n=512, q=8192, Xs=D(σ=1.58), Xe=D(σ=1.58), m=+Infinity, tag=None)

        If m = n, we swap the secret and the noise::

            >>> from estimator import *
            >>> Xs=ND.DiscreteGaussian(2.0)
            >>> Xe=ND.DiscreteGaussian(1.58)
            >>> LWE.Parameters(n=512, q=8192, Xs=Xs, Xe=Xe, m=512).normalize()
            LWEParameters(n=512, q=8192, Xs=D(σ=1.58), Xe=D(σ=2.00), m=512, tag=None)

        """
        if self.m < 1:
            raise InsufficientSamplesError(f"m={self.m} < 1")

        # Normal form transformation
        if self.Xe < self.Xs and self.m >= 2 * self.n:
            return LWEParameters(
                n=self.n, q=self.q, Xs=self.Xe, Xe=self.Xe, m=self.m - self.n, tag=self.tag
            )

        # swap secret and noise
        # TODO: this is somewhat arbitrary
        if self.Xe < self.Xs and self.m < 2 * self.n:
            return LWEParameters(n=self.n, q=self.q, Xs=self.Xe, Xe=self.Xs, m=self.n, tag=self.tag)

        # nothing to do
        return self

    def updated(self, **kwds):
        """
        Return a new set of parameters updated according to ``kwds``.

        :param kwds: We set ``key`` to ``value`` in the new set of parameters.

        EXAMPLE::

            >>> from estimator import *
            >>> Kyber512
            LWEParameters(n=512, q=3329, Xs=D(σ=1.22), Xe=D(σ=1.22), m=512, tag='Kyber 512')
            >>> Kyber512.updated(m=1337)
            LWEParameters(n=512, q=3329, Xs=D(σ=1.22), Xe=D(σ=1.22), m=1337, tag='Kyber 512')

        """
        d = dict(self.__dict__)
        d.update(kwds)
        return LWEParameters(**d)

    def amplify_m(self, m):
        """
        Return new LWE parameters with ``m`` samples produced from the samples in this instance.

        :param m: New number of samples.

        EXAMPLE::

            >>> from sage.all import binomial, log
            >>> from estimator import *
            >>> Kyber512
            LWEParameters(n=512, q=3329, Xs=D(σ=1.22), Xe=D(σ=1.22), m=512, tag='Kyber 512')
            >>> Kyber512.amplify_m(2**100)
            LWEParameters(n=512, q=3329, Xs=D(σ=1.22), Xe=D(σ=4.58), m=..., tag='Kyber 512')

        We can produce 2^100 samples by random ± linear combinations of 12 vectors::

            >>> float(sqrt(12.)), float(log(binomial(1024, 12), 2.0)) + 12
            (3.46..., 103.07...)

        """
        if m <= self.m:
            return self
        if self.m == oo:
            return self
        d = dict(self.__dict__)

        if self.Xe.mean != 0:
            raise NotImplementedError("Amplifying for μ≠0 not implemented.")

        for k in range(ceil(log(m, 2.0))):
            # - binomial(m, k) choices of positions
            # - two signs per position (+1, -1)
            # - all "-" and all "+" are the same
            if binomial(self.m, k) * 2 ** k - 1 >= m:
                Xe = NoiseDistribution.DiscreteGaussian(float(sqrt(k) * self.Xe.stddev))
                d["Xe"] = Xe
                d["m"] = ceil(m)
                return LWEParameters(**d)
        else:
            raise NotImplementedError(
                f"Cannot amplify to ≈2^{log(m,2):1} using {{+1,-1}} additions."
            )

    def switch_modulus(self):
        """
        Apply modulus switching and return new instance.

        See [JMC:AlbPlaSco15]_ for details.

        EXAMPLE::

            >>> from estimator import *
            >>> LWE.Parameters(n=128, q=7681, Xs=ND.UniformMod(3), Xe=ND.UniformMod(11)).switch_modulus()
            LWEParameters(n=128, q=5289, Xs=D(σ=0.82), Xe=D(σ=3.08), m=+Infinity, tag=None)

        """
        n = self.Xs.density * len(self.Xs)

        # n components uniform in (-0.5, 0.5), scaled by stddev(χ_s)
        Xr_stddev = sqrt(n / 12) * self.Xs.stddev  # rounding noise
        # χ_r == p/q ⋅ χ_e, i.e. we want the rounding noise to match the scaled noise
        p = ceil(Xr_stddev * self.q / self.Xe.stddev)

        scale = float(p) / self.q

        # there is no point in scaling if the improvement is eaten up by rounding noise
        if scale > 1 / sqrt(2):
            return self

        return LWEParameters(
            self.n,
            p,
            Xs=self.Xs,
            Xe=NoiseDistribution.DiscreteGaussian(sqrt(2) * self.Xe.stddev * scale),
            m=self.m,
            tag=self.tag + ",scaled" if self.tag else None,
        )

    def __hash__(self):
        return hash((self.n, self.q, self.Xs, self.Xe, self.m, self.tag))
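# --- Illustrative sketch (not part of the original files): the modulus-switching
# --- target p = ceil(sqrt(n_eff/12) * stddev(Xs) * q / stddev(Xe)) for the doctest
# --- parameters above, reproduced with plain Python arithmetic.
from math import ceil, sqrt

n_dim, q = 128, 7681
xs_stddev = sqrt((3 ** 2 - 1) / 12)   # UniformMod(3): uniform on {-1, 0, 1}
xe_stddev = sqrt((11 ** 2 - 1) / 12)  # UniformMod(11)
density = 1 - 1 / 3                   # fraction of non-zero secret entries
n_eff = density * n_dim               # effective number of non-zero components
p = ceil(sqrt(n_eff / 12) * xs_stddev * q / xe_stddev)
print(p)  # 5289, the modulus in the doctest output above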
@@ -1,596 +0,0 @@
# -*- coding: utf-8 -*-
"""
Estimate cost of solving LWE using primal attacks.

See :ref:`LWE Primal Attacks` for an introduction to what is available.

"""
from functools import partial

from sage.all import oo, ceil, sqrt, log, RR, ZZ, binomial, cached_function
from .reduction import delta as deltaf
from .reduction import cost as costf
from .util import local_minimum
from .cost import Cost
from .lwe_parameters import LWEParameters
from .simulator import normalize as simulator_normalize
from .prob import drop as prob_drop
from .prob import amplify as prob_amplify
from .prob import babai as prob_babai
from .prob import mitm_babai_probability
from .io import Logging
from .conf import red_cost_model as red_cost_model_default
from .conf import red_shape_model as red_shape_model_default
from .conf import red_simulator as red_simulator_default


class PrimalUSVP:
    """
    Estimate cost of solving LWE via uSVP reduction.
    """

    @staticmethod
    def _xi_factor(Xs, Xe):
        xi = RR(1)
        if Xs < Xe:
            xi = Xe.stddev / Xs.stddev
        return xi

    @staticmethod
    def _solve_for_d(params, m, beta, tau, xi):
        """
        Find smallest d ∈ [n,m] to satisfy uSVP condition.

        If no such d exists, return the upper bound m.
        """
        # Find the smallest d ∈ [n,m] s.t. a*d^2 + b*d + c >= 0
        delta = deltaf(beta)
        a = -log(delta)
        C = log(params.Xe.stddev ** 2 * (beta - 1) + tau ** 2) / 2.0
        b = log(delta) * (2 * beta - 1) + log(params.q) - C
        c = log(tau) + params.n * log(xi) - (params.n + 1) * log(params.q)
        n = params.n
        if a * n * n + b * n + c >= 0:  # trivial case
            return n

        # solve for a*d^2 + b*d + c == 0
        disc = b * b - 4 * a * c  # the discriminant
        if disc < 0:  # no solution, return m
            return m

        # compute the two solutions
        d1 = (-b + sqrt(disc)) / (2 * a)
        d2 = (-b - sqrt(disc)) / (2 * a)
        if a > 0:  # the only possible solution is ceiling(d2)
            return min(m, ceil(d2))

        # the case a <= 0:
        # if n is to the left of d1 then the first solution is ceil(d1)
        if n <= d1:
            return min(m, ceil(d1))

        # otherwise, n must be larger than d2 (since a*n^2 + b*n + c < 0) so no solution
        return m
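# --- Illustrative sketch (not part of the original files): the shape of the
# --- computation in _solve_for_d() above, finding the smallest integer d in [n, m]
# --- with a*d^2 + b*d + c >= 0. The helper name and coefficients are made up.
from math import ceil, sqrt

def smallest_d(a, b, c, n, m):
    """Smallest integer d in [n, m] with a*d^2 + b*d + c >= 0, else m."""
    if a * n * n + b * n + c >= 0:
        return n
    disc = b * b - 4 * a * c
    if disc < 0:
        return m
    d1 = (-b + sqrt(disc)) / (2 * a)
    d2 = (-b - sqrt(disc)) / (2 * a)
    if a > 0:
        return min(m, ceil(d2))
    return min(m, ceil(d1)) if n <= d1 else m

print(smallest_d(-0.001, 1.0, -200.0, 100, 1500))  # 277, for these example coefficients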
    @staticmethod
    @cached_function
    def cost_gsa(
        beta: int,
        params: LWEParameters,
        m: int = oo,
        tau=None,
        d=None,
        red_cost_model=red_cost_model_default,
        log_level=None,
    ):

        delta = deltaf(beta)
        xi = PrimalUSVP._xi_factor(params.Xs, params.Xe)
        m = min(2 * ceil(sqrt(params.n * log(params.q) / log(delta))), m)
        tau = params.Xe.stddev if tau is None else tau
        d = PrimalUSVP._solve_for_d(params, m, beta, tau, xi) if d is None else d
        assert d <= m + 1

        lhs = log(sqrt(params.Xe.stddev ** 2 * (beta - 1) + tau ** 2))
        rhs = RR(
            log(delta) * (2 * beta - d - 1)
            + (log(tau) + log(xi) * params.n + log(params.q) * (d - params.n - 1)) / d
        )

        return costf(red_cost_model, beta, d, predicate=lhs <= rhs)

    @staticmethod
    @cached_function
    def cost_simulator(
        beta: int,
        params: LWEParameters,
        simulator,
        m: int = oo,
        tau=None,
        d=None,
        red_cost_model=red_cost_model_default,
        log_level=None,
    ):
        delta = deltaf(beta)
        if d is None:
            d = min(ceil(sqrt(params.n * log(params.q) / log(delta))), m) + 1
        xi = PrimalUSVP._xi_factor(params.Xs, params.Xe)
        tau = params.Xe.stddev if tau is None else tau

        r = simulator(d=d, n=params.n, q=params.q, beta=beta, xi=xi, tau=tau)
        lhs = params.Xe.stddev ** 2 * (beta - 1) + tau ** 2
        if r[d - beta] > lhs:
            cost = costf(red_cost_model, beta, d)
        else:
            cost = costf(red_cost_model, beta, d, predicate=False)
        return cost

    def __call__(
        self,
        params: LWEParameters,
        red_cost_model=red_cost_model_default,
        red_shape_model=red_shape_model_default,
        optimize_d=True,
        log_level=1,
        **kwds,
    ):
        """
        Estimate cost of solving LWE via uSVP reduction.

        :param params: LWE parameters.
        :param red_cost_model: How to cost lattice reduction.
        :param red_shape_model: How to model the shape of a reduced basis.
        :param optimize_d: Attempt to find minimal d, too.
        :return: A cost dictionary.

        The returned cost dictionary has the following entries:

        - ``rop``: Total number of word operations (≈ CPU cycles).
        - ``red``: Number of word operations in lattice reduction.
        - ``δ``: Root-Hermite factor targeted by lattice reduction.
        - ``β``: BKZ block size.
        - ``d``: Lattice dimension.

        EXAMPLE::

            >>> from estimator import *
            >>> LWE.primal_usvp(Kyber512)
            rop: ≈2^148.0, red: ≈2^148.0, δ: 1.003941, β: 406, d: 998, tag: usvp

            >>> params = LWE.Parameters(n=200, q=127, Xs=ND.UniformMod(3), Xe=ND.UniformMod(3))
            >>> LWE.primal_usvp(params, red_shape_model="cn11")
            rop: ≈2^91.2, red: ≈2^91.2, δ: 1.006114, β: 209, d: 388, tag: usvp

            >>> LWE.primal_usvp(params, red_shape_model=Simulator.CN11)
            rop: ≈2^91.2, red: ≈2^91.2, δ: 1.006114, β: 209, d: 388, tag: usvp

            >>> LWE.primal_usvp(params, red_shape_model=Simulator.CN11, optimize_d=False)
            rop: ≈2^91.3, red: ≈2^91.3, δ: 1.006114, β: 209, d: 400, tag: usvp

        The success condition was formulated in [USENIX:ADPS16]_ and studied/verified in
        [AC:AGVW17]_, [C:DDGR20]_, [PKC:PosVir21]_. The treatment of small secrets is from
        [ACISP:BaiGal14]_.

        """
        params = LWEParameters.normalize(params)

        # allow for a larger embedding lattice dimension: Bai and Galbraith
        m = params.m + params.n if params.Xs <= params.Xe else params.m

        if red_shape_model == "gsa":
            with local_minimum(40, 2 * params.n) as it:
                for beta in it:
                    cost = self.cost_gsa(
                        beta=beta, params=params, m=m, red_cost_model=red_cost_model, **kwds
                    )
                    it.update(cost)
                cost = it.y
            cost["tag"] = "usvp"
            cost["problem"] = params
            return cost

        try:
            red_shape_model = simulator_normalize(red_shape_model)
        except ValueError:
            pass

        # step 0. establish baseline
        cost_gsa = self(
            params,
            red_cost_model=red_cost_model,
            red_shape_model="gsa",
        )

        Logging.log("usvp", log_level + 1, f"GSA: {repr(cost_gsa)}")

        f = partial(
            self.cost_simulator,
            simulator=red_shape_model,
            red_cost_model=red_cost_model,
            m=m,
            params=params,
        )

        # step 1. find β
        with local_minimum(
            cost_gsa["beta"] - ceil(0.10 * cost_gsa["beta"]),
            cost_gsa["beta"] + ceil(0.20 * cost_gsa["beta"]),
        ) as it:
            for beta in it:
                it.update(f(beta=beta, **kwds))
            cost = it.y

        Logging.log("usvp", log_level, f"Opt-β: {repr(cost)}")

        if cost and optimize_d:
            # step 2. find d
            with local_minimum(params.n, stop=cost["d"] + 1) as it:
                for d in it:
                    it.update(f(d=d, beta=cost["beta"], **kwds))
                cost = it.y
            Logging.log("usvp", log_level + 1, f"Opt-d: {repr(cost)}")

        cost["tag"] = "usvp"
        cost["problem"] = params
        return cost

    __name__ = "primal_usvp"


primal_usvp = PrimalUSVP()
class PrimalHybrid:
    @classmethod
    def babai_cost(cls, d):
        return Cost(rop=max(d, 1) ** 2)

    @classmethod
    def svp_dimension(cls, r, D):
        """
        Return η for a given lattice shape and distance.

        :param r: squared Gram-Schmidt norms

        """
        from fpylll.util import gaussian_heuristic

        d = len(r)
        for i, _ in enumerate(r):
            if gaussian_heuristic(r[i:]) < D.stddev ** 2 * (d - i):
                return ZZ(d - (i - 1))
        return ZZ(2)

    @staticmethod
    @cached_function
    def cost(
        beta: int,
        params: LWEParameters,
        zeta: int = 0,
        babai=False,
        mitm=False,
        m: int = oo,
        d: int = None,
        simulator=red_simulator_default,
        red_cost_model=red_cost_model_default,
        log_level=5,
    ):
        """
        Cost of the hybrid attack.

        :param beta: Block size.
        :param params: LWE parameters.
        :param zeta: Guessing dimension ζ ≥ 0.
        :param babai: Insist on Babai's algorithm for finding close vectors.
        :param mitm: Simulate MITM approach (√ of search space).
        :param m: We accept the number of samples to consider from the calling function.
        :param d: We optionally accept the dimension to pick.

        .. note :: This is the lowest level function that runs no optimization, it merely reports
           costs.

        """
        if d is None:
            delta = deltaf(beta)
            d = min(ceil(sqrt(params.n * log(params.q) / log(delta))), m) + 1
        d -= zeta
        xi = PrimalUSVP._xi_factor(params.Xs, params.Xe)

        # 1. Simulate BKZ-β
        # TODO: pick τ
        r = simulator(d, params.n - zeta, params.q, beta, xi=xi)
        bkz_cost = costf(red_cost_model, beta, d)

        # 2. Required SVP dimension η
        if babai:
            eta = 2
            svp_cost = PrimalHybrid.babai_cost(d)
        else:
            # we scaled the lattice so that χ_e is what we want
            eta = PrimalHybrid.svp_dimension(r, params.Xe)
            svp_cost = costf(red_cost_model, eta, eta)
            # when η ≪ β, lifting may be a bigger cost
            svp_cost["rop"] += PrimalHybrid.babai_cost(d - eta)["rop"]

        # 3. Search
        # We need to do one BDD call at least
        search_space, probability, hw = 1, 1.0, 0

        # MITM or no MITM
        # TODO: this is rather clumsy as a model
        def ssf(x):
            if mitm:
                return RR(sqrt(x))
            else:
                return x

        # e.g. (-1, 1) -> two non-zero per entry
        base = params.Xs.bounds[1] - params.Xs.bounds[0]

        if zeta:
            # the number of non-zero entries
            h = ceil(len(params.Xs) * params.Xs.density)
            probability = RR(prob_drop(params.n, h, zeta))
            hw = 1
            while hw < min(h, zeta):
                new_search_space = binomial(zeta, hw) * base ** hw
                if svp_cost.repeat(ssf(search_space + new_search_space))["rop"] < bkz_cost["rop"]:
                    search_space += new_search_space
                    probability += prob_drop(params.n, h, zeta, fail=hw)
                    hw += 1
                else:
                    break

            svp_cost = svp_cost.repeat(ssf(search_space))

        if mitm and zeta > 0:
            if babai:
                probability *= mitm_babai_probability(r, params.Xe.stddev, params.q)
            else:
                # TODO: the probability in this case needs to be analysed
                probability *= 1

        if eta <= 20 and d >= 0:  # NOTE: η: somewhat arbitrary bound, d: we may guess it all
            probability *= RR(prob_babai(r, sqrt(d) * params.Xe.stddev))

        ret = Cost()
        ret["rop"] = bkz_cost["rop"] + svp_cost["rop"]
        ret["red"] = bkz_cost["rop"]
        ret["svp"] = svp_cost["rop"]
        ret["beta"] = beta
        ret["eta"] = eta
        ret["zeta"] = zeta
        ret["|S|"] = search_space
        ret["d"] = d
        ret["prob"] = probability

        ret.register_impermanent(
            {"|S|": False},
            rop=True,
            red=True,
            svp=True,
            eta=False,
            zeta=False,
            prob=False,
        )

        # 4. Repeat whole experiment ~1/prob times
        if probability and not RR(probability).is_NaN():
            ret = ret.repeat(
                prob_amplify(0.99, probability),
            )
        else:
            return Cost(rop=oo)

        return ret

    @classmethod
    def cost_zeta(
        cls,
        zeta: int,
        params: LWEParameters,
        red_shape_model=red_simulator_default,
        red_cost_model=red_cost_model_default,
        m: int = oo,
        babai: bool = True,
        mitm: bool = True,
        optimize_d=True,
        log_level=5,
        **kwds,
    ):
        """
        This function optimizes costs for a fixed guessing dimension ζ.
        """

        # step 0. establish baseline
        baseline_cost = primal_usvp(
            params,
            red_shape_model=red_shape_model,
            red_cost_model=red_cost_model,
            optimize_d=False,
            log_level=log_level + 1,
            **kwds,
        )
        Logging.log("bdd", log_level, f"H0: {repr(baseline_cost)}")

        f = partial(
            cls.cost,
            params=params,
            zeta=zeta,
            babai=babai,
            mitm=mitm,
            simulator=red_shape_model,
            red_cost_model=red_cost_model,
            m=m,
            **kwds,
        )

        # step 1. optimize β
        with local_minimum(
            40, baseline_cost["beta"] + 1, precision=2, log_level=log_level + 1
        ) as it:
            for beta in it:
                it.update(f(beta))
            for beta in it.neighborhood:
                it.update(f(beta))
            cost = it.y

        Logging.log("bdd", log_level, f"H1: {repr(cost)}")

        # step 2. optimize d
        if cost and cost.get("tag", "XXX") != "usvp" and optimize_d:
            with local_minimum(
                params.n, cost["d"] + cost["zeta"] + 1, log_level=log_level + 1
            ) as it:
                for d in it:
                    it.update(f(beta=cost["beta"], d=d))
                cost = it.y
            Logging.log("bdd", log_level, f"H2: {repr(cost)}")

        if cost is None:
            return Cost(rop=oo)
        return cost

    def __call__(
        self,
        params: LWEParameters,
        babai: bool = True,
        zeta: int = None,
        mitm: bool = True,
        red_shape_model=red_shape_model_default,
        red_cost_model=red_cost_model_default,
        log_level=1,
        **kwds,
    ):
        """
        Estimate the cost of the hybrid attack and its variants.

        :param params: LWE parameters.
        :param zeta: Guessing dimension ζ ≥ 0.
        :param babai: Insist on Babai's algorithm for finding close vectors.
        :param mitm: Simulate MITM approach (√ of search space).
        :return: A cost dictionary

        The returned cost dictionary has the following entries:

        - ``rop``: Total number of word operations (≈ CPU cycles).
        - ``red``: Number of word operations in lattice reduction.
        - ``δ``: Root-Hermite factor targeted by lattice reduction.
        - ``β``: BKZ block size.
        - ``η``: Dimension of the final BDD call.
        - ``ζ``: Number of guessed coordinates.
        - ``|S|``: Guessing search space.
        - ``prob``: Probability of success in guessing.
        - ``repeat``: How often to repeat the attack.
        - ``d``: Lattice dimension.

        - When ζ = 0 this function essentially estimates the BDD strategy as given in [RSA:LiuNgu13]_.
        - When ζ ≠ 0 and ``babai=True`` this function estimates the hybrid attack as given in
          [C:HowgraveGraham07]_.
        - When ζ ≠ 0 and ``babai=False`` this function estimates the hybrid attack as given in
          [SAC:AlbCurWun19]_.

        EXAMPLE::

            >>> from estimator import *
            >>> LWE.primal_hybrid(Kyber512.updated(Xs=ND.SparseTernary(512, 16)), mitm = False, babai = False)
            rop: ≈2^94.9, red: ≈2^94.3, svp: ≈2^93.3, β: 178, η: 21, ζ: 256, |S|: ≈2^56.6, d: 531, prob: 0.003, ...

            >>> LWE.primal_hybrid(Kyber512.updated(Xs=ND.SparseTernary(512, 16)), mitm = False, babai = True)
            rop: ≈2^94.7, red: ≈2^94.0, svp: ≈2^93.3, β: 169, η: 2, ζ: 256, |S|: ≈2^62.4, d: 519, prob: 0.001, ...

            >>> LWE.primal_hybrid(Kyber512.updated(Xs=ND.SparseTernary(512, 16)), mitm = True, babai = False)
            rop: ≈2^75.4, red: ≈2^75.0, svp: ≈2^73.3, β: 102, η: 15, ζ: 322, |S|: ≈2^82.9, d: 354, prob: 0.001, ...

            >>> LWE.primal_hybrid(Kyber512.updated(Xs=ND.SparseTernary(512, 16)), mitm = True, babai = True)
            rop: ≈2^86.6, red: ≈2^85.7, svp: ≈2^85.6, β: 104, η: 2, ζ: 371, |S|: ≈2^91.1, d: 308, prob: ≈2^-21.3, ...

        """

        if zeta == 0:
            tag = "bdd"
        else:
            tag = "hybrid"

        params = LWEParameters.normalize(params)

        # allow for a larger embedding lattice dimension: Bai and Galbraith
        m = params.m + params.n if params.Xs <= params.Xe else params.m

        red_shape_model = simulator_normalize(red_shape_model)

        f = partial(
            self.cost_zeta,
            params=params,
            red_shape_model=red_shape_model,
            red_cost_model=red_cost_model,
            babai=babai,
            mitm=mitm,
            m=m,
            log_level=log_level + 1,
        )

        if zeta is None:
            with local_minimum(0, params.n, log_level=log_level) as it:
                for zeta in it:
                    it.update(
                        f(
                            zeta=zeta,
                            optimize_d=False,
                            **kwds,
                        )
                    )
                # TODO: this should not be required
                cost = min(it.y, f(0, optimize_d=False, **kwds))
        else:
            cost = f(zeta=zeta)

        cost["tag"] = tag
        cost["problem"] = params

        if tag == "bdd":
            try:
                del cost["|S|"]
                del cost["prob"]
                del cost["repetitions"]
                del cost["zeta"]
            except KeyError:
                pass

        return cost

    __name__ = "primal_hybrid"


primal_hybrid = PrimalHybrid()
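# --- Illustrative sketch (not part of the original files): how the guessing search
# --- space grows in step 3 of PrimalHybrid.cost() above, accumulating Hamming
# --- weights while the (square-rooted, under MITM) number of BDD calls stays below
# --- the BKZ cost. The two cost values are made up for illustration.
from math import comb, log, sqrt

zeta, h, base = 256, 16, 2               # guessed coordinates, secret weight, non-zero values
bkz_rop, bdd_rop = 2.0 ** 94, 2.0 ** 40  # example costs only
search_space, hw = 1, 1
while hw < min(h, zeta):
    candidate = search_space + comb(zeta, hw) * base ** hw
    if bdd_rop * sqrt(candidate) >= bkz_rop:  # MITM: sqrt of the search space
        break
    search_space, hw = candidate, hw + 1
print(f"hw={hw}, |S| ~ 2^{log(search_space, 2):.1f}")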
def primal_bdd(
    params: LWEParameters,
    red_shape_model=red_shape_model_default,
    red_cost_model=red_cost_model_default,
    log_level=1,
    **kwds,
):
    """
    Estimate the cost of the BDD approach as given in [RSA:LiuNgu13]_.

    :param params: LWE parameters.
    :param red_cost_model: How to cost lattice reduction.
    :param red_shape_model: How to model the shape of a reduced basis.

    """

    return primal_hybrid(
        params,
        zeta=0,
        mitm=False,
        babai=False,
        red_shape_model=red_shape_model,
        red_cost_model=red_cost_model,
        log_level=log_level,
        **kwds,
    )
@@ -1,374 +0,0 @@
# -*- coding: utf-8 -*-

from dataclasses import dataclass

from sage.all import parent, RR, RealField, sqrt, pi, oo, ceil, binomial, exp


def stddevf(sigma):
    """
    Gaussian width parameter σ → standard deviation.

    :param sigma: Gaussian width parameter σ

    EXAMPLE::

        >>> from estimator.nd import stddevf
        >>> stddevf(64.0)
        25.532...

        >>> stddevf(64)
        25.532...

        >>> stddevf(RealField(256)(64)).prec()
        256

    """

    try:
        prec = parent(sigma).prec()
    except AttributeError:
        prec = 0
    if prec > 0:
        FF = parent(sigma)
    else:
        FF = RR
    return FF(sigma) / FF(sqrt(2 * pi))


def sigmaf(stddev):
    """
    Standard deviation → Gaussian width parameter σ.

    :param stddev: standard deviation

    EXAMPLE::

        >>> from estimator.nd import stddevf, sigmaf
        >>> n = 64.0
        >>> sigmaf(stddevf(n))
        64.000...

        >>> sigmaf(RealField(128)(1.0))
        2.5066282746310005024157652848110452530
        >>> sigmaf(1.0)
        2.506628274631...
        >>> sigmaf(1)
        2.506628274631...

    """
    RR = parent(stddev)
    # check that we got ourselves a real number type
    try:
        if abs(RR(0.5) - 0.5) > 0.001:
            RR = RealField(53)  # hardcode something
    except TypeError:
        RR = RealField(53)  # hardcode something
    return RR(sqrt(2 * pi)) * stddev


@dataclass
class NoiseDistribution:
    """
    All noise distributions are instances of this class.

    """

    # cut-off for Gaussian distributions
    gaussian_tail_bound = 2
    # probability that a coefficient falls within the cut-off
    gaussian_tail_prob = 1 - 2 * exp(-4 * pi)

    stddev: float
    mean: float = 0
    n: int = None
    bounds: tuple = None
    density: float = 1.0  # Hamming weight / dimension.
    tag: str = ""

    def __lt__(self, other):
        """
        We compare distributions by comparing their standard deviation.

        EXAMPLE::

            >>> from estimator.nd import NoiseDistribution as ND
            >>> ND.DiscreteGaussian(2.0) < ND.CenteredBinomial(18)
            True
            >>> ND.DiscreteGaussian(3.0) < ND.CenteredBinomial(18)
            False
            >>> ND.DiscreteGaussian(4.0) < ND.CenteredBinomial(18)
            False

        """
        try:
            return self.stddev < other.stddev
        except AttributeError:
            return self.stddev < other

    def __le__(self, other):
        """
        We compare distributions by comparing their standard deviation.

        EXAMPLE::

            >>> from estimator.nd import NoiseDistribution as ND
            >>> ND.DiscreteGaussian(2.0) <= ND.CenteredBinomial(18)
            True
            >>> ND.DiscreteGaussian(3.0) <= ND.CenteredBinomial(18)
            True
            >>> ND.DiscreteGaussian(4.0) <= ND.CenteredBinomial(18)
            False

        """
        try:
            return self.stddev <= other.stddev
        except AttributeError:
            return self.stddev <= other

    def __str__(self):
        """
        EXAMPLE::

            >>> from estimator.nd import NoiseDistribution as ND
            >>> ND.DiscreteGaussianAlpha(0.01, 7681)
            D(σ=30.64)

        """
        if self.n:
            return f"D(σ={float(self.stddev):.2f}, μ={float(self.mean):.2f}, n={int(self.n)})"
        else:
            return f"D(σ={float(self.stddev):.2f}, μ={float(self.mean):.2f})"

    def __repr__(self):
        if self.mean == 0.0:
            return f"D(σ={float(self.stddev):.2f})"
        else:
            return f"D(σ={float(self.stddev):.2f}, μ={float(self.mean):.2f})"

    def __hash__(self):
        """
        EXAMPLE::

            >>> from estimator.nd import NoiseDistribution as ND
            >>> hash(ND(3.0, 1.0)) == hash((3.0, 1.0, None))
            True

        """
        return hash((self.stddev, self.mean, self.n))

    def __len__(self):
        """
        EXAMPLE::

            >>> from estimator.nd import NoiseDistribution as ND
            >>> D = ND.SparseTernary(1024, p=128, m=128)
            >>> len(D)
            1024
            >>> int(round(len(D) * float(D.density)))
            256

        """
        if self.n is not None:
            return self.n
        else:
            raise ValueError("Distribution has no length.")

    @property
    def is_Gaussian_like(self):
        return ("Gaussian" in self.tag) or ("CenteredBinomial" in self.tag)

    @property
    def is_bounded(self):
        return (self.bounds[1] - self.bounds[0]) < oo

    @property
    def is_sparse(self):
        """
        We consider a distribution "sparse" if its density is < 1/2.
        """
        # NOTE: somewhat arbitrary
        return self.density < 0.5

    def support_size(self, n=None, fraction=1.0):
        """
        Compute the size of the support covering the probability given as fraction.

        EXAMPLE::

            >>> from estimator.nd import NoiseDistribution as ND
            >>> D = ND.Uniform(-3, 3, 64)
            >>> D.support_size(fraction=.99)
            1207562882759477428726191443614714994252339953407098880
            >>> D = ND.SparseTernary(64, 8)
            >>> D.support_size()
            32016101348447354880
        """
        if not n:
            if not self.n:
                raise ValueError(f"Length required to determine support size, but n was {n}.")
            n = self.n

        if "SparseTernary" in self.tag:
            h = self.h
            # TODO: this is assuming that the non-zero entries are uniform over {-1,1}
            # need p and m for more accurate calculation
            size = 2 ** h * binomial(n, h) * RR(fraction)
        elif self.is_bounded:
            # TODO: this might be suboptimal/inaccurate for binomial distribution
            a, b = self.bounds
            size = RR(fraction) * (b - a + 1) ** n
        else:
            # Looks like nd is Gaussian
            # -> we'll treat it as bounded (with failure probability)
            t = self.gaussian_tail_bound
            p = self.gaussian_tail_prob

            if p ** n < fraction:
                raise NotImplementedError(
                    f"TODO(nd.support-size): raise t. {RR(p ** n)}, {n}, {fraction}"
                )

            b = 2 * t * sigmaf(self.stddev) + 1
            return (2 * b + 1) ** n
        return ceil(size)
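# --- Illustrative sketch (not part of the original files): the SparseTernary branch
# --- of support_size() above, 2^h * binomial(n, h), checked against the doctest.
from math import comb

n, h = 64, 16             # ND.SparseTernary(64, 8) has h = p + m = 16
print(2 ** h * comb(n, h))  # 32016101348447354880, as in the doctest above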
    def get_hamming_weight(self, n=None):
        if hasattr(self, "h"):
            return self.h

        if not n:
            if not self.n:
                raise ValueError("Length required to determine hamming weight.")
            n = self.n
        return round(n * self.density)

    @staticmethod
    def DiscreteGaussian(stddev, mean=0, n=None):
        """
        A discrete Gaussian distribution with standard deviation ``stddev`` per component.

        EXAMPLE::

            >>> from estimator.nd import NoiseDistribution as ND
            >>> ND.DiscreteGaussian(3.0, 1.0)
            D(σ=3.00, μ=1.00)

        """
        return NoiseDistribution(
            stddev=RR(stddev), mean=RR(mean), n=n, bounds=(-oo, oo), tag="DiscreteGaussian"
        )

    @staticmethod
    def DiscreteGaussianAlpha(alpha, q, mean=0, n=None):
        """
        A discrete Gaussian distribution with standard deviation α⋅q/√(2π) per component.

        EXAMPLE::

            >>> from estimator.nd import NoiseDistribution as ND
            >>> ND.DiscreteGaussianAlpha(0.001, 2048)
            D(σ=0.82)

        """
        stddev = stddevf(alpha * q)
        return NoiseDistribution.DiscreteGaussian(stddev=RR(stddev), mean=RR(mean), n=n)

    @staticmethod
    def CenteredBinomial(eta, n=None):
        """
        Sample a_1, …, a_η, b_1, …, b_η and return Σ(a_i - b_i).

        EXAMPLE::

            >>> from estimator.nd import NoiseDistribution as ND
            >>> ND.CenteredBinomial(8)
            D(σ=2.00)

        """
        stddev = sqrt(eta / 2.0)
        # TODO: density
        return NoiseDistribution(
            stddev=RR(stddev), mean=RR(0), n=n, bounds=(-eta, eta), tag="CenteredBinomial"
        )

    @staticmethod
    def Uniform(a, b, n=None):
        """
        Uniform distribution ∈ ``[a,b]``, endpoints inclusive.

        EXAMPLE::

            >>> from estimator.nd import NoiseDistribution as ND
            >>> ND.Uniform(-3, 3)
            D(σ=2.00)
            >>> ND.Uniform(-4, 3)
            D(σ=2.29, μ=-0.50)

        """
        if b < a:
            raise ValueError(f"upper limit must be larger than lower limit but got: {b} < {a}")
        m = b - a + 1
        mean = (a + b) / RR(2)
        stddev = sqrt((m ** 2 - 1) / RR(12))

        if a <= 0 and 0 <= b:
            density = 1.0 - 1.0 / m
        else:
            density = 0.0

        return NoiseDistribution(
            n=n, stddev=stddev, mean=mean, bounds=(a, b), density=density, tag="Uniform"
        )

    @staticmethod
    def UniformMod(q, n=None):
        """
        Uniform mod ``q``, with balanced representation.

        EXAMPLE::

            >>> from estimator.nd import NoiseDistribution as ND
            >>> ND.UniformMod(7)
            D(σ=2.00)
            >>> ND.UniformMod(8)
            D(σ=2.29, μ=-0.50)

        """
        a = -(q // 2)
        b = q // 2
        if q % 2 == 0:
            b -= 1
        return NoiseDistribution.Uniform(a, b, n=n)

    @staticmethod
    def SparseTernary(n, p, m=None):
        """
        Distribution of vectors of length ``n`` with ``p`` entries of 1 and ``m`` entries of -1, rest 0.

        EXAMPLE::

            >>> from estimator.nd import NoiseDistribution as ND
            >>> ND.SparseTernary(100, p=10)
            D(σ=0.45)
            >>> ND.SparseTernary(100, p=10, m=10)
            D(σ=0.45)
            >>> ND.SparseTernary(100, p=10, m=8)
            D(σ=0.42, μ=0.02)

        """
        if m is None:
            m = p

        if n == 0:
            # this might happen in the dual attack
            return NoiseDistribution(
                stddev=0, mean=0, density=0, bounds=(-1, 1), tag="SparseTernary", n=0
            )
        mean = RR(p / n - m / n)
        stddev = RR(sqrt((p + m) / n))
        density = RR((p + m) / n)
        D = NoiseDistribution(
            stddev=stddev, mean=mean, density=density, bounds=(-1, 1), tag="SparseTernary", n=n
        )
        D.h = p + m
        return D
@@ -1,131 +0,0 @@
# -*- coding: utf-8 -*-
from sage.all import binomial, ZZ, log, ceil, RealField, oo, exp, pi
from sage.all import RealDistribution, RR, sqrt, prod, erf
from .nd import sigmaf


def mitm_babai_probability(r, stddev, q, fast=False):
    """
    Compute the "e-admissibility" probability associated to the mitm step, according to
    [EPRINT:SonChe19]_.

    :param r: the squared GSO lengths
    :param stddev: the standard deviation of the error distribution
    :param q: the LWE modulus
    :param fast: toggle for setting p = 1 (faster, but underestimates security)
    :return: probability for the mitm process

    # NOTE: the model sometimes outputs negative probabilities, we set p = 0 in this case
    """

    if fast:
        # overestimate the probability -> underestimate security
        p = 1
    else:
        # get non-squared norms
        R = [sqrt(s) for s in r]
        alphaq = sigmaf(stddev)
        probs = [
            RR(
                erf(s * sqrt(RR(pi)) / alphaq)
                + (alphaq / s) * ((exp(-s * sqrt(RR(pi)) / alphaq) - 1) / RR(pi))
            )
            for s in R
        ]
        p = RR(prod(probs))
    if p < 0 or p > 1:
        p = 0.0
    return p


def babai(r, norm):
    """
    Babai probability following [EPRINT:Wun16]_.

    """
    R = [RR(sqrt(t) / (2 * norm)) for t in r]
    T = RealDistribution("beta", ((len(r) - 1) / 2, 1.0 / 2))
    probs = [1 - T.cum_distribution_function(1 - s ** 2) for s in R]
    return prod(probs)


def drop(n, h, k, fail=0, rotations=False):
    """
    Probability that ``k`` randomly sampled components have ``fail`` non-zero components amongst
    them.

    :param n: LWE dimension `n > 0`
    :param h: number of non-zero components
    :param k: number of components to ignore
    :param fail: we tolerate ``fail`` number of non-zero components amongst the `k` ignored
        components
    :param rotations: consider rotations of the basis to exploit ring structure (NTRU only)
    """

    N = n  # population size
    K = n - h  # number of success states in the population
    n = k  # number of draws
    k = n - fail  # number of observed successes
    prob_drop = binomial(K, k) * binomial(N - K, n - k) / binomial(N, n)
    if rotations:
        return 1 - (1 - prob_drop) ** N
    else:
        return prob_drop
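# --- Illustrative sketch (not part of the original files): drop() is a
# --- hypergeometric probability; here it is recomputed directly for n=512, h=16,
# --- k=256, fail=0, i.e. the chance that none of the 256 ignored coordinates hits
# --- a non-zero secret entry.
from math import comb

n, h, k, fail = 512, 16, 256, 0
p = comb(n - h, k - fail) * comb(h, fail) / comb(n, k)
print(f"p = {p:.2e}")  # about 1.2e-05: usually some non-zero coordinate is dropped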
def amplify(target_success_probability, success_probability, majority=False):
    """
    Return the number of trials needed to amplify current `success_probability` to
    `target_success_probability`.

    :param target_success_probability: targeted success probability < 1
    :param success_probability: current success probability < 1
    :param majority: if `True` amplify a decisional problem, not a computational one;
        if `False` then we assume that we can check solutions, so one success suffices

    :returns: number of required trials to amplify
    """
    if target_success_probability < success_probability:
        return ZZ(1)
    if success_probability == 0.0:
        return oo

    prec = max(
        53,
        2 * ceil(abs(log(success_probability, 2))),
        2 * ceil(abs(log(1 - success_probability, 2))),
        2 * ceil(abs(log(target_success_probability, 2))),
        2 * ceil(abs(log(1 - target_success_probability, 2))),
    )
    prec = min(prec, 2048)
    RR = RealField(prec)

    success_probability = RR(success_probability)
    target_success_probability = RR(target_success_probability)

    try:
        if majority:
            eps = success_probability / 2
            return ceil(2 * log(2 - 2 * target_success_probability) / log(1 - 4 * eps ** 2))
        else:
            # target_success_probability = 1 - (1-success_probability)^trials
            return ceil(log(1 - target_success_probability) / log(1 - success_probability))
    except ValueError:
        return oo
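# --- Illustrative sketch (not part of the original files): the computational branch
# --- of amplify() above, solving 0.99 = 1 - (1 - p)^trials for a per-trial success
# --- probability p = 0.001.
from math import ceil, log

p, target = 0.001, 0.99
trials = ceil(log(1 - target) / log(1 - p))
print(trials)  # 4603 trials to reach 99% overall success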
def amplify_sigma(target_advantage, sigma, q):
    """
    Amplify distinguishing advantage for a given σ and q.

    :param target_advantage: the targeted distinguishing advantage
    :param sigma: (lists of) Gaussian width parameters
    :param q: modulus q > 0

    """
    try:
        sigma = sum(sigma_ ** 2 for sigma_ in sigma).sqrt()
    except TypeError:
        pass
    advantage = float(exp(-float(pi) * (float(sigma / q) ** 2)))
    return amplify(target_advantage, advantage, majority=True)
@@ -1,571 +0,0 @@
# -*- coding: utf-8 -*-
"""
Cost estimates for lattice reduction.
"""

from sage.all import ZZ, RR, pi, e, find_root, ceil, log, oo, round
from scipy.optimize import newton


def _delta(beta):
    """
    Compute δ from block size β without enforcing β ∈ ZZ.

    δ for β ≤ 40 were computed as follows:

    ```
    # -*- coding: utf-8 -*-
    from fpylll import BKZ, IntegerMatrix

    from multiprocessing import Pool
    from sage.all import mean, sqrt, exp, log, cputime

    d, trials = 320, 32

    def f((A, beta)):

        par = BKZ.Param(block_size=beta, strategies=BKZ.DEFAULT_STRATEGY, flags=BKZ.AUTO_ABORT)
        q = A[-1, -1]
        d = A.nrows
        t = cputime()
        A = BKZ.reduction(A, par, float_type="dd")
        t = cputime(t)
        return t, exp(log(A[0].norm()/sqrt(q).n())/d)

    if __name__ == '__main__':
        for beta in (5, 10, 15, 20, 25, 28, 30, 35, 40):
            delta = []
            t = []
            i = 0
            while i < trials:
                threads = int(open("delta.nthreads").read())  # make sure this file exists
                pool = Pool(threads)
                A = [(IntegerMatrix.random(d, "qary", beta=d//2, bits=50), beta) for j in range(threads)]
                for (t_, delta_) in pool.imap_unordered(f, A):
                    t.append(t_)
                    delta.append(delta_)
                i += threads
            print u"β: %2d, δ_0: %.5f, time: %5.1fs, (%2d,%2d)"%(beta, mean(delta), mean(t), i, threads)
            print
    ```

    """
    small = (
        (2, 1.02190),  # noqa
        (5, 1.01862),  # noqa
        (10, 1.01616),
        (15, 1.01485),
        (20, 1.01420),
        (25, 1.01342),
        (28, 1.01331),
        (40, 1.01295),
    )

    if beta <= 2:
        return RR(1.0219)
    elif beta < 40:
        for i in range(1, len(small)):
            if small[i][0] > beta:
                return RR(small[i - 1][1])
    elif beta == 40:
        return RR(small[-1][1])
    else:
        return RR(beta / (2 * pi * e) * (pi * beta) ** (1 / beta)) ** (1 / (2 * (beta - 1)))


def delta(beta):
    """
    Compute root-Hermite factor δ from block size β.

    :param beta: Block size.
    """
    beta = ZZ(round(beta))
    return _delta(beta)
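# --- Illustrative sketch (not part of the original files): the asymptotic
# --- root-Hermite factor formula used above for beta > 40,
# --- delta(beta) = (beta/(2*pi*e) * (pi*beta)^(1/beta))^(1/(2*(beta-1))).
from math import e, pi

def delta_asymptotic(beta):
    return (beta / (2 * pi * e) * (pi * beta) ** (1 / beta)) ** (1 / (2 * (beta - 1)))

print(f"{delta_asymptotic(406):.6f}")  # 1.003941, cf. the Kyber512 uSVP doctest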
def _beta_secant(delta):
|
||||
"""
|
||||
Estimate required block size β for a given root-Hermite factor δ based on [PhD:Chen13]_.
|
||||
|
||||
:param delta: Root-Hermite factor.
|
||||
|
||||
EXAMPLE::
|
||||
|
||||
>>> import estimator.reduction as RC
|
||||
>>> 50 == RC._beta_secant(1.0121)
|
||||
True
|
||||
>>> 100 == RC._beta_secant(1.0093)
|
||||
True
|
||||
>>> RC._beta_secant(1.0024) # Chen reports 800
|
||||
808
|
||||
|
||||
"""
|
||||
# newton() will produce a "warning", if two subsequent function values are
|
||||
# indistinguishable (i.e. equal in terms of machine precision). In this case
|
||||
# newton() will return the value beta in the middle between the two values
|
||||
# k1,k2 for which the function values were indistinguishable.
|
||||
# Since f approaches zero for beta->+Infinity, this may be the case for very
|
||||
# large inputs, like beta=1e16.
|
||||
# For now, these warnings just get printed and the value beta is used anyways.
|
||||
# This seems reasonable, since for such large inputs the exact value of beta
|
||||
# doesn't make such a big difference.
|
||||
try:
|
||||
beta = newton(
|
||||
lambda beta: RR(_delta(beta) - delta),
|
||||
100,
|
||||
fprime=None,
|
||||
args=(),
|
||||
tol=1.48e-08,
|
||||
maxiter=500,
|
||||
)
|
||||
beta = ceil(beta)
|
||||
if beta < 40:
|
||||
# newton may output beta < 40. The old beta method wouldn't do this. For
|
||||
# consistency, call the old beta method, i.e. consider this try as "failed".
|
||||
raise RuntimeError("β < 40")
|
||||
return beta
|
||||
except (RuntimeError, TypeError):
|
||||
# if something fails, use old beta method
|
||||
beta = _beta_simple(delta)
|
||||
return beta
|
||||
|
||||
|
||||
def _beta_find_root(delta):
|
||||
"""
|
||||
Estimate required block size β for a given root-Hermite factor δ based on [PhD:Chen13]_.
|
||||
|
||||
:param delta: Root-Hermite factor.
|
||||
|
||||
TESTS::
|
||||
|
||||
>>> import estimator.reduction as RC
|
||||
>>> RC._beta_find_root(RC.delta(500))
|
||||
500
|
||||
|
||||
"""
|
||||
# handle beta < 40 separately
|
||||
beta = ZZ(40)
|
||||
if _delta(beta) < delta:
|
||||
return beta
|
||||
|
||||
try:
|
||||
beta = find_root(lambda beta: RR(_delta(beta) - delta), 40, 2 ** 16, maxiter=500)
|
||||
beta = ceil(beta - 1e-8)
|
||||
except RuntimeError:
|
||||
# finding root failed; reasons:
|
||||
# 1. maxiter not sufficient
|
||||
# 2. no root in given interval
|
||||
beta = _beta_simple(delta)
|
||||
return beta
|
||||
|
||||
|
||||
def _beta_simple(delta):
|
||||
"""
|
||||
Estimate required block size β for a given root-Hermite factor δ based on [PhD:Chen13]_.
|
||||
|
||||
:param delta: Root-Hermite factor.
|
||||
|
||||
TESTS::
|
||||
|
||||
>>> import estimator.reduction as RC
|
||||
>>> RC._beta_simple(RC.delta(500))
|
||||
501
|
||||
|
||||
"""
|
||||
beta = ZZ(40)
|
||||
|
||||
while _delta(2 * beta) > delta:
|
||||
beta *= 2
|
||||
while _delta(beta + 10) > delta:
|
||||
beta += 10
|
||||
while True:
|
||||
if _delta(beta) < delta:
|
||||
break
|
||||
beta += 1
|
||||
|
||||
return beta
|
||||
|
||||
|
||||
def beta(delta):
|
||||
"""
|
||||
Estimate required block size β for a given root-hermite factor δ based on [PhD:Chen13]_.
|
||||
|
||||
:param delta: Root-hermite factor.
|
||||
|
||||
EXAMPLE::
|
||||
|
||||
>>> import estimator.reduction as RC
|
||||
>>> 50 == RC.beta(1.0121)
|
||||
True
|
||||
>>> 100 == RC.beta(1.0093)
|
||||
True
|
||||
>>> RC.beta(1.0024) # Chen reports 800
|
||||
808
|
||||
|
||||
"""
|
||||
# TODO: decide for one strategy (secant, find_root, old) and its error handling
|
||||
beta = _beta_find_root(delta)
|
||||
return beta
|
||||
|
||||
|
||||
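As a quick consistency check (a sketch, not part of the deleted file; it assumes a Sage session in which this module is importable as ``estimator.reduction``), ``delta`` and ``beta`` should roughly invert each other for β ≥ 40:

```python
# Sketch: round-tripping block size through the root-Hermite factor.
import estimator.reduction as RC

for b in (60, 120, 500):
    print(b, RC.beta(RC.delta(b)))  # expected to agree, possibly off by one
```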
# BKZ Estimates


def svp_repeat(beta, d):
    """
    Return the number of SVP calls in BKZ-β.

    :param beta: Block size ≥ 2.
    :param d: Lattice dimension.

    .. note :: Loosely based on experiments in [PhD:Chen13]_.

    .. note :: When d ≤ β we return 1.

    """
    if beta < d:
        return 8 * d
    else:
        return 1


def LLL(d, B=None):
    """
    Runtime estimation for the LLL algorithm based on [AC:CheNgu11]_.

    :param d: Lattice dimension.
    :param B: Bit-size of entries.

    """
    if B:
        return d ** 3 * B ** 2
    else:
        return d ** 3  # ignoring B for backward compatibility


def _BDGL16_small(beta, d, B=None):
    """
    Runtime estimation given β and assuming sieving is used to realise the SVP oracle for small
    dimensions following [SODA:BDGL16]_.

    :param beta: Block size ≥ 2.
    :param d: Lattice dimension.
    :param B: Bit-size of entries.

    TESTS::

        >>> from math import log
        >>> import estimator.reduction as RC
        >>> log(RC._BDGL16_small(500, 1024), 2.0)
        222.9

    """
    return LLL(d, B) + ZZ(2) ** RR(0.387 * beta + 16.4 + log(svp_repeat(beta, d), 2))


def _BDGL16_asymptotic(beta, d, B=None):
    """
    Runtime estimation given β and assuming sieving is used to realise the SVP oracle following
    [SODA:BDGL16]_.

    :param beta: Block size ≥ 2.
    :param d: Lattice dimension.
    :param B: Bit-size of entries.

    TESTS::

        >>> from math import log
        >>> import estimator.reduction as RC
        >>> log(RC._BDGL16_asymptotic(500, 1024), 2.0)
        175.4

    """
    # TODO we simply pick the same additive constant 16.4 as for the experimental result in [SODA:BDGL16]_
    return LLL(d, B) + ZZ(2) ** RR(0.292 * beta + 16.4 + log(svp_repeat(beta, d), 2))


def BDGL16(beta, d, B=None):
    """
    Runtime estimation given β and assuming sieving is used to realise the SVP oracle following
    [SODA:BDGL16]_.

    :param beta: Block size ≥ 2.
    :param d: Lattice dimension.
    :param B: Bit-size of entries.

    EXAMPLE::

        >>> from math import log
        >>> import estimator.reduction as RC
        >>> log(RC.BDGL16(500, 1024), 2.0)
        175.4

    """
    # TODO this crossover point is somewhat arbitrary
    if beta <= 90:
        return _BDGL16_small(beta, d, B)
    else:
        return _BDGL16_asymptotic(beta, d, B)


def LaaMosPol14(beta, d, B=None):
    """
    Runtime estimation for quantum sieving following [EPRINT:LaaMosPol14]_ and [PhD:Laarhoven15]_.

    :param beta: Block size ≥ 2.
    :param d: Lattice dimension.
    :param B: Bit-size of entries.

    EXAMPLE::

        >>> from math import log
        >>> import estimator.reduction as RC
        >>> log(RC.LaaMosPol14(500, 1024), 2.0)
        161.9

    """
    return LLL(d, B) + ZZ(2) ** RR(0.265 * beta + 16.4 + log(svp_repeat(beta, d), 2))


def CheNgu12(beta, d, B=None):
    """
    Runtime estimation given β and assuming [CheNgu12]_ estimates are correct.

    :param beta: Block size ≥ 2.
    :param d: Lattice dimension.
    :param B: Bit-size of entries.

    The constants in this function were derived as follows based on Table 4 in
    [CheNgu12]_::

        >>> from sage.all import var, find_fit
        >>> dim = [100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250]
        >>> nodes = [39.0, 44.0, 49.0, 54.0, 60.0, 66.0, 72.0, 78.0, 84.0, 96.0, 99.0, 105.0, 111.0, 120.0, 127.0, 134.0]  # noqa
        >>> times = [c + log(200,2).n() for c in nodes]
        >>> T = list(zip(dim, nodes))
        >>> var("a,b,c,beta")
        (a, b, c, beta)
        >>> f = a*beta*log(beta, 2.0) + b*beta + c
        >>> f = f.function(beta)
        >>> f.subs(find_fit(T, f, solution_dict=True))
        beta |--> 0.2701...*beta*log(beta) - 1.0192...*beta + 16.10...

    The estimate 2^(0.18728 β⋅log_2(β) - 1.019⋅β + 16.10) is the number of enumeration
    nodes (note that 0.2701 β⋅ln(β) = 0.18728 β⋅log_2(β)); we then multiply by the number
    of cycles needed to process one node, here taken to be 100, i.e. the log(100, 2) term
    in the code below.

    EXAMPLE::

        >>> from math import log
        >>> import estimator.reduction as RC
        >>> log(RC.CheNgu12(500, 1024), 2.0)
        365.70...

    """
    repeat = svp_repeat(beta, d)
    cost = RR(
        0.270188776350190 * beta * log(beta)
        - 1.0192050451318417 * beta
        + 16.10253135200765
        + log(100, 2)
    )
    return LLL(d, B) + repeat * ZZ(2) ** cost


def ABFKSW20(beta, d, B=None):
    """
    Enumeration cost according to [C:ABFKSW20]_.

    :param beta: Block size ≥ 2.
    :param d: Lattice dimension.
    :param B: Bit-size of entries.

    EXAMPLE::

        >>> from math import log
        >>> import estimator.reduction as RC
        >>> log(RC.ABFKSW20(500, 1024), 2.0)
        316.26...

    """
    if 1.5 * beta >= d or beta <= 92:  # 1.5β is a bit arbitrary, β ≤ 92 is the crossover point
        cost = RR(0.1839 * beta * log(beta, 2) - 0.995 * beta + 16.25 + log(64, 2))
    else:
        cost = RR(0.125 * beta * log(beta, 2) - 0.547 * beta + 10.4 + log(64, 2))

    repeat = svp_repeat(beta, d)

    return LLL(d, B) + repeat * ZZ(2) ** cost


def ABLR21(beta, d, B=None):
    """
    Enumeration cost according to [C:ABLR21]_.

    :param beta: Block size ≥ 2.
    :param d: Lattice dimension.
    :param B: Bit-size of entries.

    EXAMPLE::

        >>> from math import log
        >>> import estimator.reduction as RC
        >>> log(RC.ABLR21(500, 1024), 2.0)
        278.20...

    """
    if 1.5 * beta >= d or beta <= 97:  # 1.5β is a bit arbitrary, β ≤ 97 is the crossover point
        cost = RR(0.1839 * beta * log(beta, 2) - 1.077 * beta + 29.12 + log(64, 2))
    else:
        cost = RR(0.1250 * beta * log(beta, 2) - 0.654 * beta + 25.84 + log(64, 2))

    repeat = svp_repeat(beta, d)

    return LLL(d, B) + repeat * ZZ(2) ** cost


def ADPS16(beta, d, B=None, mode="classical"):
    """
    Runtime estimation from [USENIX:ADPS16]_.

    :param beta: Block size ≥ 2.
    :param d: Lattice dimension.
    :param B: Bit-size of entries.

    EXAMPLE::

        >>> from math import log
        >>> import estimator.reduction as RC
        >>> log(RC.ADPS16(500, 1024), 2.0)
        146.0
        >>> log(RC.ADPS16(500, 1024, mode="quantum"), 2.0)
        132.5
        >>> log(RC.ADPS16(500, 1024, mode="paranoid"), 2.0)
        103.75

    """
    if mode not in ("classical", "quantum", "paranoid"):
        raise ValueError(f"Mode {mode} not understood.")

    c = {
        "classical": 0.2920,
        "quantum": 0.2650,  # the paper writes 0.262 but this isn't right
        "paranoid": 0.2075,
    }

    c = c[mode]

    return ZZ(2) ** RR(c * beta)


def d4f(beta):
    """
    Dimensions "for free" following [EC:Ducas18]_.

    :param beta: Block size ≥ 2.

    If β' is output by this function then sieving is expected to be required up to dimension β - β'.

    EXAMPLE::

        >>> import estimator.reduction as RC
        >>> RC.d4f(500)
        42.597...

    """
    return max(float(beta * log(4 / 3.0) / log(beta / (2 * pi * e))), 0.0)


# These are not asymptotic expressions but compress the data in [AC:AGPS20]_ which covers up to
# β = 1024
NN_AGPS = {
    "all_pairs-classical": {"a": 0.4215069316613415, "b": 20.1669683097337},
    "all_pairs-dw": {"a": 0.3171724396445732, "b": 25.29828951733785},
    "all_pairs-g": {"a": 0.3155285835002801, "b": 22.478746811528048},
    "all_pairs-ge19": {"a": 0.3222895263943544, "b": 36.11746438609666},
    "all_pairs-naive_classical": {"a": 0.4186251294633655, "b": 9.899382654377058},
    "all_pairs-naive_quantum": {"a": 0.31401512556555794, "b": 7.694659515948326},
    "all_pairs-t_count": {"a": 0.31553282515234704, "b": 20.878594142502994},
    "list_decoding-classical": {"a": 0.2988026130564745, "b": 26.011121212891872},
    "list_decoding-dw": {"a": 0.26944796385592995, "b": 28.97237346443934},
    "list_decoding-g": {"a": 0.26937450988892553, "b": 26.925140365395972},
    "list_decoding-ge19": {"a": 0.2695210400018704, "b": 35.47132142280775},
    "list_decoding-naive_classical": {"a": 0.2973130399197453, "b": 21.142124058689426},
    "list_decoding-naive_quantum": {"a": 0.2674316807758961, "b": 18.720680589028465},
    "list_decoding-t_count": {"a": 0.26945736714156543, "b": 25.913746774011887},
    "random_buckets-classical": {"a": 0.35586144233444716, "b": 23.082527816636638},
    "random_buckets-dw": {"a": 0.30704199612690264, "b": 25.581968903639485},
    "random_buckets-g": {"a": 0.30610964725102385, "b": 22.928235564044563},
    "random_buckets-ge19": {"a": 0.31089687599538407, "b": 36.02129978813208},
    "random_buckets-naive_classical": {"a": 0.35448283789554513, "b": 15.28878540793908},
    "random_buckets-naive_quantum": {"a": 0.30211421791887644, "b": 11.151745013027089},
    "random_buckets-t_count": {"a": 0.30614770082829745, "b": 21.41830142853265},
}

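These fits make the log₂ gate count of a single sieving call an affine function a·β + b; a minimal sketch (constants copied from the ``list_decoding-classical`` entry above):

```python
# Sketch: log2 gate count of one sieve call in dimension beta under the
# list_decoding-classical fit of [AC:AGPS20].
a, b = 0.2988026130564745, 26.011121212891872
beta = 500
print(a * beta + b)  # ≈ 175.4 bits
```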
def Kyber(beta, d, B=None, nn="classical", C=5.46):
    """
    Runtime estimation from [Kyber20]_ and [AC:AGPS20]_.

    :param beta: Block size ≥ 2.
    :param d: Lattice dimension.
    :param B: Bit-size of entries.
    :param nn: Nearest-neighbour cost model. We default to "list_decoding" (i.e. BDGL16) and to
        the "depth × width" metric; Kyber uses "all_pairs".
    :param C: Progressive overhead lim_{β → ∞} ∑_{i ≤ β} 2^{0.292 i + o(i)}/2^{0.292 β + o(β)}.

    EXAMPLE::

        >>> from math import log
        >>> import estimator.reduction as RC
        >>> log(RC.Kyber(500, 1024), 2.0)
        174.16...
        >>> log(RC.Kyber(500, 1024, nn="list_decoding-ge19"), 2.0)
        170.23...

    """
    if beta < 20:  # the model goes haywire for small β
        return CheNgu12(beta, d, B)

    if nn == "classical":
        nn = "list_decoding-classical"
    elif nn == "quantum":
        nn = "list_decoding-dw"

    svp_calls = C * max(d - beta, 1)
    # we do not round β' to the nearest integer, to ensure the cost is continuously increasing
    # with β, which rounding can violate.
    beta_ = beta - d4f(beta)
    gate_count = 2 ** (NN_AGPS[nn]["a"] * beta_ + NN_AGPS[nn]["b"])
    return LLL(d, B=B) + svp_calls * gate_count


def cost(cost_model, beta, d, B=None, predicate=None, **kwds):
    """
    Return a cost dictionary for computing a vector of norm `δ_0^{d-1} Vol(Λ)^{1/d}` using the
    provided lattice-reduction algorithm.

    :param cost_model: cost model to evaluate, e.g. ``ABLR21``.
    :param beta: Block size ≥ 2.
    :param d: Lattice dimension.
    :param B: Bit-size of entries.
    :param predicate: if ``False`` the cost will be infinity.

    EXAMPLE::

        >>> import estimator.reduction as RC
        >>> RC.cost(RC.ABLR21, 120, 500)
        rop: ≈2^68.9, red: ≈2^68.9, δ: 1.008435, β: 120, d: 500
        >>> RC.cost(RC.ABLR21, 120, 500, predicate=False)
        rop: ≈2^inf, red: ≈2^inf, δ: 1.008435, β: 120, d: 500

    """
    from .cost import Cost

    cost = cost_model(beta, d, B)
    delta_ = delta(beta)
    cost = Cost(rop=cost, red=cost, delta=delta_, beta=beta, d=d, **kwds)
    cost.register_impermanent(rop=True, red=True, delta=False, beta=False, d=False)
    if predicate is not None and not predicate:
        cost["red"] = oo
        cost["rop"] = oo
    return cost
@@ -1,402 +0,0 @@
from .nd import NoiseDistribution, stddevf
from .lwe_parameters import LWEParameters

#
# Kyber
#
# https://pq-crystals.org/kyber/data/kyber-specification-round3-20210804.pdf
# Table 1, Page 11; we are ignoring the compression
#
# https://eprint.iacr.org/2020/1308.pdf
# Table 2, page 27, disagrees on Kyber 512

Kyber512 = LWEParameters(
    n=2 * 256,
    q=3329,
    Xs=NoiseDistribution.CenteredBinomial(3),
    Xe=NoiseDistribution.CenteredBinomial(3),
    m=2 * 256,
    tag="Kyber 512",
)

Kyber768 = LWEParameters(
    n=3 * 256,
    q=3329,
    Xs=NoiseDistribution.CenteredBinomial(2),
    Xe=NoiseDistribution.CenteredBinomial(2),
    m=3 * 256,
    tag="Kyber 768",
)

Kyber1024 = LWEParameters(
    n=4 * 256,
    q=3329,
    Xs=NoiseDistribution.CenteredBinomial(2),
    Xe=NoiseDistribution.CenteredBinomial(2),
    m=4 * 256,
    tag="Kyber 1024",
)

#
# Saber
#
# https://www.esat.kuleuven.be/cosic/pqcrypto/saber/files/saberspecround3.pdf
# Table 1, page 11
#
# https://eprint.iacr.org/2020/1308.pdf
# Table 2, page 27, agrees

LightSaber = LWEParameters(
    n=2 * 256,
    q=8192,
    Xs=NoiseDistribution.CenteredBinomial(5),
    Xe=NoiseDistribution.UniformMod(7),
    m=2 * 256,
    tag="LightSaber",
)

Saber = LWEParameters(
    n=3 * 256,
    q=8192,
    Xs=NoiseDistribution.CenteredBinomial(4),
    Xe=NoiseDistribution.UniformMod(7),
    m=3 * 256,
    tag="Saber",
)

FireSaber = LWEParameters(
    n=4 * 256,
    q=8192,
    Xs=NoiseDistribution.CenteredBinomial(3),
    Xe=NoiseDistribution.UniformMod(7),
    m=4 * 256,
    tag="FireSaber",
)

NTRUHPS2048509Enc = LWEParameters(
    n=508,
    q=2048,
    Xe=NoiseDistribution.SparseTernary(508, 2048 / 16 - 1),
    Xs=NoiseDistribution.UniformMod(3),
    m=508,
    tag="NTRUHPS2048509Enc",
)

NTRUHPS2048677Enc = LWEParameters(
    n=676,
    q=2048,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.SparseTernary(676, 2048 / 16 - 1),
    m=676,
    tag="NTRUHPS2048677Enc",
)

NTRUHPS4096821Enc = LWEParameters(
    n=820,
    q=4096,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.SparseTernary(820, 4096 / 16 - 1),
    m=820,
    tag="NTRUHPS4096821Enc",
)

NTRUHRSS701Enc = LWEParameters(
    n=700,
    q=8192,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.UniformMod(3),
    m=700,
    tag="NTRUHRSS701",
)

NISTPQC_R3 = (
    Kyber512,
    Kyber768,
    Kyber1024,
    LightSaber,
    Saber,
    FireSaber,
    NTRUHPS2048509Enc,
    NTRUHPS2048677Enc,
    NTRUHPS4096821Enc,
    NTRUHRSS701Enc,
)

HESv111024128error = LWEParameters(
    n=1024,
    q=2 ** 27,
    Xs=NoiseDistribution.DiscreteGaussian(3.0),
    Xe=NoiseDistribution.DiscreteGaussian(3.0),
    m=1024,
    tag="HESv11error",
)

HESv111024128ternary = LWEParameters(
    n=1024,
    q=2 ** 27,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.DiscreteGaussian(3.0),
    m=1024,
    tag="HESv11ternary",
)

HESv11 = (HESv111024128error, HESv111024128ternary)


# FHE schemes

# TFHE
# https://tfhe.github.io/tfhe/security_and_params.html
TFHE630 = LWEParameters(
    n=630,
    q=2 ** 32,
    Xs=NoiseDistribution.UniformMod(2),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=2 ** (-15) * 2 ** 32),
    tag="TFHE630",
)

TFHE1024 = LWEParameters(
    n=1024,
    q=2 ** 32,
    Xs=NoiseDistribution.UniformMod(2),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=2 ** (-25) * 2 ** 32),
    tag="TFHE1024",
)

# https://eprint.iacr.org/2018/421.pdf
# Table 3, page 55
TFHE16_500 = LWEParameters(
    n=500,
    q=2 ** 32,
    Xs=NoiseDistribution.UniformMod(2),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=2.43 * 10 ** (-5) * 2 ** 32),
    tag="TFHE16_500",
)

TFHE16_1024 = LWEParameters(
    n=1024,
    q=2 ** 32,
    Xs=NoiseDistribution.UniformMod(2),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.73 * 10 ** (-9) * 2 ** 32),
    tag="TFHE16_1024",
)

# https://eprint.iacr.org/2018/421.pdf
# Table 4, page 55
TFHE20_612 = LWEParameters(
    n=612,
    q=2 ** 32,
    Xs=NoiseDistribution.UniformMod(2),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=2 ** (-15) * 2 ** 32),
    tag="TFHE20_612",
)

TFHE20_1024 = LWEParameters(
    n=1024,
    q=2 ** 32,
    Xs=NoiseDistribution.UniformMod(2),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=2 ** (-26) * 2 ** 32),
    tag="TFHE20_1024",
)

# FHEW
# https://eprint.iacr.org/2014/816.pdf
# page 14
FHEW = LWEParameters(
    n=500,
    q=2 ** 32,
    Xs=NoiseDistribution.UniformMod(2),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=2 ** (-15) * 2 ** 32),
    tag="FHEW",
)

# SEAL

# v2.0
# https://www.microsoft.com/en-us/research/wp-content/uploads/2016/09/sealmanual.pdf
# Table 3, page 19

SEAL20_1024 = LWEParameters(
    n=1024,
    q=2 ** 48 - 2 ** 20 + 1,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19),
    tag="SEAL20_1024",
)

SEAL20_2048 = LWEParameters(
    n=2048,
    q=2 ** 94 - 2 ** 20 + 1,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19),
    tag="SEAL20_2048",
)

SEAL20_4096 = LWEParameters(
    n=4096,
    q=2 ** 190 - 2 ** 30 + 1,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19),
    tag="SEAL20_4096",
)

SEAL20_8192 = LWEParameters(
    n=8192,
    q=2 ** 383 - 2 ** 33 + 1,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19),
    tag="SEAL20_8192",
)

SEAL20_16384 = LWEParameters(
    n=16384,
    q=2 ** 767 - 2 ** 56 + 1,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19),
    tag="SEAL20_16384",
)

# v2.2
# https://www.microsoft.com/en-us/research/wp-content/uploads/2017/06/sealmanual_v2.2.pdf
# Table 3, page 20
SEAL22_2048 = LWEParameters(
    n=2048,
    q=2 ** 60 - 2 ** 14 + 1,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19),
    tag="SEAL22_2048",
)

SEAL22_4096 = LWEParameters(
    n=4096,
    q=2 ** 116 - 2 ** 18 + 1,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19),
    tag="SEAL22_4096",
)

SEAL22_8192 = LWEParameters(
    n=8192,
    q=2 ** 226 - 2 ** 26 + 1,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19),
    tag="SEAL22_8192",
)

SEAL22_16384 = LWEParameters(
    n=16384,
    q=2 ** 435 - 2 ** 33 + 1,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19),
    tag="SEAL22_16384",
)

SEAL22_32768 = LWEParameters(
    n=32768,
    q=2 ** 889 - 2 ** 54 - 2 ** 53 - 2 ** 52 + 1,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19),
    tag="SEAL22_32768",
)

# The following are not parameters of actual schemes
# but useful for benchmarking

# HElib
# https://eprint.iacr.org/2017/047.pdf
# Table 1, page 6
# 80-bit security
HElib80_1024 = LWEParameters(
    n=1024,
    q=2 ** 47,
    Xs=NoiseDistribution.SparseTernary(n=1024, p=32),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.2),
    tag="HElib80_1024",
)

HElib80_2048 = LWEParameters(
    n=2048,
    q=2 ** 87,
    Xs=NoiseDistribution.SparseTernary(n=2048, p=32),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.2),
    tag="HElib80_2048",
)

HElib80_4096 = LWEParameters(
    n=4096,
    q=2 ** 167,
    Xs=NoiseDistribution.SparseTernary(n=4096, p=32),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.2),
    tag="HElib80_4096",
)

# 120-bit security
HElib120_1024 = LWEParameters(
    n=1024,
    q=2 ** 38,
    Xs=NoiseDistribution.SparseTernary(n=1024, p=32),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.2),
    tag="HElib120_1024",
)

HElib120_2048 = LWEParameters(
    n=2048,
    q=2 ** 70,
    Xs=NoiseDistribution.SparseTernary(n=2048, p=32),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.2),
    tag="HElib120_2048",
)

HElib120_4096 = LWEParameters(
    n=4096,
    q=2 ** 134,
    Xs=NoiseDistribution.SparseTernary(n=4096, p=32),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.2),
    tag="HElib120_4096",
)


# Test parameters from CHHS
# https://eprint.iacr.org/2019/1114.pdf
# Table 4, page 18
CHHS_1024_25 = LWEParameters(
    n=1024,
    q=2 ** 25,
    Xs=NoiseDistribution.SparseTernary(n=1024, p=32),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=stddevf(8)),
    tag="CHHS_1024_25",
)

CHHS_2048_38 = LWEParameters(
    n=2048,
    q=2 ** 38,
    Xs=NoiseDistribution.SparseTernary(n=2048, p=32),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=stddevf(8)),
    tag="CHHS_2048_38",
)

CHHS_2048_45 = LWEParameters(
    n=2048,
    q=2 ** 45,
    Xs=NoiseDistribution.SparseTernary(n=2048, p=32),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=stddevf(8)),
    tag="CHHS_2048_45",
)

CHHS_4096_67 = LWEParameters(
    n=4096,
    q=2 ** 67,
    Xs=NoiseDistribution.SparseTernary(n=4096, p=32),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=stddevf(8)),
    tag="CHHS_4096_67",
)

CHHS_4096_82 = LWEParameters(
    n=4096,
    q=2 ** 82,
    Xs=NoiseDistribution.SparseTernary(n=4096, p=32),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=stddevf(8)),
    tag="CHHS_4096_82",
)
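These objects feed straight into the estimator's entry points; a usage sketch (``LWE.estimate`` is exercised the same way by the test scripts further down in this commit, and it can take a while to run):

```python
# Sketch: run the battery of attack estimates against Kyber-512,
# skipping the same attacks the test scripts below skip.
from estimator import LWE, Kyber512

res = LWE.estimate(Kyber512, deny_list=("arora-gb", "bkw"))
```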
@@ -1,72 +0,0 @@
# -*- coding: utf-8 -*-
"""
Simulate lattice reduction on the rows of::

    ⌜ ξI  A  0 ⌝
    ǀ  0 qI  0 ǀ
    ⌞  0  c  τ ⌟

where

- ξI ∈ ZZ^{n × n},
- A ∈ ZZ_q^{n × m},
- qI ∈ ZZ^{m × m},
- τ ∈ ZZ and
- d = m + n + 1.

The last row is optional.
"""

from sage.all import RR, log

def CN11(d, n, q, beta, xi=1, tau=1):
    """
    Reduced lattice shape via the BKZ simulator of [AC:CheNgu11]_, as implemented in fpylll.

    :param d: Lattice dimension.
    :param n: Number of ξ-scaled identity rows (the remaining rows are `q` vectors).
    :param q: Modulus `q`.
    :param beta: Block size β.
    :param xi: Scaling factor ξ for the identity part.
    :param tau: Kannan factor τ (pass ``None`` to drop the last row).

    """
    from fpylll import BKZ
    from fpylll.tools.bkz_simulator import simulate

    if tau is not None:
        r = [q ** 2] * (d - n - 1) + [xi ** 2] * n + [tau ** 2]
    else:
        r = [q ** 2] * (d - n) + [xi ** 2] * n

    return simulate(r, BKZ.EasyParam(beta))[0]


def GSA(d, n, q, beta, xi=1, tau=1):
    """
    Reduced lattice shape following the Geometric Series Assumption [Schnorr03]_.

    :param d: Lattice dimension.
    :param n: Number of ξ-scaled identity rows (the remaining rows are `q` vectors).
    :param q: Modulus `q`.
    :param beta: Block size β.
    :param xi: Scaling factor ξ for the identity part.
    :param tau: Kannan factor τ.

    """
    from .reduction import delta as deltaf

    if tau is not None:
        log_vol = RR(log(q, 2) * (d - n - 1) + log(xi, 2) * n + log(tau, 2))
    else:
        log_vol = RR(log(q, 2) * (d - n) + log(xi, 2) * n)

    delta = deltaf(beta)
    r_log = [(d - 1 - 2 * i) * RR(log(delta, 2)) + log_vol / d for i in range(d)]
    r = [2 ** (2 * r_) for r_ in r_log]
    return r
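Under the GSA the log-profile is an arithmetic progression, so the product of the Gram–Schmidt lengths must equal the lattice volume; a quick sketch (not part of the file) checks this:

```python
# Sketch: the GSA profile preserves the lattice volume.
from math import log2

from estimator.simulator import GSA

d, n, q, beta = 512, 256, 2 ** 32, 120
r = GSA(d, n, q, beta, tau=None)              # squared GSO norms
log_vol = sum(log2(float(r_)) / 2 for r_ in r)
print(log_vol, (d - n) * log2(q))             # the two numbers should agree
```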
def normalize(name):
    if str(name).upper() == "CN11":
        return CN11
    elif str(name).upper() == "GSA":
        return GSA
    else:
        return name


def plot_gso(r, *args, **kwds):
    from sage.all import line

    return line([(i, log(r_, 2) / 2.0) for i, r_ in enumerate(r)], *args, **kwds)
@@ -1,295 +0,0 @@
from multiprocessing import Pool
from functools import partial

from sage.all import ceil, floor

from .io import Logging


class local_minimum_base:
    """
    An iterator context for finding a local minimum using binary search.

    We use the immediate neighbourhood of a point to decide the next direction to go into
    (gradient-descent style), so the algorithm is not plain binary search (see the ``update()``
    function).

    .. note :: We combine an iterator and a context to give the caller access to the result.
    """

    def __init__(
        self,
        start,
        stop,
        smallerf=lambda x, best: x <= best,
        suppress_bounds_warning=False,
        log_level=5,
    ):
        """
        Create a fresh local-minimum search context.

        :param start: starting point
        :param stop: end point (exclusive)
        :param smallerf: a function to decide if ``lhs`` is smaller than ``rhs``.
        :param suppress_bounds_warning: do not warn if a boundary is picked as optimal

        """
        if stop < start:
            raise ValueError(f"Incorrect bounds {start} > {stop}.")

        self._suppress_bounds_warning = suppress_bounds_warning
        self._log_level = log_level
        self._start = start
        self._stop = stop - 1
        self._initial_bounds = (start, stop - 1)
        self._smallerf = smallerf
        # abs(self._direction) == 2: binary search step
        # abs(self._direction) == 1: gradient descent direction
        self._direction = -1  # going down
        self._last_x = None
        self._next_x = self._stop
        self._best = (None, None)
        self._all_x = set()

    def __enter__(self):
        """ """
        return self

    def __exit__(self, type, value, traceback):
        """ """
        pass

    def __iter__(self):
        """ """
        return self

    def __next__(self):
        abort = False
        if self._next_x is None:
            abort = True  # we're told to abort
        elif self._next_x in self._all_x:
            abort = True  # we're looping
        elif self._next_x < self._initial_bounds[0] or self._initial_bounds[1] < self._next_x:
            abort = True  # we're stepping out of bounds

        if not abort:
            self._last_x = self._next_x
            self._next_x = None
            return self._last_x

        if self._best[0] in self._initial_bounds and not self._suppress_bounds_warning:
            # We warn the user if the optimal solution is at the edge and thus possibly not optimal.
            Logging.log(
                "bins",
                self._log_level,
                f'warning: "optimal" solution {self._best[0]} matches a bound ∈ {self._initial_bounds}.',
            )

        raise StopIteration
    @property
    def x(self):
        return self._best[0]

    @property
    def y(self):
        return self._best[1]

    def update(self, res):
        """

        TESTS:

        We cache old inputs in ``_all_x`` to prevent infinite loops::

            >>> from estimator.util import binary_search
            >>> from estimator.cost import Cost
            >>> f = lambda x, log_level=1: Cost(rop=1) if x >= 19 else Cost(rop=2)
            >>> binary_search(f, 10, 30, "x")
            rop: 1

        """
        Logging.log("bins", self._log_level, f"({self._last_x}, {repr(res)})")

        self._all_x.add(self._last_x)

        # We got nothing yet
        if self._best[0] is None:
            self._best = self._last_x, res

        # We found something better
        if res is not False and self._smallerf(res, self._best[1]):
            # store it
            self._best = self._last_x, res

            # if it's the result of a long jump, figure out the next direction
            if abs(self._direction) != 1:
                self._direction = -1
                self._next_x = self._last_x - 1
            # going down worked, so let's keep on doing that
            elif self._direction == -1:
                self._direction = -2
                self._stop = self._last_x
                self._next_x = ceil((self._start + self._stop) / 2)
            # going up worked, so let's keep on doing that
            elif self._direction == 1:
                self._direction = 2
                self._start = self._last_x
                self._next_x = floor((self._start + self._stop) / 2)
        else:
            # going downwards didn't help, let's try up
            if self._direction == -1:
                self._direction = 1
                self._next_x = self._last_x + 2
            # going up didn't help either, so we stop
            elif self._direction == 1:
                self._next_x = None
            # it got no better in a long jump, halve the search space and try again
            elif self._direction == -2:
                self._start = self._last_x
                self._next_x = ceil((self._start + self._stop) / 2)
            elif self._direction == 2:
                self._stop = self._last_x
                self._next_x = floor((self._start + self._stop) / 2)

        # We are repeating ourselves, time to stop
        if self._next_x == self._last_x:
            self._next_x = None
class local_minimum(local_minimum_base):
    """
    An iterator context for finding a local minimum using binary search.

    We use the neighbourhood of a point to decide the next direction to go into (gradient-descent
    style), so the algorithm is not plain binary search (see the ``update()`` function).

    We also zoom out by a factor ``precision``, find an approximate local minimum and then
    search the neighbourhood for the smallest value.

    .. note :: We combine an iterator and a context to give the caller access to the result.

    """

    def __init__(
        self,
        start,
        stop,
        precision=1,
        smallerf=lambda x, best: x <= best,
        suppress_bounds_warning=False,
        log_level=5,
    ):
        """
        Create a fresh local-minimum search context.

        :param start: starting point
        :param stop: end point (exclusive)
        :param precision: only consider every ``precision``-th value in the main loop
        :param smallerf: a function to decide if ``lhs`` is smaller than ``rhs``.
        :param suppress_bounds_warning: do not warn if a boundary is picked as optimal

        """
        self._precision = precision
        self._orig_bounds = (start, stop)
        start = ceil(start / precision)
        stop = floor(stop / precision)
        local_minimum_base.__init__(self, start, stop, smallerf, suppress_bounds_warning, log_level)

    def __next__(self):
        x = local_minimum_base.__next__(self)
        return x * self._precision

    @property
    def x(self):
        return self._best[0] * self._precision

    @property
    def neighborhood(self):
        """
        An iterator over the neighbourhood of the currently best value.
        """
        start, stop = self._orig_bounds

        for x in range(max(start, self.x - self._precision), min(stop, self.x + self._precision)):
            yield x
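For instance (a sketch, not from the file), minimising a convex function directly with the context manager:

```python
# Sketch: find the minimiser of a convex function with local_minimum.
from estimator.util import local_minimum

f = lambda x: (x - 37) ** 2
with local_minimum(0, 100) as it:
    for x in it:
        it.update(f(x))
print(it.x, it.y)  # expected to report a point at or very near x = 37
```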
def binary_search(
    f, start, stop, param, step=1, smallerf=lambda x, best: x <= best, log_level=5, *args, **kwds
):
    """
    Search for the best value in the interval [start, stop] depending on the given comparison
    function.

    :param f: function to minimise
    :param start: start of the range to search
    :param stop: stop of the range to search (inclusive)
    :param param: the parameter of ``f`` to modify when calling it
    :param smallerf: comparison is performed by evaluating ``smallerf(current, best)``
    :param step: initially only consider every ``step``-th value
    """
    with local_minimum(start, stop + 1, step, smallerf=smallerf, log_level=log_level) as it:
        for x in it:
            kwds_ = dict(kwds)
            kwds_[param] = x
            it.update(f(*args, **kwds_))

        for x in it.neighborhood:
            kwds_ = dict(kwds)
            kwds_[param] = x
            it.update(f(*args, **kwds_))

        return it.y
def _batch_estimatef(f, x, log_level=0, f_repr=None):
    y = f(x)
    if f_repr is None:
        f_repr = repr(f)
    Logging.log("batch", log_level, f"f: {f_repr}")
    Logging.log("batch", log_level, f"x: {x}")
    Logging.log("batch", log_level, f"f(x): {repr(y)}")
    return y


def f_name(f):
    try:
        return f.__name__
    except AttributeError:
        return repr(f)


def batch_estimate(params, algorithm, jobs=1, log_level=0, **kwds):
    from .lwe_parameters import LWEParameters

    if isinstance(params, LWEParameters):
        params = (params,)
    try:
        iter(algorithm)
    except TypeError:
        algorithm = (algorithm,)

    tasks = []

    for x in params:
        for f in algorithm:
            tasks.append((partial(f, **kwds), x, log_level, f_name(f)))

    if jobs == 1:
        res = {}
        for f, x, lvl, f_repr in tasks:
            y = _batch_estimatef(f, x, lvl, f_repr)
            res[f_repr, x] = y
    else:
        pool = Pool(jobs)
        res = pool.starmap(_batch_estimatef, tasks)
        res = dict([((f_repr, x), res[i]) for i, (f, x, _, f_repr) in enumerate(tasks)])

    ret = dict()
    for f, x in res:
        ret[x] = ret.get(x, dict())
        ret[x][f] = res[f, x]

    return ret
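A usage sketch; the attack-function names here are assumptions about what the ``LWE`` module exposes in this snapshot, not something this diff confirms:

```python
# Sketch: estimate two (assumed) attack functions against two schemes.
from estimator import LWE, Kyber512, Kyber768
from estimator.util import batch_estimate

res = batch_estimate(
    (Kyber512, Kyber768),
    (LWE.primal_usvp, LWE.dual),  # assumed attack entry points
    jobs=2,
)
for params, costs in res.items():
    for name, c in costs.items():
        print(params.tag, name, c)
```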
@@ -1,272 +0,0 @@
|
||||
concrete_LWE_params = {
|
||||
|
||||
# 128-bits
|
||||
|
||||
"LWE128_256":
|
||||
{
|
||||
"k": 1,
|
||||
"n": 256,
|
||||
"sd": -5},
|
||||
|
||||
"LWE128_512":
|
||||
{
|
||||
"k": 1,
|
||||
"n": 512,
|
||||
"sd": -11},
|
||||
|
||||
"LWE128_638":
|
||||
{
|
||||
"k": 1,
|
||||
"n": 630,
|
||||
"sd": -14},
|
||||
|
||||
"LWE128_650":
|
||||
{
|
||||
"k": 1,
|
||||
"n": 650,
|
||||
"sd": -15},
|
||||
|
||||
"LWE128_688":
|
||||
{
|
||||
"k": 1,
|
||||
"n": 688,
|
||||
"sd": -16},
|
||||
|
||||
"LWE128_710":
|
||||
{
|
||||
"k": 1,
|
||||
"n": 710,
|
||||
"sd": -17},
|
||||
|
||||
"LWE128_750":
|
||||
{
|
||||
"k": 1,
|
||||
"n": 750,
|
||||
"sd": -17},
|
||||
|
||||
"LWE128_800":
|
||||
{
|
||||
"k": 1,
|
||||
"n": 800,
|
||||
"sd": -19},
|
||||
|
||||
"LWE128_830":
|
||||
{
|
||||
"k": 1,
|
||||
"n": 830,
|
||||
"sd": -20},
|
||||
|
||||
"LWE128_1024":
|
||||
{
|
||||
"k": 1,
|
||||
"n": 1024,
|
||||
"sd": -25},
|
||||
|
||||
"LWE128_2048":
|
||||
{
|
||||
"k": 1,
|
||||
"n": 2048,
|
||||
"sd": -52},
|
||||
|
||||
"LWE128_4096":
|
||||
{
|
||||
"k": 1,
|
||||
"n": 4096,
|
||||
"sd": -105},
|
||||
|
||||
# 80 bits
|
||||
|
||||
"LWE80_256":
|
||||
{
|
||||
"k": 1,
|
||||
"n": 256,
|
||||
"sd": -9,
|
||||
},
|
||||
|
||||
"LWE80_256":
|
||||
{
|
||||
"k": 1,
|
||||
"n": 256,
|
||||
"sd": -19,
|
||||
},
|
||||
|
||||
"LWE80_512":
|
||||
{
|
||||
"k": 1,
|
||||
"n": 512,
|
||||
"sd": -24,
|
||||
},
|
||||
|
||||
"LWE80_650":
|
||||
{
|
||||
"k": 1,
|
||||
"n": 650,
|
||||
"sd": -25,
|
||||
},
|
||||
|
||||
"LWE80_688":
|
||||
{
|
||||
"k": 1,
|
||||
"n": 688,
|
||||
"sd": -26,
|
||||
},
|
||||
|
||||
"LWE80_710":
|
||||
{
|
||||
"k": 1,
|
||||
"n": 710,
|
||||
"sd": -27,
|
||||
},
|
||||
|
||||
"LWE80_750":
|
||||
{
|
||||
"k": 1,
|
||||
"n": 750,
|
||||
"sd": -29,
|
||||
},
|
||||
|
||||
"LWE80_800":
|
||||
{
|
||||
"k": 1,
|
||||
"n": 800,
|
||||
"sd": -31,
|
||||
},
|
||||
|
||||
"LWE80_830":
|
||||
{
|
||||
"k": 1,
|
||||
"n": 830,
|
||||
"sd": -32,
|
||||
},
|
||||
|
||||
"LWE80_1024":
|
||||
{
|
||||
"k": 1,
|
||||
"n": 1024,
|
||||
"sd": -40,
|
||||
},
|
||||
|
||||
"LWE80_2048":
|
||||
{
|
||||
"k": 1,
|
||||
"n": 2048,
|
||||
"sd": -82,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
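Here ``sd`` is the base-2 logarithm of the error standard deviation relative to q = 2³²; a sketch of the conversion the scripts further down perform:

```python
# Sketch: convert a concrete_LWE_params entry to an absolute stddev and
# the noise rate alpha used by the old LWE Estimator.
from math import pi, sqrt

x = {"k": 1, "n": 1024, "sd": -25}   # concrete_LWE_params["LWE128_1024"]
q = 2 ** 32
sd = 2 ** x["sd"] * q                # absolute standard deviation: 2^7 = 128
alpha = sqrt(2 * pi) * sd / q        # noise rate
print(sd, alpha)
```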
concrete_RLWE_params = {

    # 128 bits

    ## dimension 1

    "RLWE128_256_1": {"k": 1, "n": 256, "sd": -5},
    "RLWE128_512_1": {"k": 1, "n": 512, "sd": -11},
    "RLWE128_1024_1": {"k": 1, "n": 1024, "sd": -25},
    "RLWE128_2048_1": {"k": 1, "n": 2048, "sd": -52},
    "RLWE128_4096_1": {"k": 1, "n": 4096, "sd": -105},

    ## dimension 2

    "RLWE128_256_2": {"k": 2, "n": 256, "sd": -11},
    "RLWE128_512_2": {"k": 2, "n": 512, "sd": -25},

    ## dimension 4

    "RLWE128_256_4": {"k": 4, "n": 256, "sd": -25},

    # 80 bits

    ## dimension 1

    "RLWE80_256_1": {"k": 1, "n": 256, "sd": -9},
    "RLWE80_512_1": {"k": 1, "n": 512, "sd": -19},
    "RLWE80_1024_1": {"k": 1, "n": 1024, "sd": -40},
    "RLWE80_2048_1": {"k": 1, "n": 2048, "sd": -82},

    ## dimension 2

    "RLWE80_256_2": {"k": 2, "n": 256, "sd": -19},
    "RLWE80_512_2": {"k": 1, "n": 512, "sd": -40},  # NOTE: k is 1 here although the name suggests k = 2

    ## dimension 4

    "RLWE80_256_4": {"k": 4, "n": 256, "sd": -40},
}
@@ -1,129 +0,0 @@
from sage.all import sqrt, pi, RR, oo, latex, table
from pytablewriter import MarkdownTableWriter  # assumed dependency for markdownit() below

import estimator.estimator as est
from concrete_params import concrete_LWE_params, concrete_RLWE_params
from hybrid_decoding import parameter_search

# NOTE: ``cost_models`` and ``get_security_level`` are referenced below but are
# not defined or imported in this file.


def get_all_security_levels(params):
    """A function which gets the security levels of a collection of TFHE parameters,
    using the four cost models: classical, quantum, classical_conservative, and
    quantum_conservative.

    :param params: a dictionary of LWE parameter sets (see concrete_params)

    EXAMPLE:
    sage: X = get_all_security_levels(concrete_LWE_params)
    sage: X
    [['LWE128_256',
    126.692189756144,
    117.566189756144,
    98.6960000000000,
    89.5700000000000], ...]
    """
    RESULTS = []

    for param in params:

        results = [param]
        x = params["{}".format(param)]
        n = x["n"] * x["k"]
        q = 2 ** 32
        sd = 2 ** (x["sd"]) * q
        alpha = sqrt(2 * pi) * sd / RR(q)
        secret_distribution = (0, 1)
        # assume access to an infinite number of samples
        m = oo

        for model in cost_models:
            try:
                model = model[0]
            except (TypeError, IndexError):
                pass
            # NOTE: ``model`` is computed above but the call below always uses est.BKZ.sieve
            estimate = parameter_search(mitm=True, reduction_cost_model=est.BKZ.sieve, n=n, q=q,
                                        alpha=alpha, m=m, secret_distribution=secret_distribution)
            results.append(get_security_level(estimate))

        RESULTS.append(results)

    return RESULTS

def get_hybrid_security_levels(params):
    """A function which gets the security levels of a collection of TFHE parameters
    under the hybrid decoding attack, using the sieving cost model.

    :param params: a dictionary of LWE parameter sets (see concrete_params)

    EXAMPLE:
    sage: X = get_hybrid_security_levels(concrete_LWE_params)
    sage: X
    [['LWE128_256',
    126.692189756144,
    117.566189756144,
    98.6960000000000,
    89.5700000000000], ...]
    """
    RESULTS = []

    for param in params:

        results = [param]
        x = params["{}".format(param)]
        n = x["n"] * x["k"]
        q = 2 ** 32
        sd = 2 ** (x["sd"]) * q
        alpha = sqrt(2 * pi) * sd / RR(q)
        secret_distribution = (0, 1)
        # assume access to an infinite number of samples
        m = oo

        model = est.BKZ.sieve
        estimate = parameter_search(mitm=True, reduction_cost_model=model, n=n, q=q,
                                    alpha=alpha, m=m, secret_distribution=secret_distribution)
        results.append(get_security_level(estimate))

        RESULTS.append(results)

    return RESULTS

def latexit(results):
    """
    A function which takes the output of get_all_security_levels() and
    turns it into a LaTeX table.

    :param results: the security levels

    sage: X = get_all_security_levels(concrete_LWE_params)
    sage: latexit(X)
    \begin{tabular}{llllll}
    LWE128_256 & $126.69$ & $117.57$ & $98.7$ & $89.57$ & $217.55$ \\
    LWE128_512 & $135.77$ & $125.92$ & $106.58$ & $96.73$ & $218.53$ \\
    LWE128_638 & $135.27$ & $125.49$ & $105.7$ & $95.93$ & $216.81$ \\
    [...]
    """
    return latex(table(results))


def markdownit(results, headings=["Parameter Set", "Classical", "Quantum", "Classical (c)", "Quantum (c)", "Enum"]):
    """
    A function which takes the output of get_all_security_levels() and
    turns it into a markdown table.

    :param results: the security levels

    sage: X = get_all_security_levels(concrete_LWE_params)
    sage: markdownit(X)
    # estimates
    |Parameter Set|Classical|Quantum|Classical (c)|Quantum (c)| Enum |
    |-------------|---------|-------|-------------|-----------|------|
    |LWE128_256   |126.69   |117.57 |98.7         |89.57      |217.55|
    |LWE128_512   |135.77   |125.92 |106.58       |96.73      |218.53|
    |LWE128_638   |135.27   |125.49 |105.7        |95.93      |216.81|
    [...]
    """
    writer = MarkdownTableWriter(value_matrix=results, headers=headings, table_name="estimates")
    writer.write_table()

    return writer


# dual bug example
# n = 256; q = 2**32; sd = 2**(-4); reduction_cost_model = BKZ.sieve
# _ = estimate_lwe(n, alpha, q, reduction_cost_model)
Binary file not shown.
Before Width: | Height: | Size: 74 KiB
Binary file not shown.
Before Width: | Height: | Size: 34 KiB
Binary file not shown.
Before Width: | Height: | Size: 32 KiB
Binary file not shown.
Before Width: | Height: | Size: 26 KiB
Binary file not shown.
Before Width: | Height: | Size: 20 KiB
@@ -1,329 +0,0 @@
# -*- coding: utf-8 -*-
"""
Security estimates for the hybrid decoding attack.

Requires a local copy of the LWE Estimator from
https://bitbucket.org/malb/lwe-estimator/src/master/estimator.py in a folder called "estimator".
"""

from sage.all import ZZ, binomial, sqrt, log, exp, oo, pi, prod, RR
from sage.probability.probability_distribution import RealDistribution
from estimator import estimator as est
from concrete_params import concrete_LWE_params

## Core cost models

core_sieve = lambda beta, d, B: ZZ(2) ** RR(0.292 * beta + 16.4)
core_qsieve = lambda beta, d, B: ZZ(2) ** RR(0.265 * beta + 16.4)

## Utility functions
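These core-SVP models depend only on β; a one-line sanity check (a sketch, assuming a Sage session with the lambdas above in scope):

```python
# Sketch: log2 of the core-SVP cost models at beta = 500.
from math import log2

print(log2(core_sieve(500, None, None)))   # 0.292*500 + 16.4 = 162.4
print(log2(core_qsieve(500, None, None)))  # 0.265*500 + 16.4 = 148.9
```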
def sq_GSO(d, beta, det):
    """
    Return squared GSO lengths after lattice reduction according to the GSA.

    :param d: lattice dimension
    :param beta: block size used in BKZ
    :param det: lattice determinant

    """
    r = []
    for i in range(d):
        r_i = est.delta_0f(beta) ** (((-2 * d * i) / (d - 1)) + d) * det ** (1 / d)
        r.append(r_i ** 2)

    return r

def babai_probability_wun16(r, norm):
    """
    Compute the success probability of Babai's Nearest Plane, using techniques from the
    NTRULPrime submission to NIST.

    :param r: squared GSO lengths
    :param norm: expected norm of the target vector

    """
    R = [RR(sqrt(t) / (2 * norm)) for t in r]
    T = RealDistribution('beta', ((len(r) - 1) / 2, 1. / 2))
    probs = [1 - T.cum_distribution_function(1 - s ** 2) for s in R]
    return prod(probs)

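A toy sketch (hypothetical numbers, not from the file): when every GSO length comfortably exceeds twice the target norm, each per-coordinate factor is essentially 1 and so is the product:

```python
# Sketch: Babai success probability against a flat, comfortably long profile.
r = [250 ** 2] * 400   # hypothetical squared GSO lengths
print(babai_probability_wun16(r, norm=100))  # expected to print (close to) 1
```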
## Estimate hybrid decoding attack complexity


def hybrid_decoding_attack(n, alpha, q, m, secret_distribution,
                           beta, tau=None, mitm=True, reduction_cost_model=est.BKZ.sieve):
    """
    Estimate the cost of the hybrid decoding attack.

    :param n: LWE dimension `n > 0`
    :param alpha: noise rate `0 ≤ α < 1`, noise will have standard deviation `αq/sqrt(2π)`
    :param q: modulus `0 < q`
    :param m: number of LWE samples `m > 0`
    :param secret_distribution: distribution of the secret
    :param beta: BKZ block size β
    :param tau: guessing dimension τ
    :param mitm: simulate the MITM approach (√ of the search space)
    :param reduction_cost_model: BKZ reduction cost model

    EXAMPLE:

    hybrid_decoding_attack(beta = 100, tau = 250, mitm = True, reduction_cost_model = est.BKZ.sieve, **example_64())

    rop: 2^65.1
    pre: 2^64.8
    enum: 2^62.5
    beta: 100
    |S|: 2^73.1
    prob: 0.104533
    scale: 12.760
    pp: 11
    d: 1798
    repeat: 42

    """
    n, alpha, q = est.Param.preprocess(n, alpha, q)

    # d is the dimension of the attack lattice
    d = m + n - tau

    # h is the Hamming weight of the secret
    # NOTE: binary secrets are assumed to have Hamming weight ~n/2, ternary secrets ~2n/3;
    # this aligns with the assumptions made in the LWE Estimator
    h = est.SDis.nonzero(secret_distribution, n=n)
    sd = alpha * q / sqrt(2 * pi)

    # compute the scaling factor used in the primal lattice to balance the secret and error
    scale = est._primal_scale_factor(secret_distribution, alpha=alpha, q=q, n=n)

    # 1. get squared GSO lengths via the Geometric Series Assumption;
    # we could also consider using the BKZ simulator, but using the GSA is conservative
    r = sq_GSO(d, beta, q ** m * scale ** (n - tau))

    # 2. Costs
    bkz_cost = est.lattice_reduction_cost(reduction_cost_model, est.delta_0f(beta), d)
    enm_cost = est.Cost()
    enm_cost["rop"] = d ** 2 / (2 ** 1.06)

    # 3. Size of the search space
    # We need to do at least one BDD call
    search_space, prob, hw = ZZ(1), 1.0, 0

    # if mitm is True, we get a sqrt speedup in the guessing phase. This allows us to square
    # the size of the search space at no extra cost.
    # NOTE: we conservatively assume that this mitm process succeeds with probability 1.
    ssf = sqrt if mitm else lambda x: x

    # use the secret distribution bounds to determine the size of the search space
    a, b = est.SDis.bounds(secret_distribution)

    # perform "searching". This part of the code balances enm_cost with the cost of lattice
    # reduction, where enm_cost is the total cost of calling Babai's algorithm on each vector in
    # the search space.
    if tau:
        prob = est.success_probability_drop(n, h, tau)
        hw = 1
        while hw < h and hw < tau:
            prob += est.success_probability_drop(n, h, tau, fail=hw)
            search_space += binomial(tau, hw) * (b - a) ** hw

            if enm_cost.repeat(ssf(search_space))["rop"] > bkz_cost["rop"]:
                # we moved too far, so undo
                prob -= est.success_probability_drop(n, h, tau, fail=hw)
                search_space -= binomial(tau, hw) * (b - a) ** hw
                hw -= 1
                break
            hw += 1

    enm_cost = enm_cost.repeat(ssf(search_space))

    # we use the expectation of the target norm. This could be longer, or shorter, for any
    # given instance.
    target_norm = sqrt(m * sd ** 2 + h * RR((n - tau) / n) * scale ** 2)

    # account for the success probability of Babai's algorithm
    prob *= babai_probability_wun16(r, target_norm)

    # create a cost object, as in the LWE Estimator, to store the attack parameters and costs
    ret = est.Cost()
    ret["rop"] = bkz_cost["rop"] + enm_cost["rop"]
    ret["pre"] = bkz_cost["rop"]
    ret["enum"] = enm_cost["rop"]
    ret["beta"] = beta
    ret["|S|"] = search_space
    ret["prob"] = prob
    ret["scale"] = scale
    ret["pp"] = hw
    ret["d"] = d
    ret["tau"] = tau

    # 5. Repeat the whole experiment ~1/prob times
    ret = ret.repeat(est.amplify(0.99, prob), select={"rop": True,
                                                      "pre": True,
                                                      "enum": True,
                                                      "beta": False,
                                                      "d": False,
                                                      "|S|": False,
                                                      "scale": False,
                                                      "prob": False,
                                                      "pp": False,
                                                      "tau": False})

    return ret

## Optimize attack parameters
|
||||
|
||||
def parameter_search(n, alpha, q, m, secret_distribution, mitm = True, reduction_cost_model=est.BKZ.sieve):
|
||||
|
||||
"""
|
||||
:param n: LWE dimension `n > 0`
|
||||
:param alpha: noise rate `0 ≤ α < 1`, noise will have standard deviation `αq/sqrt{2π}`
|
||||
:param q: modulus `0 < q`
|
||||
:param m: number of LWE samples `m > 0`
|
||||
:param secret_distribution: distribution of secret
|
||||
:param beta_search: tuple (β_min, β_max, granularity) for the search space of β, default is (60,301,20)
|
||||
:param tau: tuple (τ_min, τ_max, granularity) for the search space of τ, default is (0,501,20)
|
||||
:param mitm: simulate MITM approach (√ of search space)
|
||||
:param reduction_cost_model: BKZ reduction cost model
|
||||
|
||||
EXAMPLE:
|
||||
|
||||
parameter_search(mitm = False, reduction_cost_model = est.BKZ.sieve, **example_64())
|
||||
|
||||
rop: 2^69.5
|
||||
pre: 2^68.9
|
||||
enum: 2^68.0
|
||||
beta: 110
|
||||
|S|: 2^40.9
|
||||
prob: 0.045060
|
||||
scale: 12.760
|
||||
pp: 6
|
||||
d: 1730
|
||||
repeat: 100
|
||||
tau: 170
|
||||
|
||||
parameter_search(mitm = True, reduction_cost_model = est.BKZ.sieve, **example_64())
|
||||
|
||||
rop: 2^63.4
|
||||
pre: 2^63.0
|
||||
enum: 2^61.5
|
||||
beta: 95
|
||||
|S|: 2^72.0
|
||||
prob: 0.125126
|
||||
scale: 12.760
|
||||
pp: 11
|
||||
d: 1666
|
||||
repeat: 35
|
||||
tau: 234
|
||||
|
||||
"""
|
||||
|
||||
    primald = est.partial(est.drop_and_solve, est.dual_scale, postprocess=True, decision=True)
    bl = primald(n, alpha, q, secret_distribution=secret_distribution, m=m, reduction_cost_model=reduction_cost_model)

    # we take the number of LWE samples to be the same as in the baseline attack above
    m = bl["m"]

    f = est.partial(hybrid_decoding_attack, n=n, alpha=alpha, q=q, m=m, secret_distribution=secret_distribution,
                    reduction_cost_model=reduction_cost_model,
                    mitm=mitm)

    # NOTE: we describe our search strategy below. To produce more accurate estimates,
    # change this part of the code to ensure a more granular search. As we are using
    # homomorphic-encryption-style parameters, the running time of the code can be quite
    # high, which justifies the choices below.
    # We start at beta = 40 and go up to beta_max in steps of 50.

    beta_max = bl["beta"] + 100
    beta_search = (40, beta_max, 50)

    best = None
    for beta in range(beta_search[0], beta_search[1], beta_search[2])[::-1]:
        tau = 0
        best_beta = None
        count = 3
        while tau < n:
            if count >= 0:
                cost = f(beta=beta, tau=tau)
                if best_beta is not None:
                    # if consecutive estimates stop decreasing, stop optimising over tau
                    if best_beta["rop"] < cost["rop"]:
                        count -= 1
                cost["tau"] = tau
                if best_beta is None:
                    best_beta = cost
                if RR(log(cost["rop"], 2)) < RR(log(best_beta["rop"], 2)):
                    best_beta = cost
                if best is None:
                    best = cost
                if RR(log(cost["rop"], 2)) < RR(log(best["rop"], 2)):
                    best = cost
            tau += n // 100

    # now do a second, more granular search:
    # we start at the beta which produced the lowest running time, and search ±25 in steps of 10
    tau_gap = max(n // 100, 1)
    for beta in range(best["beta"] - 25, best["beta"] + 25, 10)[::-1]:
        tau = max(best["tau"] - 25, 0)
        best_beta = None
        count = 3
        while tau <= best["tau"] + 25:
            if count >= 0:
                cost = f(beta=beta, tau=tau)
                if best_beta is not None:
                    # if consecutive estimates stop decreasing, stop optimising over tau
                    if best_beta["rop"] < cost["rop"]:
                        count -= 1
                cost["tau"] = tau
                if best_beta is None:
                    best_beta = cost
                if RR(log(cost["rop"], 2)) < RR(log(best_beta["rop"], 2)):
                    best_beta = cost
                if best is None:
                    best = cost
                if RR(log(cost["rop"], 2)) < RR(log(best["rop"], 2)):
                    best = cost
            tau += tau_gap

    return best
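
# A minimal, self-contained sketch of the two-stage (coarse, then fine) grid search
# used above. Illustration only: `score` stands in for the real cost function f, and
# the step sizes mirror the choices above.
def _coarse_then_fine_sketch(score, beta_range=(40, 300, 50)):
    # stage 1: coarse grid over beta
    best = min(range(*beta_range), key=score)
    # stage 2: finer grid of width ±25 in steps of 10 around the coarse optimum
    return min(range(max(best - 25, 40), best + 25, 10), key=score)

# sage: _coarse_then_fine_sketch(lambda beta: (beta - 97) ** 2)
# 95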
def get_all_security_levels(params):
    """ A function which gets the security levels of a collection of TFHE parameters,
    using the four cost models: classical, quantum, classical_conservative, and
    quantum_conservative

    :param params: a dictionary of LWE parameter sets (see concrete_params)

    EXAMPLE:
    sage: X = get_all_security_levels(concrete_LWE_params)
    sage: X
    [['LWE128_256',
      126.692189756144,
      117.566189756144,
      98.6960000000000,
      89.5700000000000], ...]
    """

    RESULTS = []

    for param in params:
        results = [param]
        x = params[param]
        n = x["n"] * x["k"]
        q = 2 ** 32
        sd = 2 ** (x["sd"]) * q
        alpha = sqrt(2 * pi) * sd / RR(q)
        secret_distribution = (0, 1)
        # assume access to an infinite number of samples
        m = oo

        # one estimate per cost model, matching the four values in the example above
        # (classical, quantum, classical_conservative and quantum_conservative are
        # assumed to be defined earlier in this file)
        for model in [classical, quantum, classical_conservative, quantum_conservative]:
            estimate = parameter_search(mitm=True, reduction_cost_model=model, n=n, q=q,
                                        alpha=alpha, m=m, secret_distribution=secret_distribution)
            results.append(get_security_level(estimate))

        RESULTS.append(results)

    return RESULTS
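
# A usage sketch (illustration only: `toy_params` is a hypothetical input; the real
# `concrete_LWE_params` dictionary is defined elsewhere). Each parameter set is assumed
# to carry the keys n, k and sd used above:
#
# sage: toy_params = {"LWE128_256": {"n": 256, "k": 1, "sd": -12}}
# sage: get_all_security_levels(toy_params)
# [['LWE128_256', ..., ..., ..., ...]]   # one security level per cost model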
@@ -1,17 +0,0 @@
from estimator_new import *
from sage.all import oo


def test():
    # a small smoke test: estimate a default TFHE-style parameter set and some variants
    D = ND.DiscreteGaussian
    params = LWE.Parameters(n=1024, q=2 ** 64, Xs=D(0.50, -0.50), Xe=D(2 ** 57), m=oo, tag='TFHE_DEFAULT')

    names = [params, params.updated(n=761), params.updated(q=2 ** 65), params.updated(n=762)]

    for name in names:
        LWE.estimate(name, deny_list=("arora-gb", "bkw"))

    return 0


test()
@@ -1,31 +0,0 @@
from multiprocessing import Process
from estimator_new import *
from sage.all import oo


def test_memory(x):
    # run a single estimate in a child process, so that its memory is reclaimed on exit
    print("doing job...")
    print(x)
    y = LWE.estimate(x, deny_list=("arora-gb", "bkw"))
    return y


if __name__ == "__main__":
    D = ND.DiscreteGaussian
    params = LWE.Parameters(n=1024, q=2 ** 64, Xs=D(0.50, -0.50), Xe=D(2 ** 57), m=oo, tag='TFHE_DEFAULT')

    names = [params, params.updated(n=761), params.updated(q=2 ** 65), params.updated(n=762)]
    procs = []

    for name in names:
        proc = Process(target=test_memory, args=(name,))
        procs.append(proc)
        proc.start()
        # join immediately, so the estimates run one at a time
        proc.join()

    for proc in procs:
        proc.join()
@@ -1,471 +0,0 @@
from estimator_new import *
from sage.all import oo, save, load, ceil
from math import log2
import multiprocessing


def old_models(security_level, sd, logq=32):
    """
    Use the old model as a starting point for the data-gathering step
    :param security_level: the security level under consideration
    :param sd: the (base-2 log) standard deviation of the LWE error distribution Xe
    :param logq: the (base-2 log) value of the LWE modulus q
    """

    def evaluate_model(a, b, stddev=sd):
        # the model is stddev = a * n + b, so invert it to recover n
        return (stddev - b) / a

    models = dict()

    models["80"] = (-0.04049295502947623, 1.1288318226557081 + logq)
    models["96"] = (-0.03416314056943681, 1.4704806061716345 + logq)
    models["112"] = (-0.02970984362676178, 1.7848907787798667 + logq)
    models["128"] = (-0.026361288425133814, 2.0014671315214696 + logq)
    models["144"] = (-0.023744534465622812, 2.1710601038230712 + logq)
    models["160"] = (-0.021667220727651954, 2.3565507936475476 + logq)
    models["176"] = (-0.019947662046189942, 2.5109588704235803 + logq)
    models["192"] = (-0.018552804646747204, 2.7168913723130816 + logq)
    models["208"] = (-0.017291091126923574, 2.7956961446214326 + logq)
    models["224"] = (-0.016257546811508806, 2.9582401000615226 + logq)
    models["240"] = (-0.015329741032015766, 3.0744579055889782 + logq)
    models["256"] = (-0.014530554319171845, 3.2094375376751745 + logq)

    (a, b) = models["{}".format(security_level)]
    n_est = evaluate_model(a, b, sd)

    return round(n_est)
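
# A worked example of the model inversion (numbers recomputed from the "128" entry above):
# for lambda = 128, logq = 64 and an error with log2(stddev) = 39 we get
# a = -0.026361..., b = 2.0015 + 64 = 66.0015, so n ~ (39 - 66.0015) / (-0.026361) ~ 1024.
#
# sage: old_models(128, 39, logq=64)
# 1024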
def estimate(params, red_cost_model=RC.BDGL16, skip=("arora-gb", "bkw")):
    """
    Retrieve an estimate using the Lattice Estimator, for a given set of input parameters
    :param params: the input LWE parameters
    :param red_cost_model: the lattice reduction cost model
    :param skip: attacks to skip
    """

    est = LWE.estimate(params, red_cost_model=red_cost_model, deny_list=skip)

    return est
def get_security_level(est, dp=2):
    """
    Get the security level lambda from a Lattice Estimator output
    :param est: the Lattice Estimator output
    :param dp: the number of decimal places to consider
    """
    attack_costs = []
    # note: the key does not need to be specified, i.e. `est` vs `est.keys()`
    for key in est:
        attack_costs.append(est[key]["rop"])
    # get the security level correct to `dp` decimal places
    security_level = round(log2(min(attack_costs)), dp)

    return security_level
def inequality(x, y):
    """ A utility function which compresses the conditions x < y and x > y into a single condition via a multiplier
    :param x: the LHS of the inequality
    :param y: the RHS of the inequality
    """
    if x <= y:
        return 1

    if x > y:
        return -1
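
# The multiplier returned by inequality() lets one loop handle both search directions:
# z = 1 when we are at or below the target (z * level < z * target then means "still too
# low", so the parameter must grow), and z = -1 when we are above it (the same comparison
# then means "still too high", so the parameter can shrink). For example:
#
# sage: z = inequality(110, 128); z, z * 110 < z * 128
# (1, True)
# sage: z = inequality(150, 128); z, z * 150 < z * 128
# (-1, True)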
def automated_param_select_n(params, target_security=128):
    """ A function used to generate the smallest value of n which allows for
    target_security bits of security, for the input values of (params.Xe.stddev, params.q)
    :param params: the input LWE parameters
    :param target_security: the target number of bits of security, 128 is default

    EXAMPLE:
    sage: X = automated_param_select_n(Kyber512, target_security = 128)
    sage: X
    456
    """

    # get an estimate based on the previous model
    print("n = {}".format(params.n))
    n_start = old_models(target_security, log2(params.Xe.stddev), log2(params.q))
    # n_start = max(n_start, 450)
    # TODO: think about throwing an error if the required n < 450

    params = params.updated(n=n_start)
    costs2 = estimate(params)
    security_level = get_security_level(costs2, 2)
    z = inequality(security_level, target_security)

    # we keep n > 2 * target_security as a rough baseline for mitm security (on binary key guessing)
    while z * security_level < z * target_security:
        # TODO: fill in this case! For n > 1024 we only need to consider every 256 (optimization)
        params = params.updated(n=params.n + z * 8)
        costs = estimate(params)
        security_level = get_security_level(costs, 2)

        # sanity break-out: the loop is otherwise unbounded in the z = 1 direction
        # (n > 2**15 is an assumed cap, far beyond any TFHE parameter set)
        if params.n > 2 ** 15:
            print("target security level is unattainable")
            break

    # final estimate (we went too far in the above loop)
    if security_level < target_security:
        # we make n larger
        print("we make n larger")
        params = params.updated(n=params.n + 8)
        costs = estimate(params)
        security_level = get_security_level(costs, 2)

    print("the finalised parameters are n = {}, log2(sd) = {}, log2(q) = {}, "
          "with a security level of {}-bits".format(params.n, log2(params.Xe.stddev),
                                                    log2(params.q), security_level))

    if security_level < target_security:
        params = params.updated(n=None)

    return params, security_level
def generate_parameter_matrix(params_in, sd_range, target_security_levels=[128], name="default_name"):
    """
    :param params_in: an initial set of LWE parameters
    :param sd_range: a tuple (sd_min, sd_max) giving the values of sd for which to generate parameters
    :param target_security_levels: a list of the target number of bits of security, 128 is default
    :param name: a name to save the file
    """

    (sd_min, sd_max) = sd_range
    for lam in target_security_levels:
        for sd in range(sd_min, sd_max + 1):
            print("run for lambda = {}, sd = {}".format(lam, sd))
            Xe_new = ND.DiscreteGaussian(2 ** sd)
            (params_out, sec) = automated_param_select_n(params_in.updated(Xe=Xe_new), target_security=lam)

            try:
                results = load("{}.sobj".format(name))
            except Exception:
                results = dict()
            # make sure the key for this security level exists before appending
            results.setdefault("{}".format(lam), [])

            results["{}".format(lam)].append((params_out.n, log2(params_out.q), log2(params_out.Xe.stddev), sec))
            save(results, "{}.sobj".format(name))

    return results
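
# A usage sketch (not executed here; a single run can take hours with HE-style
# parameters). Starting from the same base parameter set used below, generate curve
# points for sd in {2, 3, 4} at 128 bits and append them to default_name.sobj:
#
# sage: D = ND.DiscreteGaussian
# sage: base = LWE.Parameters(n=1024, q=2 ** 64, Xs=D(0.50, -0.50), Xe=D(2 ** 55), m=oo, tag='params')
# sage: generate_parameter_matrix(base, (2, 4), target_security_levels=[128])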
def generate_zama_curves64(sd_range=[2, 58], target_security_levels=[128], name="default_name"):
    """
    The top-level function which we use to run the experiment

    :param sd_range: a tuple (sd_min, sd_max) giving the values of sd for which to generate parameters
    :param target_security_levels: a list of the target number of bits of security, 128 is default
    :param name: a name to save the file
    """
    # the guard stops worker processes from re-entering this function on import
    if __name__ == '__main__':
        D = ND.DiscreteGaussian
        vals = range(sd_range[0], sd_range[1])
        pool = multiprocessing.Pool(2)
        init_params = LWE.Parameters(n=1024, q=2 ** 64, Xs=D(0.50, -0.50), Xe=D(2 ** 55), m=oo, tag='params')
        inputs = [(init_params, (val, val), target_security_levels, name) for val in vals]
        pool.starmap(generate_parameter_matrix, inputs)

        return "done"
# The script runs the following commands
import sys

# grab the values of the command-line input arguments
a = int(sys.argv[1])
b = int(sys.argv[2])
c = int(sys.argv[3])
# run the code
generate_zama_curves64(sd_range=(b, c), target_security_levels=[a], name="{}".format(a))
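
# Example invocation (the file name is an assumption; any name works). This generates
# and saves curve data for 128-bit security, for sd in [2, 58):
#
#   sage generate_curves_64.py 128 2 58
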
import numpy as np
from sage.all import save, load, ceil


def sort_data(security_level):
    from operator import itemgetter

    # step 1. load the data
    X = load("{}.sobj".format(security_level))

    # step 2. sort by sd
    x = sorted(X["{}".format(security_level)], key=itemgetter(2))

    # step 3. replace the sorted value
    X["{}".format(security_level)] = x

    return X
def generate_curve(security_level):
    # step 1. get the data
    X = sort_data(security_level)

    # step 2. group the n and sigma data into lists
    N = []
    SD = []
    for x in X["{}".format(security_level)]:
        N.append(x[0])
        # note: each sd value is shifted up by half a bit before fitting
        SD.append(x[2] + 0.5)

    # step 3. perform linear interpolation and return the coefficients
    (a, b) = np.polyfit(N, SD, 1)

    return a, b
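
# generate_curve() reduces to a degree-1 least-squares fit. A tiny self-contained
# illustration of the np.polyfit call (synthetic, collinear data chosen to resemble
# the shape of the 128-bit curve; not real estimator output):
#
# sage: import numpy as np
# sage: np.polyfit([1000, 1500, 2000], [-24.0, -37.0, -50.0], 1)
# -> slope ~ -0.026, intercept ~ 2.0, i.e. sd(n) ~ -0.026 * n + 2.0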
def verify_curve(security_level, a=None, b=None):
    # step 1. get the table and the max values of n, sd
    X = sort_data(security_level)
    n_max = X["{}".format(security_level)][0][0]
    sd_max = X["{}".format(security_level)][-1][2]

    # step 2. a function to get model values
    def f_model(a, b, n):
        return ceil(a * n + b)

    # step 3. a function to get table values
    def f_table(table, n):
        # fall back to the last row in case every table entry has n larger than the input
        j = len(table) - 1
        for i in range(len(table)):
            n_val = table[i][0]
            if n < n_val:
                pass
            else:
                j = i
                break

        # now j is the correct index, so we return the corresponding sd
        return table[j][2]

    # step 4. for each n, check whether we satisfy the table
    n_min = max(2 * security_level, 450, X["{}".format(security_level)][-1][0])
    print(n_min)
    print(n_max)

    for n in range(n_max, n_min, -1):
        model_sd = f_model(a, b, n)
        table_sd = f_table(X["{}".format(security_level)], n)
        print(n, table_sd, model_sd, model_sd >= table_sd)

        if table_sd > model_sd:
            print("MODEL FAILS at n = {}".format(n))
            return "FAIL", n

    return "PASS", n_min
def generate_and_verify(security_levels, log_q, name="verified_curves"):
    data = []

    for sec in security_levels:
        print("WE GO FOR {}".format(sec))
        # generate the model for security level sec
        (a_sec, b_sec) = generate_curve(sec)
        # verify the model for security level sec
        res = verify_curve(sec, a_sec, b_sec)
        # append the information to a list
        data.append((a_sec, b_sec - log_q, sec, res[0], res[1]))
        save(data, "{}.sobj".format(name))

    return data


# To verify the curves we use
generate_and_verify([80, 96, 112, 128, 144, 160, 176, 192, 256], log_q=64)
@@ -1,599 +0,0 @@
import estimator.estimator as est
import matplotlib.pyplot as plt
import numpy as np
from sage.all import ZZ, RR, oo, log, sqrt, pi

# define the four cost models used for Concrete (2 classical, 2 quantum)
# note that classical and quantum are the two models used in the Homomorphic Encryption Standard
def classical(beta, d, B):
    return ZZ(2) ** RR(0.292 * beta + 16.4 + log(8 * d, 2))


def quantum(beta, d, B):
    return ZZ(2) ** RR(0.265 * beta + 16.4 + log(8 * d, 2))


def classical_conservative(beta, d, B):
    return ZZ(2) ** RR(0.292 * beta)


def quantum_conservative(beta, d, B):
    return ZZ(2) ** RR(0.265 * beta)


# we add an enumeration model for completeness
cost_models = [classical, quantum, classical_conservative, quantum_conservative, est.BKZ.enum]
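
# A worked sanity check of the models (numbers recomputed by hand, using the attack
# parameters beta = 110, d = 1730 that appear in the parameter_search example earlier
# in this diff; the argument B is unused by all four models and is kept only for
# interface compatibility):
#
# sage: log(classical(110, 1730, None), 2)    # 0.292*110 + 16.4 + log2(8*1730)
# ~ 62.28
# sage: log(classical_conservative(110, 1730, None), 2)    # core-SVP term only
# ~ 32.12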
def estimate_lwe_nocrash(n, alpha, q, secret_distribution,
                         reduction_cost_model=est.BKZ.sieve, m=oo):
    """
    A function to estimate the complexity of LWE, whilst skipping over any attacks which crash
    :param n: the LWE dimension
    :param alpha: the noise rate of the error
    :param q: the LWE ciphertext modulus
    :param secret_distribution: the LWE secret distribution
    :param reduction_cost_model: the BKZ reduction cost model
    :param m: the number of available LWE samples

    EXAMPLE:
    sage: estimate_lwe_nocrash(n = 256, q = 2**32, alpha = RR(8/2**32), secret_distribution = (0,1))
    39.46
    """

    # the success value denotes whether we need to re-run the estimator, in the case of a crash
    success = 0

    try:
        # we begin by trying the usvp, dual and mitm attacks
        estimate = est.estimate_lwe(n, alpha, q, secret_distribution=secret_distribution,
                                    reduction_cost_model=reduction_cost_model, m=m,
                                    skip={"bkw", "dec", "arora-gb"})
        success = 1

    except Exception as e:
        print(e)

    if success == 0:
        try:
            # dual crashes most often, so try skipping dual first
            estimate = est.estimate_lwe(n, alpha, q, secret_distribution=secret_distribution,
                                        reduction_cost_model=reduction_cost_model, m=m,
                                        skip={"bkw", "dec", "arora-gb", "dual"})
            success = 1

        except Exception as e:
            print(e)

    if success == 0:
        # as a last resort, also skip mitm (if this call crashes too, the error propagates)
        estimate = est.estimate_lwe(n, alpha, q, secret_distribution=secret_distribution,
                                    reduction_cost_model=reduction_cost_model, m=m,
                                    skip={"mitm", "bkw", "dec", "arora-gb", "dual"})

    # the output security level is just the cost of the fastest attack
    security_level = get_security_level(estimate)

    return security_level
def get_security_level(estimate, decimal_places=2):
    """ Function to get the security level from an LWE Estimator output,
    i.e. returns only the bit-security level (without the attack params)
    :param estimate: the input estimate
    :param decimal_places: the number of decimal places

    EXAMPLE:
    sage: x = estimate_lwe(n = 256, q = 2**32, alpha = RR(8/2**32))
    sage: get_security_level(x)
    33.8016789754458
    """

    levels = []

    # use try/except to cover cases where we only consider one or two attacks
    for attack in ("usvp", "dec", "dual", "mitm"):
        try:
            levels.append(estimate[attack]["rop"])
        except KeyError:
            pass

    # take the minimum attack cost (in bits)
    security_level = round(log(min(levels), 2), decimal_places)

    return security_level
def inequality(x, y):
    """ A function which compresses the conditions x < y and x > y into a single condition via a multiplier
    """
    if x <= y:
        return 1

    if x > y:
        return -1
def automated_param_select_n(sd, n=None, q=2 ** 32, reduction_cost_model=est.BKZ.sieve, secret_distribution=(0, 1),
                             target_security=128):
    """ A function used to generate the smallest value of n which allows for
    target_security bits of security, for the input values of (sd, q)
    :param sd: the (base-2 log) standard deviation of the error
    :param n: an initial value of n to use in the optimisation, guessed if None
    :param q: the LWE modulus (q = 2**32, 2**64 in TFHE)
    :param reduction_cost_model: the BKZ cost model considered, BKZ.sieve is default
    :param secret_distribution: the LWE secret distribution
    :param target_security: the target number of bits of security, 128 is default

    EXAMPLE:
    sage: X = automated_param_select_n(sd = -25, q = 2**32)
    sage: X
    1054
    """

    if n is None:
        # pick some n which gets us close (based on concrete_LWE_params)
        n = round(sd * (-25) * (target_security / 80))

    sd_abs = 2 ** sd * q
    alpha = sqrt(2 * pi) * sd_abs / RR(q)

    # initial estimate, to determine whether we are above or below the target security level
    security_level = estimate_lwe_nocrash(n, alpha, q, secret_distribution=secret_distribution,
                                          reduction_cost_model=reduction_cost_model, m=oo)

    z = inequality(security_level, target_security)

    while z * security_level < z * target_security and n > 80:
        n += z * 8
        security_level = estimate_lwe_nocrash(n, alpha, q, secret_distribution=secret_distribution,
                                              reduction_cost_model=reduction_cost_model, m=oo)

        # sanity break-out: the loop is otherwise unbounded in the z = 1 direction
        # (n > 2**15 is an assumed cap, far beyond any TFHE parameter set)
        if n > 2 ** 15:
            print("target security level is unattainable")
            break

    # final estimate (we went too far in the above loop)
    if security_level < target_security:
        n -= z * 8
        security_level = estimate_lwe_nocrash(n, alpha, q, secret_distribution=secret_distribution,
                                              reduction_cost_model=reduction_cost_model, m=oo)

    print("the finalised parameters are n = {}, log2(sd) = {}, log2(q) = {}, "
          "with a security level of {}-bits".format(n, sd, log(q, 2), security_level))

    # final sanity check, so we don't return insecure (or infinite) parameters
    if security_level < target_security or security_level == oo:
        n = None

    return n
def automated_param_select_sd(n, sd=None, q=2**32, reduction_cost_model=est.BKZ.sieve, secret_distribution=(0, 1),
                              target_security=128):
    """ A function used to generate the smallest value of sd which allows for
    target_security bits of security, for the input values of (n, q)
    :param n: the LWE dimension
    :param sd: an initial (base-2 log) value of sd to use in the optimisation, guessed if None
    :param q: the LWE modulus (q = 2**32, 2**64 in TFHE)
    :param reduction_cost_model: the BKZ cost model considered, BKZ.sieve is default
    :param secret_distribution: the LWE secret distribution
    :param target_security: the target number of bits of security, 128 is default

    EXAMPLE:
    sage: X = automated_param_select_sd(n = 1054, q = 2**32)
    sage: X
    -26
    """

    if sd is None:
        # pick some sd which gets us close (based on concrete_LWE_params)
        sd = round(n * 80 / (target_security * (-25)))

    # make sure there are at least two bits of noise, i.e. sd >= -(log2(q) - 2)
    sd = max(sd, -(log(q, 2) - 2))

    sd_ = (2 ** sd) * q
    alpha = sqrt(2 * pi) * sd_ / RR(q)

    # initial estimate, to determine whether we are above or below the target security level
    security_level = estimate_lwe_nocrash(n, alpha, q, secret_distribution=secret_distribution,
                                          reduction_cost_model=reduction_cost_model, m=oo)
    z = inequality(security_level, target_security)

    while z * security_level < z * target_security and sd > -log(q, 2):
        sd += z * 0.5
        sd_ = (2 ** sd) * q
        alpha = sqrt(2 * pi) * sd_ / RR(q)
        security_level = estimate_lwe_nocrash(n, alpha, q, secret_distribution=secret_distribution,
                                              reduction_cost_model=reduction_cost_model, m=oo)

        # sd is a base-2 log relative to q, so sd >= 0 means the noise has grown to the
        # size of the modulus and the target cannot be reached
        if sd >= 0:
            print("target security level is unattainable")
            return None

    # final estimate (we went too far in the above loop)
    if security_level < target_security:
        sd -= z * 0.5
        sd_ = (2 ** sd) * q
        alpha = sqrt(2 * pi) * sd_ / RR(q)
        security_level = estimate_lwe_nocrash(n, alpha, q, secret_distribution=secret_distribution,
                                              reduction_cost_model=reduction_cost_model, m=oo)

    print("the finalised parameters are n = {}, log2(sd) = {}, log2(q) = {}, "
          "with a security level of {}-bits".format(n, sd, log(q, 2), security_level))

    return sd
def generate_parameter_matrix(n_range, sd=None, q=2**32, reduction_cost_model=est.BKZ.sieve,
                              secret_distribution=(0, 1), target_security=128):
    """
    :param n_range: a tuple (n_min, n_max) giving the values of n for which to generate parameters
    :param sd: the (base-2 log) standard deviation of the LWE error
    :param q: the LWE modulus (q = 2**32, 2**64 in TFHE)
    :param reduction_cost_model: the BKZ cost model considered, BKZ.sieve is default
    :param secret_distribution: the LWE secret distribution
    :param target_security: the target number of bits of security, 128 is default

    TODO: we should probably parallelise this function for speed

    EXAMPLE:
    sage: X = generate_parameter_matrix([788, 790])
    sage: X
    [(788, 4294967296, -20.0), (789, 4294967296, -20.0)]
    """

    RESULTS = []

    # grab the min and max value/s of n, with a granularity (if given as input)
    try:
        (n_min, n_max, gran) = n_range
    except ValueError:
        (n_min, n_max) = n_range
        gran = 1

    sd_ = sd

    for n in range(n_min, n_max + 1, gran):
        sd = automated_param_select_sd(n, sd=sd_, q=q, reduction_cost_model=reduction_cost_model,
                                       secret_distribution=secret_distribution, target_security=target_security)
        # reuse the previous sd as the starting point for the next n
        sd_ = sd
        RESULTS.append((n, q, sd))

    return RESULTS
def generate_parameter_matrix_sd(sd_range, n=None, q=2**32, reduction_cost_model=est.BKZ.sieve,
                                 secret_distribution=(0, 1), target_security=128):
    """
    :param sd_range: a tuple (sd_min, sd_max) giving the values of sd for which to generate parameters
    :param n: an initial value of n to use in the optimisation, guessed if None
    :param q: the LWE modulus (q = 2**32, 2**64 in TFHE)
    :param reduction_cost_model: the BKZ cost model considered, BKZ.sieve is default
    :param secret_distribution: the LWE secret distribution
    :param target_security: the target number of bits of security, 128 is default

    TODO: we should probably parallelise this function for speed
    """

    RESULTS = []

    # grab the min and max value/s of sd
    (sd_min, sd_max) = sd_range

    for sd in range(sd_min, sd_max + 1):
        n = automated_param_select_n(sd, n=n, q=q, reduction_cost_model=reduction_cost_model,
                                     secret_distribution=secret_distribution, target_security=target_security)
        RESULTS.append((n, q, sd))

    return RESULTS
def generate_parameter_step(results, label=None, torus_sd=True):
    """
    Plot results
    :param results: an output of generate_parameter_matrix
    :param label: a label for the plot legend
    :param torus_sd: if True, plot sd relative to the torus; otherwise shift it by log2(q)

    returns: a step plot of the chosen parameters

    EXAMPLE:
    X = generate_parameter_matrix([700, 790])
    generate_parameter_step(X)
    plt.show()
    """

    N = []
    SD = []

    for (n, q, sd) in results:
        N.append(n)
        if torus_sd:
            SD.append(sd)
        else:
            SD.append(sd + log(q, 2))

    plt.plot(N, SD, label=label)
    plt.legend(loc="upper right")

    return plt
def generate_iso_lines(N=[256, 2048], SD=[0, 32], q=2**32):
    RESULTS = []

    for n in range(N[0], N[1] + 1, 1):
        for sd_log in range(SD[0], SD[1] + 1, 1):
            sd = 2 ** sd_log
            alpha = sqrt(2 * pi) * sd / q
            # use a local name for the result, so that the module `est` is not shadowed
            try:
                res = est.estimate_lwe(n, alpha, q, secret_distribution=(0, 1),
                                       reduction_cost_model=est.BKZ.sieve,
                                       skip=("bkw", "arora-gb", "dec"))
                res = get_security_level(res, 2)
            except Exception:
                res = est.estimate_lwe(n, alpha, q, secret_distribution=(0, 1),
                                       reduction_cost_model=est.BKZ.sieve,
                                       skip=("bkw", "arora-gb", "dual", "dec"))
                res = get_security_level(res, 2)
            RESULTS.append((n, sd, res))

    return RESULTS
def plot_iso_lines(results):
    x1 = []
    x2 = []

    for z in results:
        x1.append(z[0])
        # use log(q)
        # use -ve values to match Pascal's diagram
        x2.append(z[2])

    plt.plot(x1, x2)

    return plt
def test_multiple_sd(n, q, secret_distribution, reduction_cost_model, split=33, m=oo):
    # collect one security level per sampled sd value
    levels = []
    Y = []
    for sd_ in np.linspace(0, 32, split):
        Y.append(sd_)
        sd = (2 ** (-1 * sd_)) * q
        alpha = sqrt(2 * pi) * sd / q
        try:
            es = est.estimate_lwe(n=n, alpha=alpha, q=q, secret_distribution=secret_distribution,
                                  reduction_cost_model=reduction_cost_model,
                                  skip=("bkw", "dec", "arora-gb"), m=m)
        except Exception:
            es = est.estimate_lwe(n=n, alpha=alpha, q=q, secret_distribution=secret_distribution,
                                  reduction_cost_model=reduction_cost_model,
                                  skip=("bkw", "dec", "arora-gb", "dual"), m=m)
        levels.append(get_security_level(es, 2))

    return levels, Y
## parameter curves

def get_parameter_curves_data_sd(sec_levels, sd_range, q):
    Results = []
    for sec in sec_levels:
        try:
            result_sec = generate_parameter_matrix_sd(sd_range=sd_range, n=None, q=q,
                                                      reduction_cost_model=est.BKZ.sieve,
                                                      secret_distribution=(0, 1), target_security=sec)
            Results.append(result_sec)
        except Exception:
            # skip security levels for which the search fails
            pass

    return Results
def get_parameter_curves_data_n(sec_levels, n_range, q):
    Results = []
    for sec in sec_levels:
        try:
            result_sec = generate_parameter_matrix(n_range, sd=None, q=q,
                                                   reduction_cost_model=est.BKZ.sieve,
                                                   secret_distribution=(0, 1), target_security=sec)
            Results.append(result_sec)
        except Exception:
            # skip security levels for which the search fails
            pass

    return Results
def interpolate_result(result, log_q):
    # linear function interpolation
    x = []
    y = []

    # 1. filter out any points which recommend sd = -log(q) + 2, i.e. points sitting
    # exactly on the minimal-noise floor
    new_result = []
    for res in result:
        if res[2] > -log_q + 2:
            new_result.append(res)

    result = new_result
    for res in result:
        x.append(res[0])
        y.append(res[2])

    (a, b) = np.polyfit(x, y, 1)

    return (a, b)
def plot_interpolants(interpolants, n_range, log_q, degree=1):
    for x in interpolants:
        if degree == 1:
            vals = [x[0] * n + x[1] for n in range(n_range[0], n_range[1])]
        elif degree == 2:
            vals = [x[0] * n ** 2 + x[1] * n + x[2] for n in range(n_range[0], n_range[1])]
        # any values which fall outside of the range are clamped to give at least two bits of noise
        vvals = []
        for v in vals:
            if v < -log_q + 2:
                vvals.append(-log_q + 2)
            else:
                vvals.append(v)

        plt.plot(range(n_range[0], n_range[0] + len(vvals)), vvals)

    return 0
## currently running
# sage: n_range = (256, 2048, 16)
# sage: sec_levels = [80 + 16*k for k in range(0, 12)]
# sage: results = get_parameter_curves_data_n(sec_levels, n_range, q = 2**64)
def verify_results(results, security_level, secret_distribution=(0, 1), reduction_cost_model=est.BKZ.sieve):
    """ A function which verifies that a set of results match a given security level
    :param results: a set of tuples of the form (n, q, sd)
    :param security_level: the target security level for these params
    """

    estimates = []

    # 1. grab the parameters (entries with sd = None are skipped)
    for (n, q, sd) in results:
        if sd is not None:
            sd = 2 ** sd
            # sd is relative to q, so alpha = sqrt(2*pi) * sd already includes the 1/q factor
            alpha = sqrt(2 * pi) * sd

            # 2. test that these parameters satisfy the given security level
            try:
                estimate = est.estimate_lwe(n, alpha, q, secret_distribution=secret_distribution,
                                            reduction_cost_model=reduction_cost_model, m=oo,
                                            skip={"bkw", "dec", "arora-gb"})
            except Exception:
                estimate = est.estimate_lwe(n, alpha, q, secret_distribution=secret_distribution,
                                            reduction_cost_model=reduction_cost_model, m=oo,
                                            skip={"bkw", "dec", "arora-gb", "dual"})

            estimates.append(estimate)

    return estimates
def verify_interpolants(interpolant, n_range, log_q, secret_distribution=(0, 1), reduction_cost_model=est.BKZ.sieve):
    estimates = []
    q = 2 ** log_q
    (a, b) = interpolant

    for n in range(n_range[0], n_range[1]):
        print(n)
        # we take the max here to ensure that the "cut-off" for the minimal error is met
        sd = max(a * n + b, -log_q + 2)
        sd = 2 ** sd
        # sd is relative to q, so no 1/q factor is needed in alpha
        alpha = sqrt(2 * pi) * sd

        security_level = estimate_lwe_nocrash(n, alpha, q, secret_distribution=secret_distribution,
                                              reduction_cost_model=reduction_cost_model, m=oo)
        print(security_level)
        if security_level == oo:
            security_level = 0
        estimates.append(security_level)

    return estimates
def get_zama_curves():
    # hardcode the parameters for now
    n_range = [128, 2048, 32]
    sec_levels = [80 + 16*k for k in range(0, 12)]
    results = get_parameter_curves_data_n(sec_levels, n_range, q=2**64)

    return results
def test_curves():
    # a small hardcoded example for testing purposes
    n_range = [256, 1024, 128]
    sec_levels = [80, 128, 256]
    results = get_parameter_curves_data_n(sec_levels, n_range, q=2**64)

    return results
def find_nalpha(l, sec_lvl):
    # return the index of the first finite security level exceeding sec_lvl
    for j in range(len(l)):
        if l[j] != oo and l[j] > sec_lvl:
            return j
## we start with 80/128/192/256 bits of security

## lambda = 80
## z = verify_interpolants(interps[0], (128,2048), 64)
## i = 0
## min(z[i:]) = 80.36
## so the model is sd(n) = max(-0.04047677865612648 * n + 1.1433465085639063, -log_q + 2), n >= 128

## lambda = 128
## z = verify_interpolants(interps[3], (128,2048), 64)
## i = 83
## min(z[i:]) = 128.02
## so the model is sd(n) = max(-0.026374888765705498 * n + 2.012143923330495, -log_q + 2), n >= 211 ( = 128 + 83)

## lambda = 192
## z = verify_interpolants(interps[7], (128,2048), 64)
## i = 304
## min(z[i:]) = 192.19
## so the model is sd(n) = max(-0.018504919354426233 * n + 2.6634073426215843, -log_q + 2), n >= 432 ( = 128 + 304)

## lambda = 256
## z = verify_interpolants(interps[-1], (128,2048), 64)
## i = 653
## min(z[i:]) = 256.25
## so the model is sd(n) = max(-0.014327640360322604 * n + 2.899270827311091, -log_q + 2), n >= 781 ( = 128 + 653)
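
# The four verified models above can be collected into a single helper; a sketch only
# (the coefficients are copied from the comments above, and the n >= bounds are the
# verified ranges; outside those ranges the curves are unchecked):

verified_curves = {
    80: (-0.04047677865612648, 1.1433465085639063, 128),
    128: (-0.026374888765705498, 2.012143923330495, 211),
    192: (-0.018504919354426233, 2.6634073426215843, 432),
    256: (-0.014327640360322604, 2.899270827311091, 781),
}


def minimal_sd(security_level, n, log_q=64):
    # return the smallest (base-2 log, relative to q) noise standard deviation that
    # the verified curve allows for this (security_level, n) pair
    (a, b, n_min) = verified_curves[security_level]
    if n < n_min:
        raise ValueError("the curve for lambda = {} is only verified for n >= {}".format(security_level, n_min))
    return max(a * n + b, -log_q + 2)

# sage: minimal_sd(128, 1024)    # -0.0263748887... * 1024 + 2.0121439...
# ~ -25.0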