mirror of https://github.com/zama-ai/concrete.git

Commit: start with something simple
estimator_new/__init__.py (new file, 19 lines)
@@ -0,0 +1,19 @@
# -*- coding: utf-8 -*-
from .nd import NoiseDistribution as ND  # noqa
from .io import Logging  # noqa
from . import reduction as RC  # noqa
from . import simulator as Simulator  # noqa
from . import lwe as LWE  # noqa

from .schemes import (  # noqa
    Kyber512,
    Kyber768,
    Kyber1024,
    LightSaber,
    Saber,
    FireSaber,
    NTRUHPS2048509Enc,
    NTRUHPS2048677Enc,
    NTRUHPS4096821Enc,
    NTRUHRSS701Enc,
)
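The interface above is all a typical session needs: preconfigured parameter sets from .schemes, noise distributions via ND, and attack entry points hanging off the LWE module. A minimal orientation sketch in the doctest style used throughout this commit (it assumes the package is importable as ``estimator``, matching the doctests below; only ``estimator_new`` appears in this commit, and ``primal_usvp`` lives in lwe_primal.py, which this commit does not show):

    >>> from estimator import *
    >>> params = LWE.Parameters(n=512, q=3329, Xs=ND.CenteredBinomial(3), Xe=ND.CenteredBinomial(3))
    >>> _ = LWE.primal_usvp(Kyber512)  # individual attacks are attributes of the LWE module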
estimator_new/conf.py (new file, 13 lines)
@@ -0,0 +1,13 @@
# -*- coding: utf-8 -*-
"""
Default values.
"""

from .reduction import Kyber, ABLR21
from .simulator import GSA

red_cost_model = Kyber
red_cost_model_classical_poly_space = ABLR21
red_shape_model = "gsa"
red_simulator = GSA
mitm_opt = "analytical"
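These defaults are consumed lazily: for example, ``lwe.estimate`` below falls back to ``red_cost_model`` and ``red_shape_model`` only when the caller passes ``None``. A sketch of overriding a default per call rather than editing this module:

    >>> from estimator import *
    >>> from estimator.conf import red_cost_model_classical_poly_space
    >>> _ = LWE.primal_usvp(Kyber512, red_cost_model=red_cost_model_classical_poly_space)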
estimator_new/cost.py (new file, 258 lines)
@@ -0,0 +1,258 @@
# -*- coding: utf-8 -*-
from sage.all import round, log, oo
from dataclasses import dataclass


@dataclass
class Cost:
    """
    Algorithm costs.
    """

    rop: float = oo
    tag: str = None

    # An entry is "impermanent" if it grows when we run the algorithm again. For example, `δ`
    # would not scale with the number of operations but `rop` would. This check is strict such that
    # unknown entries raise an error. This is to enforce a decision on whether an entry should be
    # scaled.

    impermanents = {
        "rop": True,
        "repetitions": False,
        "tag": False,
        "problem": False,
    }

    @classmethod
    def register_impermanent(cls, data=None, **kwds):
        if data is not None:
            for k, v in data.items():
                if cls.impermanents.get(k, v) != v:
                    raise ValueError(f"Attempting to overwrite {k}:{cls.impermanents[k]} with {v}")
                cls.impermanents[k] = v

        for k, v in kwds.items():
            if cls.impermanents.get(k, v) != v:
                raise ValueError(f"Attempting to overwrite {k}:{cls.impermanents[k]} with {v}")
            cls.impermanents[k] = v

    key_map = {
        "delta": "δ",
        "beta": "β",
        "eta": "η",
        "epsilon": "ε",
        "zeta": "ζ",
        "ell": "ℓ",
        "repetitions": "↻",
    }
    val_map = {"beta": "%8d", "d": "%8d", "delta": "%8.6f"}

    def __init__(self, **kwds):
        for k, v in kwds.items():
            setattr(self, k, v)

    def str(self, keyword_width=None, newline=None, round_bound=2048, compact=False):  # noqa C901
        """
        :param keyword_width: keys are printed with this width
        :param newline: insert a newline
        :param round_bound: values beyond this bound are represented as powers of two
        :param compact: do not add extra whitespace to align entries

        EXAMPLE::

            >>> from estimator.cost import Cost
            >>> s = Cost(delta=5, bar=2)
            >>> s
            δ: 5.000000, bar: 2

        """

        def wfmtf(k):
            if keyword_width:
                fmt = "%%%ss" % keyword_width
            else:
                fmt = "%s"
            return fmt % k

        d = self.__dict__
        s = []
        for k, v in d.items():
            if k == "problem":  # we store the problem instance in a cost object for reference
                continue
            kk = wfmtf(self.key_map.get(k, k))
            try:
                if (1 / round_bound < abs(v) < round_bound) or (not v) or (k in self.val_map):
                    if abs(v % 1) < 0.0000001:
                        vv = self.val_map.get(k, "%8d") % round(v)
                    else:
                        vv = self.val_map.get(k, "%8.3f") % v
                else:
                    vv = "%7s" % ("≈2^%.1f" % log(v, 2))
            except TypeError:  # strings and such
                vv = "%8s" % v
            if compact:
                kk = kk.strip()
                vv = vv.strip()
            s.append(f"{kk}: {vv}")

        if not newline:
            return ", ".join(s)
        else:
            return "\n".join(s)

    def reorder(self, *args):
        """
        Return a new ordered dict from the key:value pairs in the dictionary, reordered such that
        the keys given to this function come first.

        :param args: keys which should come first (in order)

        EXAMPLE::

            >>> from estimator.cost import Cost
            >>> d = Cost(a=1, b=2, c=3); d
            a: 1, b: 2, c: 3

            >>> d.reorder("b", "c", "a")
            b: 2, c: 3, a: 1

        """
        keys = list(self.__dict__.keys())
        for key in args:
            keys.pop(keys.index(key))
        keys = list(args) + keys
        r = dict()
        for key in keys:
            r[key] = self.__dict__[key]
        return Cost(**r)

    def filter(self, **keys):
        """
        Return a new ordered dictionary restricted to the given keys.

        :param keys: keys which should be copied (ordered)
        """
        r = dict()
        for key in keys:
            r[key] = self.__dict__[key]
        return Cost(**r)

    def repeat(self, times, select=None):
        """
        Return a report with all costs multiplied by ``times``.

        :param times: the number of times it should be run
        :param select: toggle which fields ought to be repeated and which should not
        :returns: a new cost estimate

        EXAMPLE::

            >>> from estimator.cost import Cost
            >>> c0 = Cost(a=1, b=2)
            >>> c0.register_impermanent(a=True, b=False)
            >>> c0.repeat(1000)
            a: 1000, b: 2, ↻: 1000

        TESTS::

            >>> from estimator.cost import Cost
            >>> Cost(rop=1).repeat(1000).repeat(1000)
            rop: ≈2^19.9, ↻: ≈2^19.9

        """
        impermanents = dict(self.impermanents)

        if select is not None:
            for key in select:
                impermanents[key] = select[key]

        ret = dict()
        for key in self.__dict__:
            try:
                if impermanents[key]:
                    ret[key] = times * self.__dict__[key]
                else:
                    ret[key] = self.__dict__[key]
            except KeyError:
                raise NotImplementedError(
                    f"You found a bug, this function does not know about '{key}' but should."
                )
        ret["repetitions"] = times * ret.get("repetitions", 1)
        return Cost(**ret)

    def __rmul__(self, times):
        return self.repeat(times)

    def combine(self, right, base=None):
        """Combine ``self`` and ``right``.

        :param right: cost dictionary
        :param base: add entries to ``base``

        EXAMPLE::

            >>> from estimator.cost import Cost
            >>> c0 = Cost(a=1)
            >>> c1 = Cost(b=2)
            >>> c2 = Cost(c=3)
            >>> c0.combine(c1)
            a: 1, b: 2
            >>> c0.combine(c1, base=c2)
            c: 3, a: 1, b: 2

        """
        if base is None:
            cost = dict()
        else:
            cost = base.__dict__
        for key in self.__dict__:
            cost[key] = self.__dict__[key]
        for key in right:
            cost[key] = right.__dict__[key]
        return Cost(**cost)

    def __bool__(self):
        return self.__dict__.get("rop", oo) < oo

    def __add__(self, other):
        return self.combine(self, other)

    def __getitem__(self, key):
        return self.__dict__[key]

    def __delitem__(self, key):
        del self.__dict__[key]

    def get(self, key, default):
        return self.__dict__.get(key, default)

    def __setitem__(self, key, value):
        self.__dict__[key] = value

    def __iter__(self):
        return iter(self.__dict__)

    def values(self):
        return self.__dict__.values()

    def __repr__(self):
        return self.str(compact=True)

    def __str__(self):
        return self.str(newline=True, keyword_width=12)

    def __lt__(self, other):
        try:
            return self["rop"] < other["rop"]
        except AttributeError:
            return self["rop"] < other

    def __le__(self, other):
        try:
            return self["rop"] <= other["rop"]
        except AttributeError:
            return self["rop"] <= other
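The strictness of ``impermanents`` is easy to trip over: re-registering a key with a conflicting flag raises, which is the intended guard against silently mis-scaling an entry. A sketch (not part of the file above):

    >>> from estimator.cost import Cost
    >>> Cost.register_impermanent(mem=False)  # declare: mem does not scale with repetitions
    >>> Cost(rop=1, mem=1).repeat(1000)
    rop: 1000, mem: 1, ↻: 1000
    >>> Cost.register_impermanent(mem=True)   # conflicting re-registration
    Traceback (most recent call last):
    ...
    ValueError: Attempting to overwrite mem:False with True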
estimator_new/errors.py (new file, 15 lines)
@@ -0,0 +1,15 @@
# -*- coding: utf-8 -*-
class OutOfBoundsError(ValueError):
    """
    Used to indicate a wrong value, for example δ < 1.
    """

    pass


class InsufficientSamplesError(ValueError):
    """
    Used to indicate the number of samples given is too small.
    """

    pass
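Both errors are plain ``ValueError`` subclasses. By convention elsewhere in this commit (see ``CodedBKW.b`` in lwe_bkw.py), ``InsufficientSamplesError`` carries the required number of samples as its second argument, so callers can amplify and retry. A sketch:

    >>> from estimator.errors import InsufficientSamplesError
    >>> try:
    ...     raise InsufficientSamplesError("need ≈2^40 samples", 2**40)
    ... except InsufficientSamplesError as e:
    ...     m = e.args[1]  # the required m, as read in CodedBKW.__call__
    >>> m
    1099511627776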
estimator_new/gb.py (new file, 252 lines)
@@ -0,0 +1,252 @@
# -*- coding: utf-8 -*-
"""
Estimate cost of solving LWE using Gröbner bases.

See :ref:`Arora-GB` for an overview.

"""
from sage.all import (
    PowerSeriesRing,
    QQ,
    RR,
    oo,
    binomial,
    sqrt,
    ceil,
    floor,
    exp,
    log,
    pi,
    RealField,
)
from .cost import Cost
from .lwe_parameters import LWEParameters
from .io import Logging


def gb_cost(n, D, omega=2, prec=None):
    """
    Estimate the complexity of computing a Gröbner basis.

    :param n: Number of variables n > 0.
    :param D: Tuple of `(d,m)` pairs where `m` is the number of polynomials and `d` is a degree.
    :param omega: Linear algebra exponent, i.e. matrix multiplication costs `O(n^ω)` operations.
    :param prec: Compute power series up to this precision (default: `2n`).

    EXAMPLE::

        >>> from estimator.gb import gb_cost
        >>> gb_cost(128, [(2, 256)])
        rop: ≈2^144.6, dreg: 17, mem: ≈2^144.6

    """
    prec = 2 * n if prec is None else prec

    R = PowerSeriesRing(QQ, "z", prec)
    z = R.gen()
    z = z.add_bigoh(prec)
    s = R(1)
    s = s.add_bigoh(prec)

    for d, m in D:
        s *= (1 - z ** d) ** m
    s /= (1 - z) ** n

    retval = Cost(rop=oo, dreg=oo)
    retval.register_impermanent({"rop": True, "dreg": False, "mem": False})

    for dreg in range(prec):
        if s[dreg] < 0:
            break
    else:
        return retval

    retval["dreg"] = dreg
    retval["rop"] = binomial(n + dreg, dreg) ** omega
    retval["mem"] = binomial(n + dreg, dreg) ** 2

    return retval


class AroraGB:
    @staticmethod
    def ps_single(C):
        """
        Probability that a Gaussian is within `C` standard deviations.
        """
        RR = RealField(256)
        C = RR(C)
        return RR(1 - (RR(2) / (C * RR(sqrt(2 * pi))) * exp(-(C ** 2) / RR(2))))  # noqa

    @classmethod
    def cost_bounded(cls, params, success_probability=0.99, omega=2, log_level=1, **kwds):
        """
        Estimate cost using absolute bounds for secrets and noise.

        :param params: LWE parameters.
        :param success_probability: target success probability
        :param omega: linear algebra constant.

        """
        d = params.Xe.bounds[1] - params.Xe.bounds[0] + 1
        dn = cls.equations_for_secret(params)
        cost = gb_cost(params.n, [(d, params.m)] + dn)
        cost["t"] = (d - 1) // 2
        if cost["dreg"] < oo and binomial(params.n + cost["dreg"], cost["dreg"]) < params.m:
            cost["m"] = binomial(params.n + cost["dreg"], cost["dreg"])
        else:
            cost["m"] = params.m
        cost.register_impermanent(t=False, m=True)
        return cost

    @classmethod
    def cost_Gaussian_like(cls, params, success_probability=0.99, omega=2, log_level=1, **kwds):
        """
        Estimate cost using absolute bounds for secrets and Gaussian tail bounds for noise.

        :param params: LWE parameters.
        :param success_probability: target success probability
        :param omega: linear algebra constant.

        """
        dn = cls.equations_for_secret(params)

        best, stuck = None, 0
        for t in range(ceil(params.Xe.stddev), params.n):
            d = 2 * t + 1
            C = RR(t / params.Xe.stddev)
            assert C >= 1  # if C is too small, we ignore it
            # Pr[success]^m = Pr[overall success]
            single_prob = AroraGB.ps_single(C)
            m_req = log(success_probability, 2) / log(single_prob, 2)
            m_req = floor(m_req)

            if m_req > params.m:
                break

            current = gb_cost(params.n, [(d, m_req)] + dn, omega)

            if current["dreg"] == oo:
                continue

            current["t"] = t
            current["m"] = m_req
            current.register_impermanent(t=False, m=True)
            current = current.reorder("rop", "m", "dreg", "t")

            Logging.log("repeat", log_level + 1, f"{repr(current)}")

            if best is None:
                best = current
            else:
                if best > current:
                    best = current
                    stuck = 0
                else:
                    stuck += 1
                    if stuck >= 5:
                        break

        if best is None:
            best = Cost(rop=oo, dreg=oo)
        return best

    @classmethod
    def equations_for_secret(cls, params):
        """
        Return ``(d,n)`` tuple to encode that `n` equations of degree `d` are available from the LWE secret.

        :param params: LWE parameters.

        """
        if params.Xs <= params.Xe:
            a, b = params.Xs.bounds
            if b - a < oo:
                d = b - a + 1
            elif params.Xs.is_Gaussian_like:
                d = 2 * ceil(3 * params.Xs.stddev) + 1
            else:
                raise NotImplementedError(f"Do not know how to handle {params.Xs}.")
            dn = [(d, params.n)]
        else:
            dn = []
        return dn

    def __call__(
        self, params: LWEParameters, success_probability=0.99, omega=2, log_level=1, **kwds
    ):
        """
        Arora-GB as described in [ICALP:AroGe11]_, [EPRINT:ACFP14]_.

        :param params: LWE parameters.
        :param success_probability: targeted success probability < 1.
        :param omega: linear algebra constant.
        :return: A cost dictionary

        The returned cost dictionary has the following entries:

        - ``rop``: Total number of word operations (≈ CPU cycles).
        - ``m``: Number of samples consumed.
        - ``dreg``: The degree of regularity or "solving degree".
        - ``t``: Polynomials of degree 2t + 1 are considered.
        - ``mem``: Total memory usage.

        EXAMPLE::

            >>> from estimator import *
            >>> params = LWE.Parameters(n=64, q=7681, Xs=ND.DiscreteGaussian(3.0), Xe=ND.DiscreteGaussian(3.0), m=2**50)
            >>> LWE.arora_gb(params)
            rop: ≈2^307.1, m: ≈2^46.8, dreg: 99, t: 25, mem: ≈2^307.1, tag: arora-gb

        TESTS::

            >>> LWE.arora_gb(params.updated(m=2**120))
            rop: ≈2^282.6, m: ≈2^101.1, dreg: 83, t: 36, mem: ≈2^282.6, tag: arora-gb
            >>> LWE.arora_gb(params.updated(Xe=ND.UniformMod(7)))
            rop: ≈2^60.6, dreg: 7, mem: ≈2^60.6, t: 3, m: ≈2^30.3, tag: arora-gb
            >>> LWE.arora_gb(params.updated(Xe=ND.CenteredBinomial(8)))
            rop: ≈2^122.3, dreg: 19, mem: ≈2^122.3, t: 8, m: ≈2^50.0, tag: arora-gb
            >>> LWE.arora_gb(params.updated(Xs=ND.UniformMod(5), Xe=ND.CenteredBinomial(4), m=1024))
            rop: ≈2^227.2, dreg: 54, mem: ≈2^227.2, t: 4, m: 1024, tag: arora-gb
            >>> LWE.arora_gb(params.updated(Xs=ND.UniformMod(3), Xe=ND.CenteredBinomial(4), m=1024))
            rop: ≈2^189.9, dreg: 39, mem: ≈2^189.9, t: 4, m: 1024, tag: arora-gb

        .. [EPRINT:ACFP14] Martin R. Albrecht, Carlos Cid, Jean-Charles Faugère & Ludovic Perret. (2014).
           Algebraic algorithms for LWE. https://eprint.iacr.org/2014/1018

        .. [ICALP:AroGe11] Sanjeev Arora & Rong Ge. (2011). New algorithms for learning in presence of
           errors. In L. Aceto, M. Henzinger, & J. Sgall, ICALP 2011, Part I (pp. 403-415).
           Springer, Heidelberg.
        """
        params = params.normalize()

        best = Cost(rop=oo, dreg=oo)

        if params.Xe.is_bounded:
            cost = self.cost_bounded(
                params,
                success_probability=success_probability,
                omega=omega,
                log_level=log_level,
            )
            Logging.log("gb", log_level, f"b: {repr(cost)}")
            best = min(best, cost, key=lambda x: x["dreg"])

        if params.Xe.is_Gaussian_like:
            cost = self.cost_Gaussian_like(
                params,
                success_probability=success_probability,
                omega=omega,
                log_level=log_level,
            )
            Logging.log("gb", log_level, f"G: {repr(cost)}")
            best = min(best, cost, key=lambda x: x["dreg"])

        best["tag"] = "arora-gb"
        best["problem"] = params
        return best

    __name__ = "arora_gb"


arora_gb = AroraGB()
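The heart of ``gb_cost`` is the semi-regularity heuristic: the degree of regularity is read off as the index of the first negative coefficient of the power series ∏(1-z^d)^m / (1-z)^n. A standalone Sage sketch of that computation, mirroring the loop above (the expected value 17 matches the ``gb_cost(128, [(2, 256)])`` doctest):

    >>> from sage.all import PowerSeriesRing, QQ
    >>> n, prec = 128, 256
    >>> R = PowerSeriesRing(QQ, "z", prec)
    >>> z = R.gen().add_bigoh(prec)
    >>> s = (1 - z ** 2) ** 256 / (1 - z) ** n  # one block of 256 quadratic equations
    >>> next(i for i in range(prec) if s[i] < 0)  # first negative coefficient = dreg
    17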
estimator_new/io.py (new file, 57 lines)
@@ -0,0 +1,57 @@
# -*- coding: utf-8 -*-
import logging


class Logging:
    """
    Control level of detail being printed.
    """

    plain_logger = logging.StreamHandler()
    plain_logger.setFormatter(logging.Formatter("%(message)s"))

    detail_logger = logging.StreamHandler()
    detail_logger.setFormatter(logging.Formatter("[%(name)8s] %(message)s"))

    logging.getLogger("estimator").handlers = [plain_logger]
    logging.getLogger("estimator").setLevel(logging.INFO)

    loggers = ("batch", "bdd", "usvp", "bkw", "gb", "repeat", "guess", "bins", "dual")

    CRITICAL = logging.CRITICAL
    ERROR = logging.ERROR
    WARNING = logging.WARNING
    INFO = logging.INFO
    LEVEL0 = logging.INFO
    LEVEL1 = logging.INFO - 2
    LEVEL2 = logging.INFO - 4
    LEVEL3 = logging.INFO - 6
    LEVEL4 = logging.INFO - 8
    LEVEL5 = logging.DEBUG
    DEBUG = logging.DEBUG
    NOTSET = logging.NOTSET

    for logger in loggers:
        logging.getLogger(logger).handlers = [detail_logger]
        logging.getLogger(logger).setLevel(logging.INFO)

    @staticmethod
    def set_level(lvl, loggers=None):
        """Set logging level.

        :param lvl: one of `CRITICAL`, `ERROR`, `WARNING`, `INFO`, `LEVELX`, `DEBUG`, `NOTSET` with `X` ∈ [0,5]
        :param loggers: one of `Logging.loggers`; if `None` all loggers are used.

        """
        if loggers is None:
            loggers = Logging.loggers

        for logger in loggers:
            logging.getLogger(logger).setLevel(lvl)

    @classmethod
    def log(cls, logger, level, msg, *args, **kwds):
        level = int(level)
        return logging.getLogger(logger).log(
            cls.INFO - 2 * level, f"{{{level}}} " + msg, *args, **kwds
        )
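``Logging.log(logger, level, msg)`` maps the verbosity ``level`` to stdlib levels via ``INFO - 2 * level``, so level 0 prints at ``INFO`` and level 5 at ``DEBUG``; ``set_level`` is the matching threshold knob. A sketch (handler output goes to stderr with the ``[%(name)8s]`` prefix):

    >>> from estimator.io import Logging
    >>> Logging.set_level(Logging.LEVEL2, ["dual"])  # show messages up to {2} on the "dual" logger
    >>> Logging.log("dual", 2, "β=10")    # emitted as: [    dual] {2} β=10
    >>> Logging.log("dual", 3, "hidden")  # LEVEL3 is below the LEVEL2 threshold, suppressed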
estimator_new/lwe.py (new file, 169 lines)
@@ -0,0 +1,169 @@
# -*- coding: utf-8 -*-
"""
High-level LWE interface
"""

from .lwe_primal import primal_usvp, primal_bdd, primal_hybrid
from .lwe_bkw import coded_bkw
from .lwe_guess import exhaustive_search, mitm, distinguish  # noqa
from .lwe_dual import dual, dual_hybrid
from .lwe_guess import guess_composition
from .gb import arora_gb  # noqa
from .lwe_parameters import LWEParameters as Parameters  # noqa


class Estimate:
    @classmethod
    def rough(cls, params, jobs=1):
        """
        This function makes the following somewhat routine assumptions:

        - The GSA holds.
        - The Core-SVP model holds.

        This function furthermore assumes the following heuristics:

        - The primal hybrid attack only applies to sparse secrets.
        - The dual hybrid MITM attack only applies to sparse secrets.
        - Arora-GB only applies to bounded noise with at least `n^2` samples.
        - BKW is not competitive.

        :param params: LWE parameters.
        :param jobs: Use multiple threads in parallel.

        EXAMPLE::

            >>> from estimator import *
            >>> _ = lwe.estimate.rough(Kyber512)
            usvp                 :: rop: ≈2^118.6, red: ≈2^118.6, δ: 1.003941, β: 406, d: 998, tag: usvp
            dual_hybrid          :: rop: ≈2^127.2, mem: ≈2^123.3, m: 512, red: ≈2^127.0, δ: 1.003756, β: 435, ...

        """
        # NOTE: Don't import these at the top level to avoid circular imports
        from functools import partial
        from .reduction import ADPS16
        from .util import batch_estimate, f_name

        algorithms = {}

        algorithms["usvp"] = partial(primal_usvp, red_cost_model=ADPS16, red_shape_model="gsa")

        if params.Xs.is_sparse:
            algorithms["hybrid"] = partial(
                primal_hybrid, red_cost_model=ADPS16, red_shape_model="gsa"
            )

        if params.Xs.is_sparse:
            algorithms["dual_mitm_hybrid"] = partial(
                dual_hybrid, red_cost_model=ADPS16, mitm_optimization=True
            )
        else:
            algorithms["dual_hybrid"] = partial(
                dual_hybrid, red_cost_model=ADPS16, mitm_optimization=False
            )

        if params.m > params.n ** 2 and params.Xe.is_bounded:
            if params.Xs.is_sparse:
                algorithms["arora-gb"] = guess_composition(arora_gb.cost_bounded)
            else:
                algorithms["arora-gb"] = arora_gb.cost_bounded

        res = batch_estimate(params, algorithms.values(), log_level=1, jobs=jobs)
        res = res[params]
        for algorithm in algorithms:
            for k, v in res.items():
                if f_name(algorithms[algorithm]) == k:
                    print(f"{algorithm:20s} :: {repr(v)}")
        return res

    def __call__(
        self,
        params,
        red_cost_model=None,
        red_shape_model=None,
        deny_list=("arora-gb",),
        add_list=tuple(),
        jobs=1,
    ):
        """
        Run all estimates.

        :param params: LWE parameters.
        :param red_cost_model: How to cost lattice reduction.
        :param red_shape_model: How to model the shape of a reduced basis (applies to primal attacks).
        :param deny_list: Skip these algorithms.
        :param add_list: Add these ``(name, function)`` pairs to the list of algorithms to estimate.
        :param jobs: Use multiple threads in parallel.

        EXAMPLE::

            >>> from estimator import *
            >>> _ = lwe.estimate(Kyber512)
            bkw                  :: rop: ≈2^178.8, m: ≈2^166.8, mem: ≈2^167.8, b: 14, t1: 0, t2: 16, ℓ: 13, #cod: 448...
            usvp                 :: rop: ≈2^148.0, red: ≈2^148.0, δ: 1.003941, β: 406, d: 998, tag: usvp
            bdd                  :: rop: ≈2^144.5, red: ≈2^143.8, svp: ≈2^143.0, β: 391, η: 421, d: 1013, tag: bdd
            dual                 :: rop: ≈2^169.9, mem: ≈2^130.0, m: 512, red: ≈2^169.7, δ: 1.003484, β: 484, d: 1024...
            dual_hybrid          :: rop: ≈2^166.8, mem: ≈2^161.9, m: 512, red: ≈2^166.6, δ: 1.003541, β: 473, d: 1011...

        """
        from sage.all import oo
        from functools import partial
        from .conf import red_cost_model as red_cost_model_default
        from .conf import red_shape_model as red_shape_model_default
        from .util import batch_estimate, f_name

        if red_cost_model is None:
            red_cost_model = red_cost_model_default
        if red_shape_model is None:
            red_shape_model = red_shape_model_default

        algorithms = {}

        algorithms["arora-gb"] = guess_composition(arora_gb)
        algorithms["bkw"] = coded_bkw

        algorithms["usvp"] = partial(
            primal_usvp, red_cost_model=red_cost_model, red_shape_model=red_shape_model
        )
        algorithms["bdd"] = partial(
            primal_bdd, red_cost_model=red_cost_model, red_shape_model=red_shape_model
        )
        algorithms["hybrid"] = partial(
            primal_hybrid, red_cost_model=red_cost_model, red_shape_model=red_shape_model
        )
        algorithms["dual"] = partial(dual, red_cost_model=red_cost_model)
        algorithms["dual_hybrid"] = partial(
            dual_hybrid, red_cost_model=red_cost_model, mitm_optimization=False
        )
        algorithms["dual_mitm_hybrid"] = partial(
            dual_hybrid, red_cost_model=red_cost_model, mitm_optimization=True
        )

        for k in deny_list:
            del algorithms[k]
        for k, v in add_list:
            algorithms[k] = v

        res_raw = batch_estimate(params, algorithms.values(), log_level=1, jobs=jobs)
        res_raw = res_raw[params]
        res = {}
        for algorithm in algorithms:
            for k, v in res_raw.items():
                if f_name(algorithms[algorithm]) == k:
                    res[algorithm] = v

        for algorithm in algorithms:
            for k, v in res.items():
                if algorithm == k:
                    if v["rop"] == oo:
                        continue
                    if k == "hybrid" and res["bdd"]["rop"] < v["rop"]:
                        continue
                    if k == "dual_mitm_hybrid" and res["dual_hybrid"]["rop"] < v["rop"]:
                        continue
                    print(f"{algorithm:20s} :: {repr(v)}")
        return res


estimate = Estimate()
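``deny_list`` and ``add_list`` make the batch configurable without editing this module. A sketch that also skips BKW and registers an extra variant (the name ``my_dual`` and its parameter choice are hypothetical; any ``(name, callable)`` pair works):

    >>> from functools import partial
    >>> from estimator import *
    >>> from estimator.lwe_dual import dual
    >>> from estimator.reduction import ADPS16
    >>> my_dual = partial(dual, red_cost_model=ADPS16)  # hypothetical entry
    >>> _ = lwe.estimate(Kyber512, deny_list=("arora-gb", "bkw"), add_list=(("my_dual", my_dual),))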
estimator_new/lwe_bkw.py (new file, 304 lines)
@@ -0,0 +1,304 @@
# -*- coding: utf-8 -*-
"""
See :ref:`Coded-BKW for LWE` for what is available.
"""
from sage.all import ceil, log, floor, sqrt, var, find_root, erf, oo
from .lwe_parameters import LWEParameters
from .util import local_minimum
from .cost import Cost
from .errors import InsufficientSamplesError
from .prob import amplify_sigma
from .nd import sigmaf
from .io import Logging

cfft = 1  # convolutions mod q


class CodedBKW:
    @staticmethod
    def N(i, sigma_set, b, q):
        """
        Return `N_i` for the `i`-th `[N_i, b]` linear code.

        :param i: index
        :param sigma_set: target noise level
        """
        return floor(b / (1 - log(12 * sigma_set ** 2 / 2 ** i, q) / 2))

    @staticmethod
    def ntest(n, ell, t1, t2, b, q):  # noqa
        """
        If the parameter ``ntest`` is not provided, we use this function to estimate it.

        :param n: LWE dimension > 0.
        :param ell: Table size for hypothesis testing.
        :param t1: Number of normal BKW steps.
        :param t2: Number of coded BKW steps.
        :param b: Table size for BKW steps.

        """

        # there is no hypothesis testing because we have enough normal BKW
        # tables to cover all of n
        if t1 * b >= n:
            return 0

        # solve for ntest by aiming for ntop == 0
        ntest = var("ntest")
        sigma_set = sqrt(q ** (2 * (1 - ell / ntest)) / 12)
        ncod = sum([CodedBKW.N(i, sigma_set, b, q) for i in range(1, t2 + 1)])
        ntop = n - ncod - ntest - t1 * b

        try:
            start = max(int(round(find_root(ntop, 2, n - t1 * b + 1, rtol=0.1))) - 1, 2)
        except RuntimeError:
            start = 2
        ntest_min = 1
        for ntest in range(start, n - t1 * b + 1):
            if abs(ntop(ntest=ntest).n()) < abs(ntop(ntest=ntest_min).n()):
                ntest_min = ntest
            else:
                break
        return int(ntest_min)

    @staticmethod
    def t1(params: LWEParameters, ell, t2, b, ntest=None):
        """
        We compute t1 from N_i by observing that any N_i ≤ b gives no advantage over vanilla
        BKW, but the estimates for coded BKW always assume quantisation noise, which is too
        pessimistic for N_i ≤ b.
        """
        t1 = 0
        if ntest is None:
            ntest = CodedBKW.ntest(params.n, ell, t1, t2, b, params.q)
        sigma_set = sqrt(params.q ** (2 * (1 - ell / ntest)) / 12)
        Ni = [CodedBKW.N(i, sigma_set, b, params.q) for i in range(1, t2 + 1)]
        t1 = len([e for e in Ni if e <= b])
        # there is no point in having more tables than needed to cover n
        if b * t1 > params.n:
            t1 = params.n // b
        return t1

    @staticmethod
    def cost(
        t2: int,
        b: int,
        ntest: int,
        params: LWEParameters,
        success_probability=0.99,
        log_level=1,
    ):
        """
        Coded-BKW cost.

        :param t2: Number of coded BKW steps (≥ 0).
        :param b: Table size (≥ 1).
        :param success_probability: Targeted success probability < 1.
        :param ntest: Number of coordinates to hypothesis test.

        """
        cost = Cost()

        # Our cost is mainly determined by q^b; on the other hand, there are expressions in q^(ℓ+1)
        # below, hence we set ℓ = b - 1. This allows us to achieve the performance reported in
        # [C:GuoJohSta15].

        cost["b"] = b
        ell = b - 1  # noqa
        cost["ell"] = ell

        secret_bounds = params.Xs.bounds
        if params.Xs.is_Gaussian_like and params.Xs.mean == 0:
            secret_bounds = (
                max(secret_bounds[0], -3 * params.Xs.stddev),
                min(secret_bounds[1], 3 * params.Xs.stddev),
            )

        # base of the table size.
        zeta = secret_bounds[1] - secret_bounds[0] + 1

        t1 = CodedBKW.t1(params, ell, t2, b, ntest)
        t2 -= t1

        cost["t1"] = t1
        cost["t2"] = t2

        cost.register_impermanent(t1=False, t2=False)

        # compute ntest with the t1 just computed
        if ntest is None:
            ntest = CodedBKW.ntest(params.n, ell, t1, t2, b, params.q)

        # if there's no ntest then there's no `σ_set` and hence no ncod
        if ntest:
            sigma_set = sqrt(params.q ** (2 * (1 - ell / ntest)) / 12)
            ncod = sum([CodedBKW.N(i, sigma_set, b, params.q) for i in range(1, t2 + 1)])
        else:
            ncod = 0

        ntot = ncod + ntest
        ntop = max(params.n - ncod - ntest - t1 * b, 0)
        cost["#cod"] = ncod  # coding step
        cost["#top"] = ntop  # guessing step, typically zero
        cost["#test"] = ntest  # hypothesis testing

        cost.register_impermanent({"#cod": False, "#top": False, "#test": False})

        # Theorem 1: quantization noise + addition noise
        coding_variance = params.Xs.stddev ** 2 * sigma_set ** 2 * ntot
        sigma_final = float(sqrt(2 ** (t1 + t2) * params.Xe.stddev ** 2 + coding_variance))

        M = amplify_sigma(success_probability, sigmaf(sigma_final), params.q)
        if M is oo:
            cost["rop"] = oo
            cost["m"] = oo
            return cost
        m = (t1 + t2) * (params.q ** b - 1) / 2 + M
        cost["m"] = float(m)
        cost.register_impermanent(m=True)

        if not params.Xs <= params.Xe:
            # Equation (7)
            n = params.n - t1 * b
            C0 = (m - n) * (params.n + 1) * ceil(n / (b - 1))
            assert C0 >= 0
        else:
            C0 = 0

        # Equation (8)
        C1 = sum(
            [(params.n + 1 - i * b) * (m - i * (params.q ** b - 1) / 2) for i in range(1, t1 + 1)]
        )
        assert C1 >= 0

        # Equation (9)
        C2_ = sum(
            [
                4 * (M + i * (params.q ** b - 1) / 2) * CodedBKW.N(i, sigma_set, b, params.q)
                for i in range(1, t2 + 1)
            ]
        )
        C2 = float(C2_)
        for i in range(1, t2 + 1):
            C2 += float(
                ntop + ntest + sum([CodedBKW.N(j, sigma_set, b, params.q) for j in range(1, i + 1)])
            ) * (M + (i - 1) * (params.q ** b - 1) / 2)
        assert C2 >= 0

        # Equation (10)
        C3 = M * ntop * (2 * zeta + 1) ** ntop
        assert C3 >= 0

        # Equation (11)
        C4_ = 4 * M * ntest
        C4 = C4_ + (2 * zeta + 1) ** ntop * (
            cfft * params.q ** (ell + 1) * (ell + 1) * log(params.q, 2) + params.q ** (ell + 1)
        )
        assert C4 >= 0

        C = (C0 + C1 + C2 + C3 + C4) / (
            erf(zeta / sqrt(2 * params.Xe.stddev)) ** ntop
        )  # TODO don't ignore success probability
        cost["rop"] = float(C)
        cost["mem"] = (t1 + t2) * params.q ** b

        cost = cost.reorder("rop", "m", "mem", "b", "t1", "t2")
        cost["tag"] = "coded-bkw"
        cost["problem"] = params
        Logging.log("bkw", log_level + 1, f"{repr(cost)}")

        return cost

    @classmethod
    def b(
        cls,
        params: LWEParameters,
        ntest=None,
        log_level=1,
    ):
        def sf(x, best):
            return (x["rop"] <= best["rop"]) and (best["m"] > params.m or x["m"] <= params.m)

        # the outer search is over b, which determines the size of the tables: q^b
        b_max = 3 * ceil(log(params.q, 2))
        with local_minimum(2, b_max, smallerf=sf) as it_b:
            for b in it_b:
                # the inner search is over t2, the number of coded steps
                t2_max = min(params.n // b, ceil(3 * log(params.q, 2)))
                with local_minimum(2, t2_max, smallerf=sf) as it_t2:
                    for t2 in it_t2:
                        y = cls.cost(b=b, t2=t2, ntest=ntest, params=params)
                        it_t2.update(y)
                it_b.update(it_t2.y)
            best = it_b.y

        # the search cannot fail; it just outputs some X with X["m"] > params.m.
        if best["m"] > params.m:
            raise InsufficientSamplesError(
                f"Got m≈2^{float(log(params.m, 2.0)):.1f} samples, but require ≈2^{float(log(best['m'], 2.0)):.1f}.",
                best["m"],
            )
        return best

    def __call__(
        self,
        params: LWEParameters,
        ntest=None,
        log_level=1,
    ):
        """
        Coded-BKW as described in [C:GuoJohSta15]_.

        :param params: LWE parameters
        :param ntest: Number of coordinates to hypothesis test.
        :return: A cost dictionary

        The returned cost dictionary has the following entries:

        - ``rop``: Total number of word operations (≈ CPU cycles).
        - ``b``: BKW tables have size `q^b`.
        - ``t1``: Number of plain BKW tables.
        - ``t2``: Number of Coded-BKW tables.
        - ``ℓ``: Hypothesis testing has tables of size `q^{ℓ+1}`.
        - ``#cod``: Number of coding steps.
        - ``#top``: Number of guessing steps (typically zero).
        - ``#test``: Number of coordinates to do hypothesis testing on.

        EXAMPLE::

            >>> from sage.all import oo
            >>> from estimator import *
            >>> LWE.coded_bkw(LightSaber.updated(m=oo))
            rop: ≈2^171.7, m: ≈2^159.4, mem: ≈2^160.4, b: 12, t1: 3, t2: 18, ℓ: 11, #cod: 423, #top: 1...

        We may need to amplify the number of samples, which modifies the noise distribution::

            >>> from sage.all import oo
            >>> from estimator import *
            >>> LightSaber
            LWEParameters(n=512, q=8192, Xs=D(σ=1.58), Xe=D(σ=2.00), m=512, tag='LightSaber')
            >>> cost = LWE.coded_bkw(LightSaber); cost
            rop: ≈2^184.3, m: ≈2^172.2, mem: ≈2^173.2, b: 13, t1: 0, t2: 18, ℓ: 12, #cod: 456, #top: 0...
            >>> cost["problem"]
            LWEParameters(n=512, q=8192, Xs=D(σ=1.58), Xe=D(σ=10.39), m=..., tag='LightSaber')

        .. note :: See also [C:KirFou15]_.

        """
        params = LWEParameters.normalize(params)
        try:
            cost = self.b(params, ntest=ntest, log_level=log_level)
        except InsufficientSamplesError as e:
            m = e.args[1]
            while True:
                params_ = params.amplify_m(m)
                try:
                    cost = self.b(params_, ntest=ntest, log_level=log_level)
                    break
                except InsufficientSamplesError as e:
                    m = e.args[1]

        return cost


coded_bkw = CodedBKW()
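For intuition on how ``N`` and ``t1`` interact: each N_i is evaluated from the formula above, and any index with N_i ≤ b is folded into the plain BKW step count t1. A standalone evaluation (parameter values illustrative, with q, b, ℓ taken near the LightSaber doctest and ntest = 50 chosen arbitrarily):

    >>> from sage.all import floor, log, sqrt
    >>> q, b, ell, ntest = 8192, 12, 11, 50
    >>> sigma_set = sqrt(q ** (2 * (1 - ell / ntest)) / 12)  # as in CodedBKW.cost
    >>> [floor(b / (1 - log(12 * sigma_set ** 2 / 2 ** i, q) / 2)) for i in (1, 2, 3)]
    [46, 40, 35]

All three exceed b = 12 here, so with these toy values none of them would be demoted to a plain BKW table.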
estimator_new/lwe_dual.py (new file, 526 lines)
@@ -0,0 +1,526 @@
# -*- coding: utf-8 -*-
"""
Estimate cost of solving LWE using dual attacks.

See :ref:`LWE Dual Attacks` for an introduction to what is available.

"""

from functools import partial
from dataclasses import replace

from sage.all import oo, ceil, sqrt, log, cached_function, exp
from .reduction import delta as deltaf
from .reduction import cost as costf
from .reduction import ADPS16, BDGL16
from .reduction import LLL
from .util import local_minimum
from .cost import Cost
from .lwe_parameters import LWEParameters
from .prob import drop as prob_drop
from .prob import amplify as prob_amplify
from .io import Logging
from .conf import red_cost_model as red_cost_model_default
from .conf import mitm_opt as mitm_opt_default
from .errors import OutOfBoundsError
from .nd import NoiseDistribution
from .lwe_guess import exhaustive_search, mitm, distinguish


class DualHybrid:
    """
    Estimate cost of solving LWE using dual attacks.
    """

    full_sieves = [ADPS16.__name__, BDGL16.__name__]

    @staticmethod
    @cached_function
    def dual_reduce(
        delta: float,
        params: LWEParameters,
        zeta: int = 0,
        h1: int = 0,
        rho: float = 1.0,
        log_level=None,
    ):
        """
        Produce a new LWE sample using a dual vector on the first `n-ζ` coordinates of the secret.
        The length of the dual vector is given by `δ` in root Hermite form and using a possible
        scaling factor, i.e. `|v| = ρ ⋅ δ^d ⋅ q^((n-ζ)/d)`.

        :param delta: Length of the vector in root Hermite form
        :param params: LWE parameters
        :param zeta: Dimension ζ ≥ 0 of new LWE instance
        :param h1: Number of non-zero components of the secret of the new LWE instance
        :param rho: Factor introduced by obtaining multiple dual vectors
        :returns: new ``LWEParameters`` and ``m``

        .. note :: This function assumes that the instance is normalized.

        """
        if not 0 <= zeta <= params.n:
            raise OutOfBoundsError(
                f"Splitting dimension {zeta} must be between 0 and n={params.n}."
            )

        # Compute new secret distribution

        if params.Xs.is_sparse:
            h = params.Xs.get_hamming_weight(params.n)
            if not 0 <= h1 <= h:
                raise OutOfBoundsError(f"Splitting weight {h1} must be between 0 and h={h}.")
            # assuming the non-zero entries are uniform
            p = h1 / 2
            red_Xs = NoiseDistribution.SparseTernary(params.n - zeta, h / 2 - p)
            slv_Xs = NoiseDistribution.SparseTernary(zeta, p)

            if h1 == h:
                # no reason to do lattice reduction if we assume
                # that the hw on the reduction part is 0
                return replace(params, Xs=slv_Xs, m=oo), 1
        else:
            # distribution is i.i.d. for each coordinate
            red_Xs = replace(params.Xs, n=params.n - zeta)
            slv_Xs = replace(params.Xs, n=zeta)

        c = red_Xs.stddev * params.q / params.Xe.stddev

        # see if we have optimally many samples (as in [INDOCRYPT:EspJouKha20]) available
        m_ = max(1, ceil(sqrt(red_Xs.n * log(c) / log(delta))) - red_Xs.n)
        m_ = min(params.m, m_)

        # Compute new noise as in [INDOCRYPT:EspJouKha20]
        sigma_ = rho * red_Xs.stddev * delta ** (m_ + red_Xs.n) / c ** (m_ / (m_ + red_Xs.n))
        slv_Xe = NoiseDistribution.DiscreteGaussian(params.q * sigma_)

        slv_params = LWEParameters(
            n=zeta,
            q=params.q,
            Xs=slv_Xs,
            Xe=slv_Xe,
        )

        # The m_ we compute here is the optimal number of samples that we pick from the input LWE
        # instance. We then need to return it because it determines the lattice dimension for the
        # reduction.

        return slv_params, m_

    @staticmethod
    @cached_function
    def cost(
        solver,
        params: LWEParameters,
        beta: int,
        zeta: int = 0,
        h1: int = 0,
        success_probability: float = 0.99,
        red_cost_model=red_cost_model_default,
        use_lll=True,
        log_level=None,
    ):
        """
        Computes the cost of the dual hybrid attack that dual reduces the LWE instance and then
        uses the given solver to solve the reduced instance.

        :param solver: Algorithm for solving the reduced instance
        :param params: LWE parameters
        :param beta: Block size used to produce short dual vectors for dual reduction
        :param zeta: Dimension ζ ≥ 0 of new LWE instance
        :param h1: Number of non-zero components of the secret of the new LWE instance
        :param success_probability: The success probability to target
        :param red_cost_model: How to cost lattice reduction
        :param use_lll: Use LLL calls to produce more small vectors

        .. note :: This function assumes that the instance is normalized. It runs no optimization;
            it merely reports costs.

        """
        Logging.log("dual", log_level, f"β={beta}, ζ={zeta}, h1={h1}")

        delta = deltaf(beta)

        if red_cost_model.__name__ in DualHybrid.full_sieves:
            rho = 4.0 / 3
        elif use_lll:
            rho = 2.0
        else:
            rho = 1.0

        params_slv, m_ = DualHybrid.dual_reduce(
            delta, params, zeta, h1, rho, log_level=log_level + 1
        )
        Logging.log("dual", log_level + 1, f"red LWE instance: {repr(params_slv)}")

        cost_slv = solver(params_slv, success_probability)
        Logging.log("dual", log_level + 2, f"solve: {repr(cost_slv)}")

        d = m_ + params.n - zeta
        cost_red = costf(red_cost_model, beta, d)
        if red_cost_model.__name__ in DualHybrid.full_sieves:
            # if we use full sieving, we get many short vectors
            # we compute in logs to avoid overflows in case m
            # or beta happen to be large
            try:
                log_rep = max(0, log(cost_slv["m"]) - (beta / 2) * log(4 / 3))
                if log_rep > 10 ** 10:
                    # sage's ceil function seems to completely freak out for large
                    # inputs, but then m is huge, so unlikely to be relevant
                    raise OverflowError()
                cost_red = cost_red.repeat(ceil(exp(log_rep)))
            except OverflowError:
                # if we get an overflow, m must be huge
                # so we can probably approximate the cost with
                # oo for our purposes
                return Cost(rop=oo)
        elif use_lll:
            cost_red["rop"] += cost_slv["m"] * LLL(d, log(params.q, 2))
            cost_red["repetitions"] = cost_slv["m"]
        else:
            cost_red = cost_red.repeat(cost_slv["m"])

        Logging.log("dual", log_level + 2, f"red: {repr(cost_red)}")

        total_cost = cost_slv.combine(cost_red)
        total_cost["m"] = m_
        total_cost["rop"] = cost_red["rop"] + cost_slv["rop"]
        total_cost["mem"] = cost_slv["mem"]

        if d < params.n - zeta:
            raise RuntimeError(f"{d} < {params.n - zeta}, {params.n}, {zeta}, {m_}")
        total_cost["d"] = d

        Logging.log("dual", log_level, f"{repr(total_cost)}")

        rep = 1
        if params.Xs.is_sparse:
            h = params.Xs.get_hamming_weight(params.n)
            probability = prob_drop(params.n, h, zeta, h1)
            rep = prob_amplify(success_probability, probability)
        # don't need more samples to re-run the attack, since we may
        # just guess different components of the secret
        return total_cost.repeat(times=rep, select={"m": False})

    @staticmethod
    def optimize_blocksize(
        solver,
        params: LWEParameters,
        zeta: int = 0,
        h1: int = 0,
        success_probability: float = 0.99,
        red_cost_model=red_cost_model_default,
        use_lll=True,
        log_level=5,
        opt_step=2,
    ):
        """
        Optimizes the cost of the dual hybrid attack over the block size β.

        :param solver: Algorithm for solving the reduced instance
        :param params: LWE parameters
        :param zeta: Dimension ζ ≥ 0 of new LWE instance
        :param h1: Number of non-zero components of the secret of the new LWE instance
        :param success_probability: The success probability to target
        :param red_cost_model: How to cost lattice reduction
        :param use_lll: Use LLL calls to produce more small vectors
        :param opt_step: control robustness of optimizer

        .. note :: This function assumes that the instance is normalized. ζ and h1 are fixed.

        """

        f = partial(
            DualHybrid.cost,
            solver=solver,
            params=params,
            zeta=zeta,
            h1=h1,
            success_probability=success_probability,
            red_cost_model=red_cost_model,
            use_lll=use_lll,
            log_level=log_level,
        )
        # we don't have a reliable upper bound for beta, so we
        # choose n - ζ arbitrarily and adjust later if necessary
        beta_upper = max(params.n - zeta, 40)
        beta = beta_upper
        while beta == beta_upper:
            beta_upper *= 2
            with local_minimum(2, beta_upper, opt_step) as it:
                for beta in it:
                    it.update(f(beta=beta))
                for beta in it.neighborhood:
                    it.update(f(beta=beta))
                cost = it.y
            beta = cost["beta"]

        cost["zeta"] = zeta
        if params.Xs.is_sparse:
            cost["h1"] = h1
        return cost

    def __call__(
        self,
        solver,
        params: LWEParameters,
        success_probability: float = 0.99,
        red_cost_model=red_cost_model_default,
        use_lll=True,
        opt_step=2,
        log_level=1,
    ):
        """
        Optimizes the cost of the dual hybrid attack (using the given solver) over
        all attack parameters: block size β, splitting dimension ζ, and
        splitting weight h1 (in case the secret distribution is sparse). Since
        the cost function for the dual hybrid might only be convex in an approximate
        sense, the parameter ``opt_step`` allows making the optimization procedure more
        robust against local irregularities (higher value) at the cost of a longer
        running time. In a nutshell, if the cost of the dual hybrid seems suspiciously
        high, try a larger ``opt_step`` (e.g. 4 or 8).

        :param solver: Algorithm for solving the reduced instance
        :param params: LWE parameters
        :param success_probability: The success probability to target
        :param red_cost_model: How to cost lattice reduction
        :param use_lll: use LLL calls to produce more small vectors
        :param opt_step: control robustness of optimizer

        The returned cost dictionary has the following entries:

        - ``rop``: Total number of word operations (≈ CPU cycles).
        - ``mem``: Total amount of memory used by solver (in elements mod q).
        - ``red``: Number of word operations in lattice reduction.
        - ``δ``: Root-Hermite factor targeted by lattice reduction.
        - ``β``: BKZ block size.
        - ``ζ``: Number of guessed coordinates.
        - ``h1``: Number of non-zero components among guessed coordinates (if secret distribution is sparse)
        - ``prob``: Probability of success in guessing.
        - ``repetitions``: How often we are required to repeat the attack.
        - ``d``: Lattice dimension.

        - When ζ = 1 this function essentially estimates the dual attack.
        - When ζ > 1 and ``solver`` is ``exhaustive_search`` this function estimates
          the hybrid attack as given in [INDOCRYPT:EspJouKha20]_
        - When ζ > 1 and ``solver`` is ``mitm`` this function estimates the dual MITM
          hybrid attack roughly following [EPRINT:CHHS19]_

        EXAMPLES::

            >>> from estimator import *
            >>> params = LWE.Parameters(n=1024, q=2**32, Xs=ND.Uniform(0, 1), Xe=ND.DiscreteGaussian(3.0))
            >>> LWE.dual(params)
            rop: ≈2^115.5, mem: ≈2^68.0, m: 1018, red: ≈2^115.4, δ: 1.005021, β: 284, d: 2042, ↻: ≈2^68.0, tag: dual
            >>> LWE.dual_hybrid(params)
            rop: ≈2^111.3, mem: ≈2^106.4, m: 983, red: ≈2^111.2, δ: 1.005204, β: 269, d: 1957, ↻: ≈2^56.4, ζ: 50...
            >>> LWE.dual_hybrid(params, mitm_optimization=True)
            rop: ≈2^141.1, mem: ≈2^139.1, m: 1189, k: 132, ↻: 139, red: ≈2^140.8, δ: 1.004164, β: 375, d: 2021...
            >>> LWE.dual_hybrid(params, mitm_optimization="numerical")
            rop: ≈2^140.6, m: 1191, k: 128, mem: ≈2^136.0, ↻: 133, red: ≈2^140.2, δ: 1.004179, β: 373, d: 2052...

            >>> params = params.updated(Xs=ND.SparseTernary(params.n, 32))
            >>> LWE.dual(params)
            rop: ≈2^111.7, mem: ≈2^66.0, m: 950, red: ≈2^111.5, δ: 1.005191, β: 270, d: 1974, ↻: ≈2^66.0, tag: dual
            >>> LWE.dual_hybrid(params)
            rop: ≈2^97.8, mem: ≈2^81.9, m: 730, red: ≈2^97.4, δ: 1.006813, β: 175, d: 1453, ↻: ≈2^36.3, ζ: 301...
            >>> LWE.dual_hybrid(params, mitm_optimization=True)
            rop: ≈2^103.4, mem: ≈2^81.5, m: 724, k: 310, ↻: ≈2^27.3, red: ≈2^102.7, δ: 1.006655, β: 182...

            >>> params = params.updated(Xs=ND.CenteredBinomial(8))
            >>> LWE.dual(params)
            rop: ≈2^123.1, mem: ≈2^75.4, m: 1151, red: ≈2^123.0, δ: 1.004727, β: 311, d: 2175, ↻: ≈2^75.4, tag: dual
            >>> LWE.dual_hybrid(params)
            rop: ≈2^122.4, mem: ≈2^116.6, m: 1143, red: ≈2^122.2, δ: 1.004758, β: 308, d: 2157, ↻: ≈2^75.8, ζ: 10...
            >>> LWE.dual_hybrid(params, mitm_optimization=True)
            rop: ≈2^181.7, mem: ≈2^179.2, m: 1554, k: 42, ↻: 179, red: ≈2^181.4, δ: 1.003315, β: 519, d: 2513...

            >>> params = params.updated(Xs=ND.DiscreteGaussian(3.0))
            >>> LWE.dual(params)
            rop: ≈2^125.4, mem: ≈2^78.0, m: 1190, red: ≈2^125.3, δ: 1.004648, β: 319, d: 2214, ↻: ≈2^78.0, tag: dual
            >>> LWE.dual_hybrid(params)
            rop: ≈2^125.1, mem: ≈2^117.7, m: 1187, red: ≈2^125.0, δ: 1.004657, β: 318, d: 2204, ↻: ≈2^75.9, ζ: 7...
            >>> LWE.dual_hybrid(params, mitm_optimization=True)
            rop: ≈2^175.0, mem: ≈2^168.9, m: 1547, k: 27, ↻: 169, red: ≈2^175.0, δ: 1.003424, β: 496, d: 2544, ζ: 27...
        """

        Cost.register_impermanent(
            rop=True,
            mem=False,
            red=True,
            beta=False,
            delta=False,
            m=True,
            d=False,
            zeta=False,
        )
        Logging.log("dual", log_level, f"costing LWE instance: {repr(params)}")

        params = params.normalize()

        if params.Xs.is_sparse:
            Cost.register_impermanent(h1=False)

            def _optimize_blocksize(
                solver,
                params: LWEParameters,
                zeta: int = 0,
                success_probability: float = 0.99,
                red_cost_model=red_cost_model_default,
                use_lll=True,
                log_level=None,
            ):
                h = params.Xs.get_hamming_weight(params.n)
                h1_min = max(0, h - (params.n - zeta))
                h1_max = min(zeta, h)
                Logging.log("dual", log_level, f"h1 ∈ [{h1_min},{h1_max}] (zeta={zeta})")
                with local_minimum(h1_min, h1_max, log_level=log_level + 1) as it:
                    for h1 in it:
                        cost = self.optimize_blocksize(
                            h1=h1,
                            solver=solver,
                            params=params,
                            zeta=zeta,
                            success_probability=success_probability,
                            red_cost_model=red_cost_model,
                            use_lll=use_lll,
                            log_level=log_level + 2,
                        )
                        it.update(cost)
                    return it.y

        else:
            _optimize_blocksize = self.optimize_blocksize

        f = partial(
            _optimize_blocksize,
            solver=solver,
            params=params,
            success_probability=success_probability,
            red_cost_model=red_cost_model,
            use_lll=use_lll,
            log_level=log_level + 1,
        )

        with local_minimum(1, params.n - 1, opt_step) as it:
            for zeta in it:
                it.update(f(zeta=zeta))
            for zeta in it.neighborhood:
                it.update(f(zeta=zeta))
            cost = it.y

        cost["problem"] = params
        return cost


DH = DualHybrid()


def dual(
    params: LWEParameters,
    success_probability: float = 0.99,
    red_cost_model=red_cost_model_default,
    use_lll=True,
):
    """
    Dual attack as in [PQCBook:MicReg09]_.

    :param params: LWE parameters.
    :param success_probability: The success probability to target.
    :param red_cost_model: How to cost lattice reduction.
    :param use_lll: use LLL calls to produce more small vectors.

    The returned cost dictionary has the following entries:

    - ``rop``: Total number of word operations (≈ CPU cycles).
    - ``mem``: Total amount of memory used by solver (in elements mod q).
    - ``red``: Number of word operations in lattice reduction.
    - ``δ``: Root-Hermite factor targeted by lattice reduction.
    - ``β``: BKZ block size.
    - ``prob``: Probability of success in guessing.
    - ``repetitions``: How often we are required to repeat the attack.
    - ``d``: Lattice dimension.

    """
    Cost.register_impermanent(
        rop=True,
        mem=False,
        red=True,
        beta=False,
        delta=False,
        m=True,
        d=False,
    )

    ret = DH.optimize_blocksize(
        solver=distinguish,
        params=params,
        zeta=0,
        h1=0,
        success_probability=success_probability,
        red_cost_model=red_cost_model,
        use_lll=use_lll,
        log_level=1,
    )
    del ret["zeta"]
    if hasattr(ret, "h1"):
        del ret["h1"]
    ret["tag"] = "dual"
    return ret


def dual_hybrid(
    params: LWEParameters,
    success_probability: float = 0.99,
    red_cost_model=red_cost_model_default,
    use_lll=True,
    mitm_optimization=False,
    opt_step=2,
):
    """
    Dual hybrid attack from [INDOCRYPT:EspJouKha20]_.

    :param params: LWE parameters.
    :param success_probability: The success probability to target.
    :param red_cost_model: How to cost lattice reduction.
    :param use_lll: Use LLL calls to produce more small vectors.
    :param mitm_optimization: One of "analytical" or "numerical". If ``True`` a default from the
        ``conf`` module is picked, ``False`` disables MITM.
    :param opt_step: Control robustness of optimizer.

    The returned cost dictionary has the following entries:

    - ``rop``: Total number of word operations (≈ CPU cycles).
    - ``mem``: Total amount of memory used by solver (in elements mod q).
    - ``red``: Number of word operations in lattice reduction.
    - ``δ``: Root-Hermite factor targeted by lattice reduction.
    - ``β``: BKZ block size.
    - ``ζ``: Number of guessed coordinates.
    - ``h1``: Number of non-zero components among guessed coordinates (if secret distribution is sparse)
    - ``prob``: Probability of success in guessing.
    - ``repetitions``: How often we are required to repeat the attack.
    - ``d``: Lattice dimension.
    """

    if mitm_optimization is True:
        mitm_optimization = mitm_opt_default

    if mitm_optimization:
        solver = partial(mitm, optimization=mitm_optimization)
    else:
        solver = exhaustive_search

    ret = DH(
        solver=solver,
        params=params,
        success_probability=success_probability,
        red_cost_model=red_cost_model,
        use_lll=use_lll,
        opt_step=opt_step,
    )
    if mitm_optimization:
        ret["tag"] = "dual_mitm_hybrid"
    else:
        ret["tag"] = "dual_hybrid"
    return ret
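The core trade-off in ``dual_reduce`` is visible from the new-noise formula σ' = ρ · σ_s · δ^(m+n-ζ) / c^(m/(m+n-ζ)) with c = σ_s · q / σ_e: a smaller δ (larger β) makes the dual vectors shorter and so the derived sample less noisy. A numeric sketch of that sensitivity (parameter values illustrative, taken near the first doctest above with ζ = 50):

    >>> from estimator.reduction import delta as deltaf
    >>> n_red, m, q, s_stddev, e_stddev, rho = 974, 1018, 2**32, 0.5, 3.0, 2.0
    >>> c = s_stddev * q / e_stddev
    >>> def sigma_(beta):
    ...     return rho * s_stddev * deltaf(beta) ** (m + n_red) / c ** (m / (m + n_red))
    >>> sigma_(300) < sigma_(250)  # larger block size, smaller derived noise
    True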
417
estimator_new/lwe_guess.py
Normal file
417
estimator_new/lwe_guess.py
Normal file
@@ -0,0 +1,417 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Generic multiplicative composition of guessing some components of the LWE secret and some LWE solving algorithm.
|
||||
|
||||
By "multiplicative" we mean that costs multiply rather than add. It is often possible to achieve
|
||||
some form of additive composition, i.e. this strategy is rarely the most efficient.
|
||||
|
||||
"""
|
||||
|
||||
from sage.all import log, floor, ceil, binomial
|
||||
from sage.all import sqrt, pi, exp, RR, ZZ, oo, round, e
|
||||
|
||||
from .conf import mitm_opt
|
||||
from .cost import Cost
|
||||
from .errors import InsufficientSamplesError, OutOfBoundsError
|
||||
from .lwe_parameters import LWEParameters
|
||||
from .prob import amplify as prob_amplify
|
||||
from .prob import drop as prob_drop
|
||||
from .prob import amplify_sigma
|
||||
from .util import local_minimum
|
||||
from .nd import sigmaf
|
||||
|
||||
|
||||
def log2(x):
|
||||
return log(x, 2)
|
||||
|
||||
|
||||
class guess_composition:
|
||||
def __init__(self, f):
|
||||
"""
|
||||
Create a generic composition of guessing and `f`.
|
||||
"""
|
||||
self.f = f
|
||||
self.__name__ = f"{f.__name__}+guessing"
|
||||
|
||||
@classmethod
|
||||
def dense_solve(cls, f, params, log_level=5, **kwds):
|
||||
"""
|
||||
Guess components of a dense secret then call `f`.
|
||||
|
||||
:param f: Some object consuming `params` and outputting some `cost`
|
||||
:param params: LWE parameters.
|
||||
|
||||
"""
|
||||
base = params.Xs.bounds[1] - params.Xs.bounds[0] + 1
|
||||
|
||||
baseline_cost = f(params, **kwds)
|
||||
|
||||
max_zeta = min(floor(log(baseline_cost["rop"], base)), params.n)
|
||||
|
||||
with local_minimum(0, max_zeta, log_level=log_level) as it:
|
||||
for zeta in it:
|
||||
search_space = base ** zeta
|
||||
cost = f(params.updated(n=params.n - zeta), log_level=log_level + 1, **kwds)
|
||||
repeated_cost = cost.repeat(search_space)
|
||||
repeated_cost["zeta"] = zeta
|
||||
it.update(repeated_cost)
|
||||
return it.y
|
||||
|
||||
@classmethod
|
||||
def gammaf(cls, n, h, zeta, base, g=lambda x: x):
|
||||
"""
|
||||
Find optimal hamming weight for sparse guessing.
|
||||
|
||||
Let `s` be a vector of dimension `n` where we expect `h` non-zero entries. We are ignoring `ζ-γ`
|
||||
components and are guessing `γ`. This succeeds with some probability given by ``prob_drop(n, h,
|
||||
ζ, γ)``. Exhaustively searching the guesses takes `binomial(ζ, γ) ⋅ b^γ` steps where `b` is the
|
||||
number of non-zero values in a component of `s`. We call a `γ` optimal if it minimizes the
|
||||
overall number of repetitions that need to be performed to succeed with probability 99%.
|
||||
|
||||
:param n: vector dimension
|
||||
:param h: hamming weight of the vector
|
||||
:param zeta: number of ignored + guessed components
|
||||
:param base: number of possible non-zero scalars
|
||||
:param g: We do not consider the search space directly but `g()` applied to it (think
|
||||
time-memory trade-offs).
|
||||
:returns: (number of repetitions, γ, size of the search space, probability of success)
|
||||
|
||||
"""
|
||||
if not zeta:
|
||||
return 1, 0, 0, 1.0
|
||||
|
||||
search_space = 0
|
||||
gamma = 0
|
||||
probability = 0
|
||||
best = None, None, None, None
|
||||
while gamma < min(h, zeta):
|
||||
probability += prob_drop(n, h, zeta, fail=gamma)
|
||||
search_space += binomial(zeta, gamma) * base ** gamma
|
||||
repeat = prob_amplify(0.99, probability) * g(search_space)
|
||||
if best[0] is None or repeat < best[0]:
|
||||
best = repeat, gamma, search_space, probability
|
||||
gamma += 1
|
||||
else:
|
||||
break
|
||||
return best
|
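# Worked example (hand computation, not a doctest): gammaf(n=10, h=2, zeta=5, base=2)
# proceeds as
#   γ=0: prob = drop(10, 2, 5, fail=0) = C(8,5)·C(2,0)/C(10,5) = 56/252 ≈ 0.222,
#        |S| = C(5,0)·2^0 = 1, repeat = amplify(0.99, 0.222)·1 = 19;
#   γ=1: prob ≈ 0.778, |S| = 1 + C(5,1)·2 = 11, repeat = amplify(0.99, 0.778)·11 = 44 > 19,
# so the loop breaks and (19, 0, 1, 56/252) is returned.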
||||
|
||||
@classmethod
|
||||
def sparse_solve(cls, f, params, log_level=5, **kwds):
|
||||
"""
|
||||
Guess components of a sparse secret then call `f`.
|
||||
|
||||
:param f: Some object consuming `params` and outputting some `cost`
|
||||
:param params: LWE parameters.
|
||||
"""
|
||||
base = params.Xs.bounds[1] - params.Xs.bounds[0] # we exclude zero
|
||||
h = ceil(len(params.Xs) * params.Xs.density) # nr of non-zero entries
|
||||
|
||||
with local_minimum(0, params.n - 40, log_level=log_level) as it:
|
||||
for zeta in it:
|
||||
single_cost = f(params.updated(n=params.n - zeta), log_level=log_level + 1, **kwds)
|
||||
repeat, gamma, search_space, probability = cls.gammaf(params.n, h, zeta, base)
|
||||
cost = single_cost.repeat(repeat)
|
||||
cost["zeta"] = zeta
|
||||
cost["|S|"] = search_space
|
||||
cost["prop"] = probability
|
||||
it.update(cost)
|
||||
return it.y
|
||||
|
||||
def __call__(self, params, log_level=5, **kwds):
|
||||
"""
|
||||
Guess components of a secret then call `f`.
|
||||
|
||||
:param params: LWE parameters.
|
||||
|
||||
EXAMPLE::
|
||||
|
||||
>>> from estimator import *
|
||||
>>> from estimator.lwe_guess import guess_composition
|
||||
>>> guess_composition(LWE.primal_usvp)(Kyber512.updated(Xs=ND.SparseTernary(512, 16)))
|
||||
rop: ≈2^102.8, red: ≈2^102.8, δ: 1.008705, β: 113, d: 421, tag: usvp, ↻: ≈2^37.5, ζ: 265, |S|: 1, ...
|
||||
|
||||
Compare::
|
||||
|
||||
>>> LWE.primal_hybrid(Kyber512.updated(Xs=ND.SparseTernary(512, 16)))
|
||||
rop: ≈2^86.6, red: ≈2^85.7, svp: ≈2^85.6, β: 104, η: 2, ζ: 371, |S|: ≈2^91.1, d: 308, prob: ≈2^-21.3, ...
|
||||
|
||||
"""
|
||||
if params.Xs.is_sparse:
|
||||
return self.sparse_solve(self.f, params, log_level, **kwds)
|
||||
else:
|
||||
return self.dense_solve(self.f, params, log_level, **kwds)
|
||||
|
||||
|
||||
class ExhaustiveSearch:
|
||||
def __call__(self, params: LWEParameters, success_probability=0.99, quantum: bool = False):
|
||||
"""
|
||||
Estimate cost of solving LWE via exhaustive search.
|
||||
|
||||
:param params: LWE parameters
|
||||
:param success_probability: the targeted success probability
|
||||
:param quantum: use estimate for quantum computer (we simply take the square root of the search space)
|
||||
:return: A cost dictionary
|
||||
|
||||
The returned cost dictionary has the following entries:
|
||||
|
||||
- ``rop``: Total number of word operations (≈ CPU cycles).
|
||||
- ``mem``: memory requirement in integers mod q.
|
||||
- ``m``: Required number of samples to distinguish the correct solution with high probability.
|
||||
|
||||
EXAMPLE::
|
||||
|
||||
>>> from estimator import *
|
||||
>>> params = LWE.Parameters(n=64, q=2**40, Xs=ND.UniformMod(2), Xe=ND.DiscreteGaussian(3.2))
|
||||
>>> exhaustive_search(params)
|
||||
rop: ≈2^73.6, mem: ≈2^72.6, m: 397.198
|
||||
>>> params = LWE.Parameters(n=1024, q=2**40, Xs=ND.SparseTernary(n=1024, p=32), Xe=ND.DiscreteGaussian(3.2))
|
||||
>>> exhaustive_search(params)
|
||||
rop: ≈2^417.3, mem: ≈2^416.3, m: ≈2^11.2
|
||||
|
||||
"""
|
||||
params = LWEParameters.normalize(params)
|
||||
|
||||
# there are two stages: enumeration and distinguishing, so we split up the success_probability
|
||||
probability = sqrt(success_probability)
|
||||
|
||||
try:
|
||||
size = params.Xs.support_size(n=params.n, fraction=probability)
|
||||
except NotImplementedError:
|
||||
# not achieving required probability with search space
|
||||
# given our settings that means the search space is huge
|
||||
# so we approximate the cost with oo
|
||||
return Cost(rop=oo, mem=oo, m=1)
|
||||
|
||||
if quantum:
|
||||
size = size.sqrt()
|
||||
|
||||
# set m according to [ia.cr/2020/515]
|
||||
sigma = params.Xe.stddev / params.q
|
||||
m_required = RR(
|
||||
8 * exp(4 * pi * pi * sigma * sigma) * (log(size) - log(log(1 / probability)))
|
||||
)
|
||||
|
||||
if params.m < m_required:
|
||||
raise InsufficientSamplesError(
|
||||
f"Exhaustive search: Need {m_required} samples but only {params.m} available."
|
||||
)
|
||||
else:
|
||||
m = m_required
|
||||
|
||||
# we can compute A*s for all candidate s in time 2*size*m using
|
||||
# (the generalization [ia.cr/2021/152] of) the recursive algorithm
|
||||
# from [ia.cr/2020/515]
|
||||
cost = 2 * size * m
|
||||
|
||||
ret = Cost(rop=cost, mem=cost / 2, m=m)
|
||||
return ret
|
||||
|
||||
__name__ = "exhaustive_search"
|
||||
|
||||
|
||||
exhaustive_search = ExhaustiveSearch()
|
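# Sanity check of the sample-count formula in ``__call__`` above, against its
# first doctest (hand computation): for n=64, q=2^40, Xs=UniformMod(2) we have
# probability = sqrt(0.99) ≈ 0.99499 and size ≈ 0.995·2^64, so log(size) ≈ 44.356
# and log(log(1/probability)) ≈ -5.293; with σ = 3.2/2^40 the exponential factor
# is ≈ 1, hence m ≈ 8·(44.356 + 5.293) ≈ 397.198, as printed.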
||||
|
||||
|
||||
class MITM:
|
||||
|
||||
locality = 0.05
|
||||
|
||||
def X_range(self, nd):
|
||||
if nd.is_bounded:
|
||||
a, b = nd.bounds
|
||||
return b - a + 1, 1.0
|
||||
else:
|
||||
# setting fraction=0 to ensure that support size does not
|
||||
# throw error. we'll take the probability into account later
|
||||
rng = nd.support_size(n=1, fraction=0.0)
|
||||
return rng, nd.gaussian_tail_prob
|
||||
|
||||
def local_range(self, center):
|
||||
return ZZ(floor((1 - self.locality) * center)), ZZ(ceil((1 + self.locality) * center))
|
||||
|
||||
def mitm_analytical(self, params: LWEParameters, success_probability=0.99):
|
||||
nd_rng, nd_p = self.X_range(params.Xe)
|
||||
delta = nd_rng / params.q # possible error range scaled
|
||||
|
||||
sd_rng, sd_p = self.X_range(params.Xs)
|
||||
|
||||
# determine the number of elements in the tables depending on splitting dim
|
||||
n = params.n
|
||||
k = round(n / (2 - delta))
|
||||
# we could now call self.cost with this k, but using our model below seems
|
||||
# about 3x faster and reasonably accurate
|
||||
|
||||
if params.Xs.is_sparse:
|
||||
h = params.Xs.get_hamming_weight(n=params.n)
|
||||
split_h = round(h * k / n)
|
||||
success_probability_ = (
|
||||
binomial(k, split_h) * binomial(n - k, h - split_h) / binomial(n, h)
|
||||
)
|
||||
|
||||
logT = RR(h * (log2(n) - log2(h) + log2(sd_rng - 1) + log2(e))) / (2 - delta)
|
||||
logT -= RR(log2(h) / 2)
|
||||
logT -= RR(h * h * log2(e) / (2 * n * (2 - delta) ** 2))
|
||||
else:
|
||||
success_probability_ = 1.0
|
||||
logT = k * log(sd_rng, 2)
|
||||
|
||||
m_ = max(1, round(logT + log(logT, 2)))
|
||||
if params.m < m_:
|
||||
raise InsufficientSamplesError(
|
||||
f"MITM: Need {m_} samples but only {params.m} available."
|
||||
)
|
||||
|
||||
# since m = logT + loglogT and rop = T*m, we have rop=2^m
|
||||
ret = Cost(rop=RR(2 ** m_), mem=2 ** logT * m_, m=m_, k=ZZ(k))
|
||||
repeat = prob_amplify(success_probability, sd_p ** n * nd_p ** m_ * success_probability_)
|
||||
return ret.repeat(times=repeat)
|
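# Worked example for the first doctest in ``__call__`` below (hand computation):
# with n=64, q=2^40, Xs=UniformMod(2) we have sd_rng = 2 and delta ≈ 0, so
# k = round(64/2) = 32, logT = 32·log2(2) = 32 and m_ = round(32 + log2(32)) = 37,
# giving rop = 2^37 and mem = 2^32·37 ≈ 2^37.2, matching ``mitm(params)``.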
||||
|
||||
def cost(
|
||||
self,
|
||||
params: LWEParameters,
|
||||
k: int,
|
||||
success_probability=0.99,
|
||||
):
|
||||
nd_rng, nd_p = self.X_range(params.Xe)
|
||||
delta = nd_rng / params.q # possible error range scaled
|
||||
|
||||
sd_rng, sd_p = self.X_range(params.Xs)
|
||||
n = params.n
|
||||
|
||||
if params.Xs.is_sparse:
|
||||
h = params.Xs.get_hamming_weight(n=n)
|
||||
|
||||
# we assume the hamming weight to be distributed evenly across the two parts
|
||||
# if not we can rerandomize on the coordinates and try again -> repeat
|
||||
split_h = round(h * k / n)
|
||||
size_tab = RR((sd_rng - 1) ** split_h * binomial(k, split_h))
|
||||
size_sea = RR((sd_rng - 1) ** (h - split_h) * binomial(n - k, h - split_h))
|
||||
success_probability_ = (
|
||||
binomial(k, split_h) * binomial(n - k, h - split_h) / binomial(n, h)
|
||||
)
|
||||
else:
|
||||
size_tab = sd_rng ** k
|
||||
size_sea = sd_rng ** (n - k)
|
||||
success_probability_ = 1
|
||||
|
||||
# we set m such that it approximately minimizes the search cost per query as
|
||||
# a reasonable starting point and then optimize around it
|
||||
m_ = ceil(max(log2(size_tab) + log2(log2(size_tab)), 1))
|
||||
a, b = self.local_range(m_)
|
||||
with local_minimum(a, b, smallerf=lambda x, best: x[1] <= best[1]) as it:
|
||||
for m in it:
|
||||
# for search we effectively build a second table and for each entry, we expect
|
||||
# 2^(m * 4 * B / q) = 2^(delta * m) table lookups + an l_oo computation (costing m)
|
||||
# for every hit in the table (which has probability T/2^m)
|
||||
cost = (m, size_sea * (2 * m + 2 ** (delta * m) * (1 + size_tab * m / 2 ** m)))
|
||||
it.update(cost)
|
||||
m, cost_search = it.y
|
||||
m = min(m, params.m)
|
||||
|
||||
# building the table costs 2*T*m using the generalization [ia.cr/2021/152] of
|
||||
# the recursive algorithm from [ia.cr/2020/515]
|
||||
cost_table = size_tab * 2 * m
|
||||
|
||||
ret = Cost(rop=(cost_table + cost_search), m=m, k=k)
|
||||
ret["mem"] = size_tab * (k + m) + size_sea * (n - k + m)
|
||||
repeat = prob_amplify(success_probability, sd_p ** n * nd_p ** m * success_probability_)
|
||||
return ret.repeat(times=repeat)
|
||||
|
||||
def __call__(self, params: LWEParameters, success_probability=0.99, optimization=mitm_opt):
|
||||
"""
|
||||
Estimate cost of solving LWE via Meet-In-The-Middle attack.
|
||||
|
||||
:param params: LWE parameters
|
||||
:param success_probability: the targeted success probability
|
||||
:param optimization: Either "analytical" (faster, default) or "numerical" (more accurate)
|
||||
:return: A cost dictionary
|
||||
|
||||
The returned cost dictionary has the following entries:
|
||||
|
||||
- ``rop``: Total number of word operations (≈ CPU cycles).
|
||||
- ``mem``: memory requirement in integers mod q.
|
||||
- ``m``: Required number of samples to distinguish the correct solution with high probability.
|
||||
- ``k``: Splitting dimension.
|
||||
- ``↻``: Repetitions required to achieve targeted success probability
|
||||
|
||||
EXAMPLE::
|
||||
|
||||
>>> from estimator import *
|
||||
>>> params = LWE.Parameters(n=64, q=2**40, Xs=ND.UniformMod(2), Xe=ND.DiscreteGaussian(3.2))
|
||||
>>> mitm(params)
|
||||
rop: ≈2^37.0, mem: ≈2^37.2, m: 37, k: 32, ↻: 1
|
||||
>>> mitm(params, optimization="numerical")
|
||||
rop: ≈2^39.2, m: 36, k: 32, mem: ≈2^39.1, ↻: 1
|
||||
>>> params = LWE.Parameters(n=1024, q=2**40, Xs=ND.SparseTernary(n=1024, p=32), Xe=ND.DiscreteGaussian(3.2))
|
||||
>>> mitm(params)
|
||||
rop: ≈2^215.4, mem: ≈2^210.2, m: ≈2^13.1, k: 512, ↻: 43
|
||||
>>> mitm(params, optimization="numerical")
|
||||
rop: ≈2^216.0, m: ≈2^13.1, k: 512, mem: ≈2^211.4, ↻: 43
|
||||
|
||||
"""
|
||||
Cost.register_impermanent(rop=True, mem=False, m=True, k=False)
|
||||
|
||||
params = LWEParameters.normalize(params)
|
||||
|
||||
nd_rng, _ = self.X_range(params.Xe)
|
||||
if nd_rng >= params.q:
|
||||
# MITM attacks cannot handle an error this large.
|
||||
return Cost(rop=oo, mem=oo, m=0, k=0)
|
||||
|
||||
if "analytical" in optimization:
|
||||
return self.mitm_analytical(params=params, success_probability=success_probability)
|
||||
elif "numerical" in optimization:
|
||||
with local_minimum(1, params.n - 1) as it:
|
||||
for k in it:
|
||||
cost = self.cost(k=k, params=params, success_probability=success_probability)
|
||||
it.update(cost)
|
||||
ret = it.y
|
||||
# if the noise is large, the curve might not be convex, so the above minimum
|
||||
# is not correct. Interestingly, in these cases, it seems that k=1 might be smallest
|
||||
ret1 = self.cost(k=1, params=params, success_probability=success_probability)
|
||||
return min(ret, ret1)
|
||||
else:
|
||||
raise ValueError("Unknown optimization method for MITM.")
|
||||
|
||||
__name__ = "mitm"
|
||||
|
||||
|
||||
mitm = MITM()
|
||||
|
||||
|
||||
class Distinguisher:
|
||||
def __call__(self, params: LWEParameters, success_probability=0.99):
|
||||
"""
|
||||
Estimate cost of distinguishing a 0-dimensional LWE instance from uniformly random,
|
||||
which is essentially the number of samples required.
|
||||
|
||||
:param params: LWE parameters
|
||||
:param success_probability: the targeted success probability
|
||||
:return: A cost dictionary
|
||||
|
||||
The returned cost dictionary has the following entries:
|
||||
|
||||
- ``rop``: Total number of word operations (≈ CPU cycles).
|
||||
- ``mem``: memory requirement in integers mod q.
|
||||
- ``m``: Required number of samples to distinguish.
|
||||
|
||||
EXAMPLE::
|
||||
|
||||
>>> from estimator import *
|
||||
>>> params = LWE.Parameters(n=0, q=2 ** 32, Xs=ND.UniformMod(2), Xe=ND.DiscreteGaussian(2 ** 32))
|
||||
>>> distinguish(params)
|
||||
rop: ≈2^60.0, mem: ≈2^60.0, m: ≈2^60.0
|
||||
|
||||
"""
|
||||
|
||||
if params.n > 0:
|
||||
raise OutOfBoundsError("Secret dimension should be 0 for distinguishing. Try exhaustive search for n > 0.")
|
||||
m = amplify_sigma(success_probability, sigmaf(params.Xe.stddev), params.q)
|
||||
if m > params.m:
|
||||
raise InsufficientSamplesError("Not enough samples to distinguish with target advantage.")
|
||||
return Cost(rop=m, mem=m, m=m)
|
||||
|
||||
__name__ = "distinguish"
|
||||
|
||||
|
||||
distinguish = Distinguisher()
|
||||
161
estimator_new/lwe_parameters.py
Normal file
@@ -0,0 +1,161 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from sage.all import oo, binomial, log, sqrt, ceil
|
||||
from dataclasses import dataclass
|
||||
from copy import copy
|
||||
from .nd import NoiseDistribution
|
||||
from .errors import InsufficientSamplesError
|
||||
|
||||
|
||||
@dataclass
|
||||
class LWEParameters:
|
||||
n: int
|
||||
q: int
|
||||
Xs: NoiseDistribution
|
||||
Xe: NoiseDistribution
|
||||
m: int = oo
|
||||
tag: str = None
|
||||
|
||||
def __post_init__(self, **kwds):
|
||||
self.Xs = copy(self.Xs)
|
||||
self.Xs.n = self.n
|
||||
if self.m < oo:
|
||||
self.Xe = copy(self.Xe)
|
||||
self.Xe.n = self.m
|
||||
|
||||
def normalize(self):
|
||||
"""
|
||||
EXAMPLES:
|
||||
|
||||
We perform the normal form transformation if χ_e < χ_s and we have enough samples (m ≥ 2n)::
|
||||
|
||||
>>> from estimator import *
|
||||
>>> Xs=ND.DiscreteGaussian(2.0)
|
||||
>>> Xe=ND.DiscreteGaussian(1.58)
|
||||
>>> LWE.Parameters(n=512, q=8192, Xs=Xs, Xe=Xe).normalize()
|
||||
LWEParameters(n=512, q=8192, Xs=D(σ=1.58), Xe=D(σ=1.58), m=+Infinity, tag=None)
|
||||
|
||||
If there are too few samples for the normal form transformation (m < 2n), we swap the secret and the noise::
|
||||
|
||||
>>> from estimator import *
|
||||
>>> Xs=ND.DiscreteGaussian(2.0)
|
||||
>>> Xe=ND.DiscreteGaussian(1.58)
|
||||
>>> LWE.Parameters(n=512, q=8192, Xs=Xs, Xe=Xe, m=512).normalize()
|
||||
LWEParameters(n=512, q=8192, Xs=D(σ=1.58), Xe=D(σ=2.00), m=512, tag=None)
|
||||
|
||||
"""
|
||||
if self.m < 1:
|
||||
raise InsufficientSamplesError(f"m={self.m} < 1")
|
||||
|
||||
# Normal form transformation
|
||||
if self.Xe < self.Xs and self.m >= 2 * self.n:
|
||||
return LWEParameters(
|
||||
n=self.n, q=self.q, Xs=self.Xe, Xe=self.Xe, m=self.m - self.n, tag=self.tag
|
||||
)
|
||||
# swap secret and noise
|
||||
# TODO: this is somewhat arbitrary
|
||||
if self.Xe < self.Xs and self.m < 2 * self.n:
|
||||
return LWEParameters(n=self.n, q=self.q, Xs=self.Xe, Xe=self.Xs, m=self.n, tag=self.tag)
|
||||
|
||||
# nothing to do
|
||||
return self
|
||||
|
||||
def updated(self, **kwds):
|
||||
"""
|
||||
Return a new set of parameters updated according to ``kwds``.
|
||||
|
||||
:param kwds: We set ``key`` to ``value`` in the new set of parameters.
|
||||
|
||||
EXAMPLE::
|
||||
|
||||
>>> from estimator import *
|
||||
>>> Kyber512
|
||||
LWEParameters(n=512, q=3329, Xs=D(σ=1.22), Xe=D(σ=1.22), m=512, tag='Kyber 512')
|
||||
>>> Kyber512.updated(m=1337)
|
||||
LWEParameters(n=512, q=3329, Xs=D(σ=1.22), Xe=D(σ=1.22), m=1337, tag='Kyber 512')
|
||||
|
||||
"""
|
||||
d = dict(self.__dict__)
|
||||
d.update(kwds)
|
||||
return LWEParameters(**d)
|
||||
|
||||
def amplify_m(self, m):
|
||||
"""
|
||||
Return a LWE instance parameters with ``m`` samples produced from the samples in this instance.
|
||||
|
||||
:param m: New number of samples.
|
||||
|
||||
EXAMPLE::
|
||||
|
||||
>>> from sage.all import binomial, log, sqrt
|
||||
>>> from estimator import *
|
||||
>>> Kyber512
|
||||
LWEParameters(n=512, q=3329, Xs=D(σ=1.22), Xe=D(σ=1.22), m=512, tag='Kyber 512')
|
||||
>>> Kyber512.amplify_m(2**100)
|
||||
LWEParameters(n=512, q=3329, Xs=D(σ=1.22), Xe=D(σ=4.58), m=..., tag='Kyber 512')
|
||||
|
||||
We can produce 2^100 samples by random ± linear combinations of 12 vectors::
|
||||
|
||||
>>> float(sqrt(12.)), float(log(binomial(1024, 12), 2.0)) + 12
|
||||
(3.46..., 103.07...)
|
||||
|
||||
"""
|
||||
if m <= self.m:
|
||||
return self
|
||||
if self.m == oo:
|
||||
return self
|
||||
d = dict(self.__dict__)
|
||||
|
||||
if self.Xe.mean != 0:
|
||||
raise NotImplementedError("Amplifying for μ≠0 not implemented.")
|
||||
|
||||
for k in range(ceil(log(m, 2.0))):
|
||||
# - binom(m, k) positions
|
||||
# - two signs per position (+1,-1)
|
||||
# - all "-" and all "+" are the same
|
||||
if binomial(self.m, k) * 2 ** k - 1 >= m:
|
||||
Xe = NoiseDistribution.DiscreteGaussian(float(sqrt(k) * self.Xe.stddev))
|
||||
d["Xe"] = Xe
|
||||
d["m"] = ceil(m)
|
||||
return LWEParameters(**d)
|
||||
else:
|
||||
raise NotImplementedError(
|
||||
f"Cannot amplify to ≈2^{log(m,2):1} using {{+1,-1}} additions."
|
||||
)
|
||||
|
||||
def switch_modulus(self):
|
||||
"""
|
||||
Apply modulus switching and return new instance.
|
||||
|
||||
See [JMC:AlbPlaSco15]_ for details.
|
||||
|
||||
EXAMPLE::
|
||||
|
||||
>>> from estimator import *
|
||||
>>> LWE.Parameters(n=128, q=7681, Xs=ND.UniformMod(3), Xe=ND.UniformMod(11)).switch_modulus()
|
||||
LWEParameters(n=128, q=5289, Xs=D(σ=0.82), Xe=D(σ=3.08), m=+Infinity, tag=None)
|
||||
|
||||
"""
|
||||
n = self.Xs.density * len(self.Xs)
|
||||
|
||||
# rounding noise: n coefficients uniform in (-0.5, 0.5), each scaled by stddev(χ_s)
|
||||
Xr_stddev = sqrt(n / 12) * self.Xs.stddev # rounding noise
|
||||
# χ_r == p/q ⋅ χ_e # we want the rounding noise to match the scaled noise
|
||||
p = ceil(Xr_stddev * self.q / self.Xe.stddev)
|
||||
|
||||
scale = float(p) / self.q
|
||||
|
||||
# there is no point in scaling if the improvement is eaten up by rounding noise
|
||||
if scale > 1 / sqrt(2):
|
||||
return self
|
||||
|
||||
return LWEParameters(
|
||||
self.n,
|
||||
p,
|
||||
Xs=self.Xs,
|
||||
Xe=NoiseDistribution.DiscreteGaussian(sqrt(2) * self.Xe.stddev * scale),
|
||||
m=self.m,
|
||||
tag=self.tag + ",scaled" if self.tag else None,
|
||||
)
|
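# Hand computation for the doctest above: UniformMod(3) has stddev ≈ 0.816 and
# density 2/3, so n ≈ 85.3 and the rounding noise has stddev sqrt(85.3/12)·0.816 ≈ 2.177;
# UniformMod(11) has stddev ≈ 3.162, giving p = ceil(2.177·7681/3.162) = 5289 and a
# new error stddev sqrt(2)·3.162·(5289/7681) ≈ 3.08, as printed.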
||||
|
||||
def __hash__(self):
|
||||
return hash((self.n, self.q, self.Xs, self.Xe, self.m, self.tag))
|
||||
596
estimator_new/lwe_primal.py
Normal file
@@ -0,0 +1,596 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Estimate cost of solving LWE using primal attacks.
|
||||
|
||||
See :ref:`LWE Primal Attacks` for an introduction to what is available.
|
||||
|
||||
"""
|
||||
from functools import partial
|
||||
|
||||
from sage.all import oo, ceil, sqrt, log, RR, ZZ, binomial, cached_function
|
||||
from .reduction import delta as deltaf
|
||||
from .reduction import cost as costf
|
||||
from .util import local_minimum
|
||||
from .cost import Cost
|
||||
from .lwe_parameters import LWEParameters
|
||||
from .simulator import normalize as simulator_normalize
|
||||
from .prob import drop as prob_drop
|
||||
from .prob import amplify as prob_amplify
|
||||
from .prob import babai as prob_babai
|
||||
from .prob import mitm_babai_probability
|
||||
from .io import Logging
|
||||
from .conf import red_cost_model as red_cost_model_default
|
||||
from .conf import red_shape_model as red_shape_model_default
|
||||
from .conf import red_simulator as red_simulator_default
|
||||
|
||||
|
||||
class PrimalUSVP:
|
||||
"""
|
||||
Estimate cost of solving LWE via uSVP reduction.
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def _xi_factor(Xs, Xe):
|
||||
xi = RR(1)
|
||||
if Xs < Xe:
|
||||
xi = Xe.stddev / Xs.stddev
|
||||
return xi
|
||||
|
||||
@staticmethod
|
||||
def _solve_for_d(params, m, beta, tau, xi):
|
||||
"""
|
||||
Find smallest d ∈ [n,m] to satisfy uSVP condition.
|
||||
|
||||
If no such d exists, return the upper bound m.
|
||||
"""
|
||||
# Find the smallest d ∈ [n,m] s.t. a*d^2 + b*d + c >= 0
|
||||
delta = deltaf(beta)
|
||||
a = -log(delta)
|
||||
C = log(params.Xe.stddev ** 2 * (beta - 1) + tau ** 2) / 2.0
|
||||
b = log(delta) * (2 * beta - 1) + log(params.q) - C
|
||||
c = log(tau) + params.n * log(xi) - (params.n + 1) * log(params.q)
|
||||
n = params.n
|
||||
if a * n * n + b * n + c >= 0: # trivial case
|
||||
return n
|
||||
|
||||
# solve for ad^2 + bd + c == 0
|
||||
disc = b * b - 4 * a * c # the discriminant
|
||||
if disc < 0: # no solution, return m
|
||||
return m
|
||||
|
||||
# compute the two solutions
|
||||
d1 = (-b + sqrt(disc)) / (2 * a)
|
||||
d2 = (-b - sqrt(disc)) / (2 * a)
|
||||
if a > 0: # the only possible solution is ceiling(d2)
|
||||
return min(m, ceil(d2))
|
||||
|
||||
# the case a<=0:
|
||||
# if n is to the left of d1 then the first solution is ceil(d1)
|
||||
if n <= d1:
|
||||
return min(m, ceil(d1))
|
||||
|
||||
# otherwise, n must be larger than d2 (since an^2+bn+c<0) so no solution
|
||||
return m
|
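# Derivation note: the quadratic above is the uSVP success condition checked in
# ``cost_gsa`` below (lhs <= rhs), multiplied through by d and collected in powers
# of d. With C = log(σ_e²·(β-1) + τ²)/2 this reads
#   -log(δ)·d² + (log(δ)·(2β-1) + log(q) - C)·d + log(τ) + n·log(ξ) - (n+1)·log(q) >= 0,
# matching a, b, c as computed above.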
||||
|
||||
@staticmethod
|
||||
@cached_function
|
||||
def cost_gsa(
|
||||
beta: int,
|
||||
params: LWEParameters,
|
||||
m: int = oo,
|
||||
tau=None,
|
||||
d=None,
|
||||
red_cost_model=red_cost_model_default,
|
||||
log_level=None,
|
||||
):
|
||||
|
||||
delta = deltaf(beta)
|
||||
xi = PrimalUSVP._xi_factor(params.Xs, params.Xe)
|
||||
m = min(2 * ceil(sqrt(params.n * log(params.q) / log(delta))), m)
|
||||
tau = params.Xe.stddev if tau is None else tau
|
||||
d = PrimalUSVP._solve_for_d(params, m, beta, tau, xi) if d is None else d
|
||||
assert d <= m + 1
|
||||
|
||||
lhs = log(sqrt(params.Xe.stddev ** 2 * (beta - 1) + tau ** 2))
|
||||
rhs = RR(
|
||||
log(delta) * (2 * beta - d - 1)
|
||||
+ (log(tau) + log(xi) * params.n + log(params.q) * (d - params.n - 1)) / d
|
||||
)
|
||||
|
||||
return costf(red_cost_model, beta, d, predicate=lhs <= rhs)
|
||||
|
||||
@staticmethod
|
||||
@cached_function
|
||||
def cost_simulator(
|
||||
beta: int,
|
||||
params: LWEParameters,
|
||||
simulator,
|
||||
m: int = oo,
|
||||
tau=None,
|
||||
d=None,
|
||||
red_cost_model=red_cost_model_default,
|
||||
log_level=None,
|
||||
):
|
||||
delta = deltaf(beta)
|
||||
if d is None:
|
||||
d = min(ceil(sqrt(params.n * log(params.q) / log(delta))), m) + 1
|
||||
xi = PrimalUSVP._xi_factor(params.Xs, params.Xe)
|
||||
tau = params.Xe.stddev if tau is None else tau
|
||||
|
||||
r = simulator(d=d, n=params.n, q=params.q, beta=beta, xi=xi, tau=tau)
|
||||
lhs = params.Xe.stddev ** 2 * (beta - 1) + tau ** 2
|
||||
if r[d - beta] > lhs:
|
||||
cost = costf(red_cost_model, beta, d)
|
||||
else:
|
||||
cost = costf(red_cost_model, beta, d, predicate=False)
|
||||
return cost
|
||||
|
||||
def __call__(
|
||||
self,
|
||||
params: LWEParameters,
|
||||
red_cost_model=red_cost_model_default,
|
||||
red_shape_model=red_shape_model_default,
|
||||
optimize_d=True,
|
||||
log_level=1,
|
||||
**kwds,
|
||||
):
|
||||
"""
|
||||
Estimate cost of solving LWE via uSVP reduction.
|
||||
|
||||
:param params: LWE parameters.
|
||||
:param red_cost_model: How to cost lattice reduction.
|
||||
:param red_shape_model: How to model the shape of a reduced basis.
|
||||
:param optimize_d: Attempt to find minimal d, too.
|
||||
:return: A cost dictionary.
|
||||
|
||||
The returned cost dictionary has the following entries:
|
||||
|
||||
- ``rop``: Total number of word operations (≈ CPU cycles).
|
||||
- ``red``: Number of word operations in lattice reduction.
|
||||
- ``δ``: Root-Hermite factor targeted by lattice reduction.
|
||||
- ``β``: BKZ block size.
|
||||
- ``d``: Lattice dimension.
|
||||
|
||||
EXAMPLE::
|
||||
|
||||
>>> from estimator import *
|
||||
>>> LWE.primal_usvp(Kyber512)
|
||||
rop: ≈2^148.0, red: ≈2^148.0, δ: 1.003941, β: 406, d: 998, tag: usvp
|
||||
|
||||
>>> params = LWE.Parameters(n=200, q=127, Xs=ND.UniformMod(3), Xe=ND.UniformMod(3))
|
||||
>>> LWE.primal_usvp(params, red_shape_model="cn11")
|
||||
rop: ≈2^91.2, red: ≈2^91.2, δ: 1.006114, β: 209, d: 388, tag: usvp
|
||||
|
||||
>>> LWE.primal_usvp(params, red_shape_model=Simulator.CN11)
|
||||
rop: ≈2^91.2, red: ≈2^91.2, δ: 1.006114, β: 209, d: 388, tag: usvp
|
||||
|
||||
>>> LWE.primal_usvp(params, red_shape_model=Simulator.CN11, optimize_d=False)
|
||||
rop: ≈2^91.3, red: ≈2^91.3, δ: 1.006114, β: 209, d: 400, tag: usvp
|
||||
|
||||
The success condition was formulated in [USENIX:ADPS16]_ and studied/verified in
|
||||
[AC:AGVW17]_, [C:DDGR20]_, [PKC:PosVir21]_. The treatment of small secrets is from
|
||||
[ACISP:BaiGal14]_.
|
||||
|
||||
"""
|
||||
params = LWEParameters.normalize(params)
|
||||
|
||||
# allow for a larger embedding lattice dimension: Bai and Galbraith
|
||||
m = params.m + params.n if params.Xs <= params.Xe else params.m
|
||||
|
||||
if red_shape_model == "gsa":
|
||||
with local_minimum(40, 2 * params.n) as it:
|
||||
for beta in it:
|
||||
cost = self.cost_gsa(
|
||||
beta=beta, params=params, m=m, red_cost_model=red_cost_model, **kwds
|
||||
)
|
||||
it.update(cost)
|
||||
cost = it.y
|
||||
cost["tag"] = "usvp"
|
||||
cost["problem"] = params
|
||||
return cost
|
||||
|
||||
try:
|
||||
red_shape_model = simulator_normalize(red_shape_model)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
# step 0. establish baseline
|
||||
cost_gsa = self(
|
||||
params,
|
||||
red_cost_model=red_cost_model,
|
||||
red_shape_model="gsa",
|
||||
)
|
||||
|
||||
Logging.log("usvp", log_level + 1, f"GSA: {repr(cost_gsa)}")
|
||||
|
||||
f = partial(
|
||||
self.cost_simulator,
|
||||
simulator=red_shape_model,
|
||||
red_cost_model=red_cost_model,
|
||||
m=m,
|
||||
params=params,
|
||||
)
|
||||
|
||||
# step 1. find β
|
||||
|
||||
with local_minimum(
|
||||
cost_gsa["beta"] - ceil(0.10 * cost_gsa["beta"]),
|
||||
cost_gsa["beta"] + ceil(0.20 * cost_gsa["beta"]),
|
||||
) as it:
|
||||
for beta in it:
|
||||
it.update(f(beta=beta, **kwds))
|
||||
cost = it.y
|
||||
|
||||
Logging.log("usvp", log_level, f"Opt-β: {repr(cost)}")
|
||||
|
||||
if cost and optimize_d:
|
||||
# step 2. find d
|
||||
with local_minimum(params.n, stop=cost["d"] + 1) as it:
|
||||
for d in it:
|
||||
it.update(f(d=d, beta=cost["beta"], **kwds))
|
||||
cost = it.y
|
||||
Logging.log("usvp", log_level + 1, f"Opt-d: {repr(cost)}")
|
||||
|
||||
cost["tag"] = "usvp"
|
||||
cost["problem"] = params
|
||||
return cost
|
||||
|
||||
__name__ = "primal_usvp"
|
||||
|
||||
|
||||
primal_usvp = PrimalUSVP()
|
||||
|
||||
|
||||
class PrimalHybrid:
|
||||
@classmethod
|
||||
def babai_cost(cls, d):
|
||||
return Cost(rop=max(d, 1) ** 2)
|
||||
|
||||
@classmethod
|
||||
def svp_dimension(cls, r, D):
|
||||
"""
|
||||
Return η for a given lattice shape and distance.
|
||||
|
||||
:param r: squared Gram-Schmidt norms
|
||||
|
||||
"""
|
||||
from fpylll.util import gaussian_heuristic
|
||||
|
||||
d = len(r)
|
||||
for i, _ in enumerate(r):
|
||||
if gaussian_heuristic(r[i:]) < D.stddev ** 2 * (d - i):
|
||||
return ZZ(d - (i - 1))
|
||||
return ZZ(2)
|
||||
|
||||
@staticmethod
|
||||
@cached_function
|
||||
def cost(
|
||||
beta: int,
|
||||
params: LWEParameters,
|
||||
zeta: int = 0,
|
||||
babai=False,
|
||||
mitm=False,
|
||||
m: int = oo,
|
||||
d: int = None,
|
||||
simulator=red_simulator_default,
|
||||
red_cost_model=red_cost_model_default,
|
||||
log_level=5,
|
||||
):
|
||||
"""
|
||||
Cost of the hybrid attack.
|
||||
|
||||
:param beta: Block size.
|
||||
:param params: LWE parameters.
|
||||
:param zeta: Guessing dimension ζ ≥ 0.
|
||||
:param babai: Insist on Babai's algorithm for finding close vectors.
|
||||
:param mitm: Simulate MITM approach (√ of search space).
|
||||
:param m: We accept the number of samples to consider from the calling function.
|
||||
:param d: We optionally accept the dimension to pick.
|
||||
|
||||
.. note :: This is the lowest-level function; it runs no optimization and merely reports
|
||||
costs.
|
||||
|
||||
"""
|
||||
if d is None:
|
||||
delta = deltaf(beta)
|
||||
d = min(ceil(sqrt(params.n * log(params.q) / log(delta))), m) + 1
|
||||
d -= zeta
|
||||
xi = PrimalUSVP._xi_factor(params.Xs, params.Xe)
|
||||
|
||||
# 1. Simulate BKZ-β
|
||||
# TODO: pick τ
|
||||
r = simulator(d, params.n - zeta, params.q, beta, xi=xi)
|
||||
bkz_cost = costf(red_cost_model, beta, d)
|
||||
|
||||
# 2. Required SVP dimension η
|
||||
if babai:
|
||||
eta = 2
|
||||
svp_cost = PrimalHybrid.babai_cost(d)
|
||||
else:
|
||||
# we scaled the lattice so that χ_e is what we want
|
||||
eta = PrimalHybrid.svp_dimension(r, params.Xe)
|
||||
svp_cost = costf(red_cost_model, eta, eta)
|
||||
# when η ≪ β, lifting may be a bigger cost
|
||||
svp_cost["rop"] += PrimalHybrid.babai_cost(d - eta)["rop"]
|
||||
|
||||
# 3. Search
|
||||
# We need to do one BDD call at least
|
||||
search_space, probability, hw = 1, 1.0, 0
|
||||
|
||||
# MITM or no MITM
|
||||
# TODO: this is rather clumsy as a model
|
||||
def ssf(x):
|
||||
if mitm:
|
||||
return RR(sqrt(x))
|
||||
else:
|
||||
return x
|
||||
|
||||
# e.g. (-1, 1) -> two non-zero per entry
|
||||
base = params.Xs.bounds[1] - params.Xs.bounds[0]
|
||||
|
||||
if zeta:
|
||||
# the number of non-zero entries
|
||||
h = ceil(len(params.Xs) * params.Xs.density)
|
||||
probability = RR(prob_drop(params.n, h, zeta))
|
||||
hw = 1
|
||||
while hw < min(h, zeta):
|
||||
new_search_space = binomial(zeta, hw) * base ** hw
|
||||
if svp_cost.repeat(ssf(search_space + new_search_space))["rop"] < bkz_cost["rop"]:
|
||||
search_space += new_search_space
|
||||
probability += prob_drop(params.n, h, zeta, fail=hw)
|
||||
hw += 1
|
||||
else:
|
||||
break
|
||||
|
||||
svp_cost = svp_cost.repeat(ssf(search_space))
|
||||
|
||||
if mitm and zeta > 0:
|
||||
if babai:
|
||||
probability *= mitm_babai_probability(r, params.Xe.stddev, params.q)
|
||||
else:
|
||||
# TODO: the probability in this case needs to be analysed
|
||||
probability *= 1
|
||||
|
||||
if eta <= 20 and d >= 0: # NOTE: η: somewhat arbitrary bound, d: we may guess it all
|
||||
probability *= RR(prob_babai(r, sqrt(d) * params.Xe.stddev))
|
||||
|
||||
ret = Cost()
|
||||
ret["rop"] = bkz_cost["rop"] + svp_cost["rop"]
|
||||
ret["red"] = bkz_cost["rop"]
|
||||
ret["svp"] = svp_cost["rop"]
|
||||
ret["beta"] = beta
|
||||
ret["eta"] = eta
|
||||
ret["zeta"] = zeta
|
||||
ret["|S|"] = search_space
|
||||
ret["d"] = d
|
||||
ret["prob"] = probability
|
||||
|
||||
ret.register_impermanent(
|
||||
{"|S|": False},
|
||||
rop=True,
|
||||
red=True,
|
||||
svp=True,
|
||||
eta=False,
|
||||
zeta=False,
|
||||
prob=False,
|
||||
)
|
||||
|
||||
# 4. Repeat whole experiment ~1/prob times
|
||||
if probability and not RR(probability).is_NaN():
|
||||
ret = ret.repeat(
|
||||
prob_amplify(0.99, probability),
|
||||
)
|
||||
else:
|
||||
return Cost(rop=oo)
|
||||
|
||||
return ret
|
||||
|
||||
@classmethod
|
||||
def cost_zeta(
|
||||
cls,
|
||||
zeta: int,
|
||||
params: LWEParameters,
|
||||
red_shape_model=red_simulator_default,
|
||||
red_cost_model=red_cost_model_default,
|
||||
m: int = oo,
|
||||
babai: bool = True,
|
||||
mitm: bool = True,
|
||||
optimize_d=True,
|
||||
log_level=5,
|
||||
**kwds,
|
||||
):
|
||||
"""
|
||||
This function optimizes costs for a fixed guessing dimension ζ.
|
||||
"""
|
||||
|
||||
# step 0. establish baseline
|
||||
baseline_cost = primal_usvp(
|
||||
params,
|
||||
red_shape_model=red_shape_model,
|
||||
red_cost_model=red_cost_model,
|
||||
optimize_d=False,
|
||||
log_level=log_level + 1,
|
||||
**kwds,
|
||||
)
|
||||
Logging.log("bdd", log_level, f"H0: {repr(baseline_cost)}")
|
||||
|
||||
f = partial(
|
||||
cls.cost,
|
||||
params=params,
|
||||
zeta=zeta,
|
||||
babai=babai,
|
||||
mitm=mitm,
|
||||
simulator=red_shape_model,
|
||||
red_cost_model=red_cost_model,
|
||||
m=m,
|
||||
**kwds,
|
||||
)
|
||||
|
||||
# step 1. optimize β
|
||||
with local_minimum(
|
||||
40, baseline_cost["beta"] + 1, precision=2, log_level=log_level + 1
|
||||
) as it:
|
||||
for beta in it:
|
||||
it.update(f(beta))
|
||||
for beta in it.neighborhood:
|
||||
it.update(f(beta))
|
||||
cost = it.y
|
||||
|
||||
Logging.log("bdd", log_level, f"H1: {repr(cost)}")
|
||||
|
||||
# step 2. optimize d
|
||||
if cost and cost.get("tag", "XXX") != "usvp" and optimize_d:
|
||||
with local_minimum(
|
||||
params.n, cost["d"] + cost["zeta"] + 1, log_level=log_level + 1
|
||||
) as it:
|
||||
for d in it:
|
||||
it.update(f(beta=cost["beta"], d=d))
|
||||
cost = it.y
|
||||
Logging.log("bdd", log_level, f"H2: {repr(cost)}")
|
||||
|
||||
if cost is None:
|
||||
return Cost(rop=oo)
|
||||
return cost
|
||||
|
||||
def __call__(
|
||||
self,
|
||||
params: LWEParameters,
|
||||
babai: bool = True,
|
||||
zeta: int = None,
|
||||
mitm: bool = True,
|
||||
red_shape_model=red_shape_model_default,
|
||||
red_cost_model=red_cost_model_default,
|
||||
log_level=1,
|
||||
**kwds,
|
||||
):
|
||||
"""
|
||||
Estimate the cost of the hybrid attack and its variants.
|
||||
|
||||
:param params: LWE parameters.
|
||||
:param zeta: Guessing dimension ζ ≥ 0.
|
||||
:param babai: Insist on Babai's algorithm for finding close vectors.
|
||||
:param mitm: Simulate MITM approach (√ of search space).
|
||||
:return: A cost dictionary
|
||||
|
||||
The returned cost dictionary has the following entries:
|
||||
|
||||
- ``rop``: Total number of word operations (≈ CPU cycles).
|
||||
- ``red``: Number of word operations in lattice reduction.
|
||||
- ``δ``: Root-Hermite factor targeted by lattice reduction.
|
||||
- ``β``: BKZ block size.
|
||||
- ``η``: Dimension of the final BDD call.
|
||||
- ``ζ``: Number of guessed coordinates.
|
||||
- ``|S|``: Guessing search space.
|
||||
- ``prob``: Probability of success in guessing.
|
||||
- ``↻``: How often to repeat the attack.
|
||||
- ``d``: Lattice dimension.
|
||||
|
||||
- When ζ = 0 this function essentially estimates the BDD strategy as given in [RSA:LiuNgu13]_.
|
||||
- When ζ ≠ 0 and ``babai=True`` this function estimates the hybrid attack as given in
|
||||
[C:HowgraveGraham07]_
|
||||
- When ζ ≠ 0 and ``babai=False`` this function estimates the hybrid attack as given in
|
||||
[SAC:AlbCurWun19]_
|
||||
|
||||
EXAMPLE::
|
||||
|
||||
>>> from estimator import *
|
||||
>>> LWE.primal_hybrid(Kyber512.updated(Xs=ND.SparseTernary(512, 16)), mitm = False, babai = False)
|
||||
rop: ≈2^94.9, red: ≈2^94.3, svp: ≈2^93.3, β: 178, η: 21, ζ: 256, |S|: ≈2^56.6, d: 531, prob: 0.003, ...
|
||||
|
||||
>>> LWE.primal_hybrid(Kyber512.updated(Xs=ND.SparseTernary(512, 16)), mitm = False, babai = True)
|
||||
rop: ≈2^94.7, red: ≈2^94.0, svp: ≈2^93.3, β: 169, η: 2, ζ: 256, |S|: ≈2^62.4, d: 519, prob: 0.001, ...
|
||||
|
||||
>>> LWE.primal_hybrid(Kyber512.updated(Xs=ND.SparseTernary(512, 16)), mitm = True, babai = False)
|
||||
rop: ≈2^75.4, red: ≈2^75.0, svp: ≈2^73.3, β: 102, η: 15, ζ: 322, |S|: ≈2^82.9, d: 354, prob: 0.001, ...
|
||||
|
||||
>>> LWE.primal_hybrid(Kyber512.updated(Xs=ND.SparseTernary(512, 16)), mitm = True, babai = True)
|
||||
rop: ≈2^86.6, red: ≈2^85.7, svp: ≈2^85.6, β: 104, η: 2, ζ: 371, |S|: ≈2^91.1, d: 308, prob: ≈2^-21.3, ...
|
||||
|
||||
"""
|
||||
|
||||
if zeta == 0:
|
||||
tag = "bdd"
|
||||
else:
|
||||
tag = "hybrid"
|
||||
|
||||
params = LWEParameters.normalize(params)
|
||||
|
||||
# allow for a larger embedding lattice dimension: Bai and Galbraith
|
||||
m = params.m + params.n if params.Xs <= params.Xe else params.m
|
||||
|
||||
red_shape_model = simulator_normalize(red_shape_model)
|
||||
|
||||
f = partial(
|
||||
self.cost_zeta,
|
||||
params=params,
|
||||
red_shape_model=red_shape_model,
|
||||
red_cost_model=red_cost_model,
|
||||
babai=babai,
|
||||
mitm=mitm,
|
||||
m=m,
|
||||
log_level=log_level + 1,
|
||||
)
|
||||
|
||||
if zeta is None:
|
||||
with local_minimum(0, params.n, log_level=log_level) as it:
|
||||
for zeta in it:
|
||||
it.update(
|
||||
f(
|
||||
zeta=zeta,
|
||||
optimize_d=False,
|
||||
**kwds,
|
||||
)
|
||||
)
|
||||
# TODO: this should not be required
|
||||
cost = min(it.y, f(0, optimize_d=False, **kwds))
|
||||
else:
|
||||
cost = f(zeta=zeta)
|
||||
|
||||
cost["tag"] = tag
|
||||
cost["problem"] = params
|
||||
|
||||
if tag == "bdd":
|
||||
try:
|
||||
del cost["|S|"]
|
||||
del cost["prob"]
|
||||
del cost["repetitions"]
|
||||
del cost["zeta"]
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
return cost
|
||||
|
||||
__name__ = "primal_hybrid"
|
||||
|
||||
|
||||
primal_hybrid = PrimalHybrid()
|
||||
|
||||
|
||||
def primal_bdd(
|
||||
params: LWEParameters,
|
||||
red_shape_model=red_shape_model_default,
|
||||
red_cost_model=red_cost_model_default,
|
||||
log_level=1,
|
||||
**kwds,
|
||||
):
|
||||
"""
|
||||
Estimate the cost of the BDD approach as given in [RSA:LiuNgu13]_.
|
||||
|
||||
:param params: LWE parameters.
|
||||
:param red_cost_model: How to cost lattice reduction
|
||||
:param red_shape_model: How to model the shape of a reduced basis
|
||||
|
||||
"""
|
||||
|
||||
return primal_hybrid(
|
||||
params,
|
||||
zeta=0,
|
||||
mitm=False,
|
||||
babai=False,
|
||||
red_shape_model=red_shape_model,
|
||||
red_cost_model=red_cost_model,
|
||||
log_level=log_level,
|
||||
**kwds,
|
||||
)
|
||||
374
estimator_new/nd.py
Normal file
@@ -0,0 +1,374 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from dataclasses import dataclass
|
||||
|
||||
from sage.all import parent, RR, RealField, sqrt, pi, oo, ceil, binomial, exp
|
||||
|
||||
|
||||
def stddevf(sigma):
|
||||
"""
|
||||
Gaussian width parameter σ → standard deviation.
|
||||
|
||||
:param sigma: Gaussian width parameter σ
|
||||
|
||||
EXAMPLE::
|
||||
|
||||
>>> from estimator.nd import stddevf
|
||||
>>> stddevf(64.0)
|
||||
25.532...
|
||||
|
||||
>>> stddevf(64)
|
||||
25.532...
|
||||
|
||||
>>> stddevf(RealField(256)(64)).prec()
|
||||
256
|
||||
|
||||
"""
|
||||
|
||||
try:
|
||||
prec = parent(sigma).prec()
|
||||
except AttributeError:
|
||||
prec = 0
|
||||
if prec > 0:
|
||||
FF = parent(sigma)
|
||||
else:
|
||||
FF = RR
|
||||
return FF(sigma) / FF(sqrt(2 * pi))
|
||||
|
||||
|
||||
def sigmaf(stddev):
|
||||
"""
|
||||
Standard deviation → Gaussian width parameter σ.
|
||||
|
||||
:param stddev: standard deviation
|
||||
|
||||
EXAMPLE::
|
||||
|
||||
>>> from estimator.nd import stddevf, sigmaf
|
||||
>>> n = 64.0
|
||||
>>> sigmaf(stddevf(n))
|
||||
64.000...
|
||||
|
||||
>>> sigmaf(RealField(128)(1.0))
|
||||
2.5066282746310005024157652848110452530
|
||||
>>> sigmaf(1.0)
|
||||
2.506628274631...
|
||||
>>> sigmaf(1)
|
||||
2.506628274631...
|
||||
|
||||
"""
|
||||
RR = parent(stddev)
|
||||
# check that we got ourselves a real number type
|
||||
try:
|
||||
if abs(RR(0.5) - 0.5) > 0.001:
|
||||
RR = RealField(53) # hardcode something
|
||||
except TypeError:
|
||||
RR = RealField(53) # hardcode something
|
||||
return RR(sqrt(2 * pi)) * stddev
|
||||
|
||||
|
||||
@dataclass
|
||||
class NoiseDistribution:
|
||||
"""
|
||||
All noise distributions are instances of this class.
|
||||
|
||||
"""
|
||||
|
||||
# cut-off for Gaussian distributions
|
||||
gaussian_tail_bound = 2
|
||||
|
||||
# probability that a coefficient falls within the cut-off
|
||||
gaussian_tail_prob = 1 - 2 * exp(-4 * pi)
|
||||
|
||||
stddev: float
|
||||
mean: float = 0
|
||||
n: int = None
|
||||
bounds: tuple = None
|
||||
density: float = 1.0 # Hamming weight / dimension.
|
||||
tag: str = ""
|
||||
|
||||
def __lt__(self, other):
|
||||
"""
|
||||
We compare distributions by comparing their standard deviation.
|
||||
|
||||
EXAMPLE::
|
||||
|
||||
>>> from estimator.nd import NoiseDistribution as ND
|
||||
>>> ND.DiscreteGaussian(2.0) < ND.CenteredBinomial(18)
|
||||
True
|
||||
>>> ND.DiscreteGaussian(3.0) < ND.CenteredBinomial(18)
|
||||
False
|
||||
>>> ND.DiscreteGaussian(4.0) < ND.CenteredBinomial(18)
|
||||
False
|
||||
|
||||
"""
|
||||
try:
|
||||
return self.stddev < other.stddev
|
||||
except AttributeError:
|
||||
return self.stddev < other
|
||||
|
||||
def __le__(self, other):
|
||||
"""
|
||||
We compare distributions by comparing their standard deviation.
|
||||
|
||||
EXAMPLE::
|
||||
|
||||
>>> from estimator.nd import NoiseDistribution as ND
|
||||
>>> ND.DiscreteGaussian(2.0) <= ND.CenteredBinomial(18)
|
||||
True
|
||||
>>> ND.DiscreteGaussian(3.0) <= ND.CenteredBinomial(18)
|
||||
True
|
||||
>>> ND.DiscreteGaussian(4.0) <= ND.CenteredBinomial(18)
|
||||
False
|
||||
|
||||
"""
|
||||
try:
|
||||
return self.stddev <= other.stddev
|
||||
except AttributeError:
|
||||
return self.stddev <= other
|
||||
|
||||
def __str__(self):
|
||||
"""
|
||||
EXAMPLE::
|
||||
|
||||
>>> from estimator.nd import NoiseDistribution as ND
|
||||
>>> ND.DiscreteGaussianAlpha(0.01, 7681)
|
||||
D(σ=30.64)
|
||||
|
||||
"""
|
||||
if self.n:
|
||||
return f"D(σ={float(self.stddev):.2f}, μ={float(self.mean):.2f}, n={int(self.n)})"
|
||||
else:
|
||||
return f"D(σ={float(self.stddev):.2f}, μ={float(self.mean):.2f})"
|
||||
|
||||
def __repr__(self):
|
||||
if self.mean == 0.0:
|
||||
return f"D(σ={float(self.stddev):.2f})"
|
||||
else:
|
||||
return f"D(σ={float(self.stddev):.2f}, μ={float(self.mean):.2f})"
|
||||
|
||||
def __hash__(self):
|
||||
"""
|
||||
EXAMPLE::
|
||||
|
||||
>>> from estimator.nd import NoiseDistribution as ND
|
||||
>>> hash(ND(3.0, 1.0)) == hash((3.0, 1.0, None))
|
||||
True
|
||||
|
||||
"""
|
||||
return hash((self.stddev, self.mean, self.n))
|
||||
|
||||
def __len__(self):
|
||||
"""
|
||||
EXAMPLE::
|
||||
|
||||
>>> from estimator.nd import NoiseDistribution as ND
|
||||
>>> D = ND.SparseTernary(1024, p=128, m=128)
|
||||
>>> len(D)
|
||||
1024
|
||||
>>> int(round(len(D) * float(D.density)))
|
||||
256
|
||||
|
||||
"""
|
||||
if self.n is not None:
|
||||
return self.n
|
||||
else:
|
||||
raise ValueError("Distribution has no length.")
|
||||
|
||||
@property
|
||||
def is_Gaussian_like(self):
|
||||
return ("Gaussian" in self.tag) or ("CenteredBinomial" in self.tag)
|
||||
|
||||
@property
|
||||
def is_bounded(self):
|
||||
return (self.bounds[1] - self.bounds[0]) < oo
|
||||
|
||||
@property
|
||||
def is_sparse(self):
|
||||
"""
|
||||
We consider a distribution "sparse" if its density is < 1/2.
|
||||
"""
|
||||
# NOTE: somewhat arbitrary
|
||||
return self.density < 0.5
|
||||
|
||||
def support_size(self, n=None, fraction=1.0):
|
||||
"""
|
||||
Compute the size of the support covering the probability given as fraction.
|
||||
|
||||
EXAMPLE::
|
||||
|
||||
>>> from estimator.nd import NoiseDistribution as ND
|
||||
>>> D = ND.Uniform(-3,3, 64)
|
||||
>>> D.support_size(fraction=.99)
|
||||
1207562882759477428726191443614714994252339953407098880
|
||||
>>> D = ND.SparseTernary(64, 8)
|
||||
>>> D.support_size()
|
||||
32016101348447354880
|
||||
"""
|
||||
if not n:
|
||||
if not self.n:
|
||||
raise ValueError(f"Length required to determine support size, but n was {n}.")
|
||||
n = self.n
|
||||
|
||||
if "SparseTernary" in self.tag:
|
||||
h = self.h
|
||||
# TODO: this is assuming that the non-zero entries are uniform over {-1,1}
|
||||
# need p and m for more accurate calculation
|
||||
size = 2 ** h * binomial(n, h) * RR(fraction)
|
||||
elif self.is_bounded:
|
||||
# TODO: this might be suboptimal/inaccurate for binomial distribution
|
||||
a, b = self.bounds
|
||||
size = RR(fraction) * (b - a + 1) ** n
|
||||
else:
|
||||
# Looks like nd is Gaussian
|
||||
# -> we'll treat it as bounded (with failure probability)
|
||||
t = self.gaussian_tail_bound
|
||||
p = self.gaussian_tail_prob
|
||||
|
||||
if p ** n < fraction:
|
||||
raise NotImplementedError(
|
||||
f"TODO(nd.support-size): raise t. {RR(p ** n)}, {n}, {fraction}"
|
||||
)
|
||||
|
||||
b = 2 * t * sigmaf(self.stddev) + 1
|
||||
return (2 * b + 1) ** n
|
||||
return ceil(size)
|
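# Hand check of the sparse branch against the doctest above: ND.SparseTernary(64, 8)
# has h = p + m = 16, so the support size is 2^16 · binomial(64, 16) ≈ 2^64.8,
# the 20-digit integer printed above.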
||||
|
||||
def get_hamming_weight(self, n=None):
|
||||
if hasattr(self, "h"):
|
||||
return self.h
|
||||
|
||||
if not n:
|
||||
if not self.n:
|
||||
raise ValueError("Length required to determine hamming weight.")
|
||||
n = self.n
|
||||
return round(n * self.density)
|
||||
|
||||
@staticmethod
|
||||
def DiscreteGaussian(stddev, mean=0, n=None):
|
||||
"""
|
||||
A discrete Gaussian distribution with standard deviation ``stddev`` per component.
|
||||
|
||||
EXAMPLE::
|
||||
|
||||
>>> from estimator.nd import NoiseDistribution as ND
|
||||
>>> ND.DiscreteGaussian(3.0, 1.0)
|
||||
D(σ=3.00, μ=1.00)
|
||||
|
||||
"""
|
||||
return NoiseDistribution(
|
||||
stddev=RR(stddev), mean=RR(mean), n=n, bounds=(-oo, oo), tag="DiscreteGaussian"
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def DiscreteGaussianAlpha(alpha, q, mean=0, n=None):
|
||||
"""
|
||||
A discrete Gaussian distribution with standard deviation α⋅q/√(2π) per component.
|
||||
|
||||
EXAMPLE::
|
||||
|
||||
>>> from estimator.nd import NoiseDistribution as ND
|
||||
>>> ND.DiscreteGaussianAlpha(0.001, 2048)
|
||||
D(σ=0.82)
|
||||
|
||||
"""
|
||||
stddev = stddevf(alpha * q)
|
||||
return NoiseDistribution.DiscreteGaussian(stddev=RR(stddev), mean=RR(mean), n=n)
|
||||
|
||||
@staticmethod
|
||||
def CenteredBinomial(eta, n=None):
|
||||
"""
|
||||
Sample a_1, …, a_η, b_1, …, b_η and return Σ(a_i - b_i).
|
||||
|
||||
EXAMPLE::
|
||||
|
||||
>>> from estimator.nd import NoiseDistribution as ND
|
||||
>>> ND.CenteredBinomial(8)
|
||||
D(σ=2.00)
|
||||
|
||||
"""
|
||||
stddev = sqrt(eta / 2.0)
|
||||
# TODO: density
|
||||
return NoiseDistribution(
|
||||
stddev=RR(stddev), mean=RR(0), n=n, bounds=(-eta, eta), tag="CenteredBinomial"
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def Uniform(a, b, n=None):
|
||||
"""
|
||||
Uniform distribution ∈ ``[a,b]``, endpoints inclusive.
|
||||
|
||||
EXAMPLE::
|
||||
|
||||
>>> from estimator.nd import NoiseDistribution as ND
|
||||
>>> ND.Uniform(-3, 3)
|
||||
D(σ=2.00)
|
||||
>>> ND.Uniform(-4, 3)
|
||||
D(σ=2.29, μ=-0.50)
|
||||
|
||||
"""
|
||||
if b < a:
|
||||
raise ValueError(f"upper limit must be larger than lower limit but got: {b} < {a}")
|
||||
m = b - a + 1
|
||||
mean = (a + b) / RR(2)
|
||||
stddev = sqrt((m ** 2 - 1) / RR(12))
|
||||
|
||||
if a <= 0 and 0 <= b:
|
||||
density = 1.0 - 1.0 / m
|
||||
else:
|
||||
density = 0.0
|
||||
|
||||
return NoiseDistribution(
|
||||
n=n, stddev=stddev, mean=mean, bounds=(a, b), density=density, tag="Uniform"
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def UniformMod(q, n=None):
|
||||
"""
|
||||
Uniform mod ``q``, with balanced representation.
|
||||
|
||||
EXAMPLE::
|
||||
|
||||
>>> from estimator.nd import NoiseDistribution as ND
|
||||
>>> ND.UniformMod(7)
|
||||
D(σ=2.00)
|
||||
>>> ND.UniformMod(8)
|
||||
D(σ=2.29, μ=-0.50)
|
||||
|
||||
|
||||
"""
|
||||
a = -(q // 2)
|
||||
b = q // 2
|
||||
if q % 2 == 0:
|
||||
b -= 1
|
||||
return NoiseDistribution.Uniform(a, b, n=n)
|
||||
|
||||
@staticmethod
|
||||
def SparseTernary(n, p, m=None):
|
||||
"""
|
||||
Distribution of vectors of length ``n`` with ``p`` entries of 1 and ``m`` entries of -1, rest 0.
|
||||
|
||||
EXAMPLE::
|
||||
>>> from estimator.nd import NoiseDistribution as ND
|
||||
>>> ND.SparseTernary(100, p=10)
|
||||
D(σ=0.45)
|
||||
>>> ND.SparseTernary(100, p=10, m=10)
|
||||
D(σ=0.45)
|
||||
>>> ND.SparseTernary(100, p=10, m=8)
|
||||
D(σ=0.42, μ=0.02)
|
||||
|
||||
"""
|
||||
if m is None:
|
||||
m = p
|
||||
|
||||
if n == 0:
|
||||
# this might happen in the dual attack
|
||||
return NoiseDistribution(stddev=0, mean=0, density=0, bounds=(-1, 1), tag="SparseTernary", n=0)
|
||||
mean = RR(p / n - m / n)
|
||||
stddev = RR(sqrt((p + m) / n))
|
||||
density = RR((p + m) / n)
|
||||
D = NoiseDistribution(
|
||||
stddev=stddev, mean=mean, density=density, bounds=(-1, 1), tag="SparseTernary", n=n
|
||||
)
|
||||
D.h = p + m
|
||||
return D
|
||||
131
estimator_new/prob.py
Normal file
@@ -0,0 +1,131 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from sage.all import binomial, ZZ, log, ceil, RealField, oo, exp, pi
|
||||
from sage.all import RealDistribution, RR, sqrt, prod, erf
|
||||
from .nd import sigmaf
|
||||
|
||||
|
||||
def mitm_babai_probability(r, stddev, q, fast=False):
|
||||
"""
|
||||
Compute the "e-admissibility" probability associated to the mitm step, according to
|
||||
[EPRINT:SonChe19]_
|
||||
|
||||
:param r: the squared GSO lengths
|
||||
:param stddev: the standard deviation of the error distribution
|
||||
:param q: the LWE modulus
|
||||
:param fast: toggle for setting p = 1 (faster, but underestimates security)
|
||||
:return: probability for the mitm process
|
||||
|
||||
# NOTE: the model sometimes outputs negative probabilities, we set p = 0 in this case
|
||||
"""
|
||||
|
||||
if fast:
|
||||
# overestimate the probability -> underestimate security
|
||||
p = 1
|
||||
else:
|
||||
# get non-squared norms
|
||||
R = [sqrt(s) for s in r]
|
||||
alphaq = sigmaf(stddev)
|
||||
probs = [
|
||||
RR(
|
||||
erf(s * sqrt(RR(pi)) / alphaq)
|
||||
+ (alphaq / s) * ((exp(-s * sqrt(RR(pi)) / alphaq) - 1) / RR(pi))
|
||||
)
|
||||
for s in R
|
||||
]
|
||||
p = RR(prod(probs))
|
||||
if p < 0 or p > 1:
|
||||
p = 0.0
|
||||
return p
|
||||
|
||||
|
||||
def babai(r, norm):
|
||||
"""
|
||||
Babai probability following [EPRINT:Wun16]_.
|
||||
|
||||
"""
|
||||
R = [RR(sqrt(t) / (2 * norm)) for t in r]
|
||||
T = RealDistribution("beta", ((len(r) - 1) / 2, 1.0 / 2))
|
||||
probs = [1 - T.cum_distribution_function(1 - s ** 2) for s in R]
|
||||
return prod(probs)
|
||||
|
||||
|
||||
def drop(n, h, k, fail=0, rotations=False):
|
||||
"""
|
||||
Probability that ``k`` randomly sampled components have ``fail`` non-zero components amongst
|
||||
them.
|
||||
|
||||
:param n: LWE dimension `n > 0`
|
||||
:param h: number of non-zero components
|
||||
:param k: number of components to ignore
|
||||
:param fail: we tolerate ``fail`` number of non-zero components amongst the `k` ignored
|
||||
components
|
||||
:param rotations: consider rotations of the basis to exploit ring structure (NTRU only)
|
||||
"""
|
||||
|
||||
N = n # population size
|
||||
K = n - h # number of success states in the population
|
||||
n = k # number of draws
|
||||
k = n - fail # number of observed successes
|
||||
prob_drop = binomial(K, k) * binomial(N - K, n - k) / binomial(N, n)
|
||||
if rotations:
|
||||
return 1 - (1 - prob_drop) ** N
|
||||
else:
|
||||
return prob_drop
|
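# Worked example (hand computation): drop(n=10, h=2, k=5) is the probability that
# the 5 ignored components hit none of the 2 non-zero ones, i.e.
# binomial(8, 5)·binomial(2, 0)/binomial(10, 5) = 56/252 = 2/9 ≈ 0.222.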
||||
|
||||
|
||||
def amplify(target_success_probability, success_probability, majority=False):
|
||||
"""
|
||||
Return the number of trials needed to amplify current `success_probability` to
|
||||
`target_success_probability`
|
||||
|
||||
:param target_success_probability: targeted success probability < 1
|
||||
:param success_probability: current success probability < 1
|
||||
:param majority: if `True` amplify a decisional problem, not a computational one
|
||||
if `False` then we assume that we can check solutions, so one success suffices
|
||||
|
||||
:returns: number of required trials to amplify
|
||||
"""
|
||||
if target_success_probability < success_probability:
|
||||
return ZZ(1)
|
||||
if success_probability == 0.0:
|
||||
return oo
|
||||
|
||||
prec = max(
|
||||
53,
|
||||
2 * ceil(abs(log(success_probability, 2))),
|
||||
2 * ceil(abs(log(1 - success_probability, 2))),
|
||||
2 * ceil(abs(log(target_success_probability, 2))),
|
||||
2 * ceil(abs(log(1 - target_success_probability, 2))),
|
||||
)
|
||||
prec = min(prec, 2048)
|
||||
RR = RealField(prec)
|
||||
|
||||
success_probability = RR(success_probability)
|
||||
target_success_probability = RR(target_success_probability)
|
||||
|
||||
try:
|
||||
if majority:
|
||||
eps = success_probability / 2
|
||||
return ceil(2 * log(2 - 2 * target_success_probability) / log(1 - 4 * eps ** 2))
|
||||
else:
|
||||
# target_success_probability = 1 - (1-success_probability)^trials
|
||||
return ceil(log(1 - target_success_probability) / log(1 - success_probability))
|
||||
except ValueError:
|
||||
return oo
|
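# Worked example (hand computation): in the computational case amplify(0.99, 0.01)
# returns ceil(log(0.01)/log(0.99)) = ceil(458.2) = 459 trials.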
||||
|
||||
|
||||
def amplify_sigma(target_advantage, sigma, q):
|
||||
"""
|
||||
Amplify distinguishing advantage for a given σ and q
|
||||
|
||||
:param target_advantage: the targeted distinguishing advantage
|
||||
:param sigma: (Lists of) Gaussian width parameters
|
||||
:param q: Modulus q > 0
|
||||
|
||||
"""
|
||||
try:
|
||||
sigma = sum(sigma_ ** 2 for sigma_ in sigma).sqrt()
|
||||
except TypeError:
|
||||
pass
|
||||
advantage = float(exp(-float(pi) * (float(sigma / q) ** 2)))
|
||||
return amplify(target_advantage, advantage, majority=True)
|
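# Hand computation matching the ``distinguish`` doctest in lwe_guess.py: with
# stddev = q = 2^32 we get sigma/q = sqrt(2π), so the advantage is exp(-2π²) ≈ 2^-28.5
# and majority amplification needs ≈ 2·ln(50)/advantage² ≈ 2^60 samples.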
||||
571
estimator_new/reduction.py
Normal file
@@ -0,0 +1,571 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Cost estimates for lattice reduction.
|
||||
"""
|
||||
|
||||
from sage.all import ZZ, RR, pi, e, find_root, ceil, log, oo, round
|
||||
from scipy.optimize import newton
|
||||
|
||||
|
||||
def _delta(beta):
|
||||
"""
|
||||
Compute δ from block size β without enforcing β ∈ ZZ.
|
||||
|
||||
δ for β ≤ 40 were computed as follows:
|
||||
|
||||
```
|
||||
# -*- coding: utf-8 -*-
|
||||
from fpylll import BKZ, IntegerMatrix
|
||||
|
||||
from multiprocessing import Pool
|
||||
from sage.all import mean, sqrt, exp, log, cputime
|
||||
|
||||
d, trials = 320, 32
|
||||
|
||||
def f((A, beta)):
|
||||
|
||||
par = BKZ.Param(block_size=beta, strategies=BKZ.DEFAULT_STRATEGY, flags=BKZ.AUTO_ABORT)
|
||||
q = A[-1, -1]
|
||||
d = A.nrows
|
||||
t = cputime()
|
||||
A = BKZ.reduction(A, par, float_type="dd")
|
||||
t = cputime(t)
|
||||
return t, exp(log(A[0].norm()/sqrt(q).n())/d)
|
||||
|
||||
if __name__ == '__main__':
|
||||
for beta in (5, 10, 15, 20, 25, 28, 30, 35, 40):
|
||||
delta = []
|
||||
t = []
|
||||
i = 0
|
||||
while i < trials:
|
||||
threads = int(open("delta.nthreads").read()) # make sure this file exists
|
||||
pool = Pool(threads)
|
||||
A = [(IntegerMatrix.random(d, "qary", beta=d//2, bits=50), beta) for j in range(threads)]
|
||||
for (t_, delta_) in pool.imap_unordered(f, A):
|
||||
t.append(t_)
|
||||
delta.append(delta_)
|
||||
i += threads
|
||||
print u"β: %2d, δ_0: %.5f, time: %5.1fs, (%2d,%2d)"%(beta, mean(delta), mean(t), i, threads)
|
||||
print
|
||||
```
|
||||
|
||||
"""
|
||||
    small = (
        (2, 1.02190),  # noqa
        (5, 1.01862),  # noqa
        (10, 1.01616),
        (15, 1.01485),
        (20, 1.01420),
        (25, 1.01342),
        (28, 1.01331),
        (40, 1.01295),
    )

    if beta <= 2:
        return RR(1.0219)
    elif beta < 40:
        for i in range(1, len(small)):
            if small[i][0] > beta:
                return RR(small[i - 1][1])
    elif beta == 40:
        return RR(small[-1][1])
    else:
        return RR(beta / (2 * pi * e) * (pi * beta) ** (1 / beta)) ** (1 / (2 * (beta - 1)))


def delta(beta):
    """
    Compute root-Hermite factor δ from block size β.

    :param beta: Block size.
    """
    beta = ZZ(round(beta))
    return _delta(beta)
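
# Interpretation sketch (a comment added for exposition): under the Geometric
# Series Assumption, BKZ-β is expected to find a shortest basis vector of norm
# ≈ δ^{d-1}·Vol(Λ)^{1/d} in a d-dimensional lattice Λ, so e.g.
#
#   >>> delta(500)   # doctest-style; ≈ 1.0034 by the asymptotic formula above
#
# quantifies the reduction quality that block size 500 buys.
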


def _beta_secant(delta):
    """
    Estimate required block size β for a given root-Hermite factor δ based on [PhD:Chen13]_.

    :param delta: Root-Hermite factor.

    EXAMPLE::

        >>> import estimator.reduction as RC
        >>> 50 == RC._beta_secant(1.0121)
        True
        >>> 100 == RC._beta_secant(1.0093)
        True
        >>> RC._beta_secant(1.0024)  # Chen reports 800
        808

    """
    # newton() will produce a "warning" if two subsequent function values are
    # indistinguishable (i.e. equal in terms of machine precision). In this case
    # newton() will return the value beta in the middle between the two values
    # k1, k2 for which the function values were indistinguishable.
    # Since f approaches zero for beta → +Infinity, this may be the case for very
    # large inputs, like beta = 1e16.
    # For now, these warnings just get printed and the value beta is used anyway.
    # This seems reasonable, since for such large inputs the exact value of beta
    # doesn't make much of a difference.
    try:
        beta = newton(
            lambda beta: RR(_delta(beta) - delta),
            100,
            fprime=None,
            args=(),
            tol=1.48e-08,
            maxiter=500,
        )
        beta = ceil(beta)
        if beta < 40:
            # newton may output beta < 40. The old beta method wouldn't do this. For
            # consistency, treat this try as "failed" and fall back to the old method.
            raise RuntimeError("β < 40")
        return beta
    except (RuntimeError, TypeError):
        # if something fails, use the old beta method
        beta = _beta_simple(delta)
        return beta


def _beta_find_root(delta):
    """
    Estimate required block size β for a given root-Hermite factor δ based on [PhD:Chen13]_.

    :param delta: Root-Hermite factor.

    TESTS::

        >>> import estimator.reduction as RC
        >>> RC._beta_find_root(RC.delta(500))
        500

    """
    # handle beta < 40 separately
    beta = ZZ(40)
    if _delta(beta) < delta:
        return beta

    try:
        beta = find_root(lambda beta: RR(_delta(beta) - delta), 40, 2 ** 16, maxiter=500)
        beta = ceil(beta - 1e-8)
    except RuntimeError:
        # finding the root failed; possible reasons:
        # 1. maxiter not sufficient
        # 2. no root in the given interval
        beta = _beta_simple(delta)
    return beta


def _beta_simple(delta):
    """
    Estimate required block size β for a given root-Hermite factor δ based on [PhD:Chen13]_.

    :param delta: Root-Hermite factor.

    TESTS::

        >>> import estimator.reduction as RC
        >>> RC._beta_simple(RC.delta(500))
        501

    """
    beta = ZZ(40)

    while _delta(2 * beta) > delta:
        beta *= 2
    while _delta(beta + 10) > delta:
        beta += 10
    while True:
        if _delta(beta) < delta:
            break
        beta += 1

    return beta


def beta(delta):
    """
    Estimate required block size β for a given root-Hermite factor δ based on [PhD:Chen13]_.

    :param delta: Root-Hermite factor.

    EXAMPLE::

        >>> import estimator.reduction as RC
        >>> 50 == RC.beta(1.0121)
        True
        >>> 100 == RC.beta(1.0093)
        True
        >>> RC.beta(1.0024)  # Chen reports 800
        808

    """
    # TODO: decide on one strategy (secant, find_root, old) and its error handling
    beta = _beta_find_root(delta)
    return beta


# BKZ Estimates


def svp_repeat(beta, d):
    """
    Return number of SVP calls in BKZ-β.

    :param beta: Block size ≥ 2.
    :param d: Lattice dimension.

    .. note :: Loosely based on experiments in [PhD:Chen13]_.

    .. note :: When d ≤ β we return 1.

    """
    if beta < d:
        return 8 * d
    else:
        return 1


def LLL(d, B=None):
    """
    Runtime estimation for the LLL algorithm based on [AC:CheNgu11]_.

    :param d: Lattice dimension.
    :param B: Bit-size of entries.

    """
    if B:
        return d ** 3 * B ** 2
    else:
        return d ** 3  # ignoring B for backward compatibility


def _BDGL16_small(beta, d, B=None):
    """
    Runtime estimation given β and assuming sieving is used to realise the SVP oracle for small
    dimensions following [SODA:BDGL16]_.

    :param beta: Block size ≥ 2.
    :param d: Lattice dimension.
    :param B: Bit-size of entries.

    TESTS::

        >>> from math import log
        >>> import estimator.reduction as RC
        >>> log(RC._BDGL16_small(500, 1024), 2.0)
        222.9

    """
    return LLL(d, B) + ZZ(2) ** RR(0.387 * beta + 16.4 + log(svp_repeat(beta, d), 2))


def _BDGL16_asymptotic(beta, d, B=None):
    """
    Runtime estimation given `β` and assuming sieving is used to realise the SVP oracle following
    [SODA:BDGL16]_.

    :param beta: Block size ≥ 2.
    :param d: Lattice dimension.
    :param B: Bit-size of entries.

    TESTS::

        >>> from math import log
        >>> import estimator.reduction as RC
        >>> log(RC._BDGL16_asymptotic(500, 1024), 2.0)
        175.4

    """
    # TODO we simply pick the same additive constant 16.4 as for the experimental result in [SODA:BDGL16]_
    return LLL(d, B) + ZZ(2) ** RR(0.292 * beta + 16.4 + log(svp_repeat(beta, d), 2))


def BDGL16(beta, d, B=None):
    """
    Runtime estimation given `β` and assuming sieving is used to realise the SVP oracle following
    [SODA:BDGL16]_.

    :param beta: Block size ≥ 2.
    :param d: Lattice dimension.
    :param B: Bit-size of entries.

    EXAMPLE::

        >>> from math import log
        >>> import estimator.reduction as RC
        >>> log(RC.BDGL16(500, 1024), 2.0)
        175.4

    """
    # TODO this crossover point is somewhat arbitrary
    if beta <= 90:
        return _BDGL16_small(beta, d, B)
    else:
        return _BDGL16_asymptotic(beta, d, B)


def LaaMosPol14(beta, d, B=None):
    """
    Runtime estimation for quantum sieving following [EPRINT:LaaMosPol14]_ and [PhD:Laarhoven15]_.

    :param beta: Block size ≥ 2.
    :param d: Lattice dimension.
    :param B: Bit-size of entries.

    EXAMPLE::

        >>> from math import log
        >>> import estimator.reduction as RC
        >>> log(RC.LaaMosPol14(500, 1024), 2.0)
        161.9

    """
    return LLL(d, B) + ZZ(2) ** RR((0.265 * beta + 16.4 + log(svp_repeat(beta, d), 2)))


def CheNgu12(beta, d, B=None):
    """
    Runtime estimation given β and assuming [CheNgu12]_ estimates are correct.

    :param beta: Block size ≥ 2.
    :param d: Lattice dimension.
    :param B: Bit-size of entries.

    The constants in this function were derived as follows based on Table 4 in
    [CheNgu12]_::

        >>> from sage.all import var, find_fit
        >>> dim = [100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250]
        >>> nodes = [39.0, 44.0, 49.0, 54.0, 60.0, 66.0, 72.0, 78.0, 84.0, 96.0, 99.0, 105.0, 111.0, 120.0, 127.0, 134.0]  # noqa
        >>> times = [c + log(200,2).n() for c in nodes]
        >>> T = list(zip(dim, nodes))
        >>> var("a,b,c,beta")
        (a, b, c, beta)
        >>> f = a*beta*log(beta, 2.0) + b*beta + c
        >>> f = f.function(beta)
        >>> f.subs(find_fit(T, f, solution_dict=True))
        beta |--> 0.2701...*beta*log(beta) - 1.0192...*beta + 16.10...

    The estimate 2^(0.18728 β⋅log_2(β) - 1.019⋅β + 16.10) is for the number of enumeration
    nodes, hence we need to multiply by the number of cycles to process one node. This cost per
    node is typically estimated as 64 cycles; here we charge 100 cycles (the ``log(100, 2)``
    term below).

    EXAMPLE::

        >>> from math import log
        >>> import estimator.reduction as RC
        >>> log(RC.CheNgu12(500, 1024), 2.0)
        365.70...

    """
    repeat = svp_repeat(beta, d)
    cost = RR(
        0.270188776350190 * beta * log(beta)
        - 1.0192050451318417 * beta
        + 16.10253135200765
        + log(100, 2)
    )
    return LLL(d, B) + repeat * ZZ(2) ** cost


def ABFKSW20(beta, d, B=None):
    """
    Enumeration cost according to [C:ABFKSW20]_.

    :param beta: Block size ≥ 2.
    :param d: Lattice dimension.
    :param B: Bit-size of entries.

    EXAMPLE::

        >>> from math import log
        >>> import estimator.reduction as RC
        >>> log(RC.ABFKSW20(500, 1024), 2.0)
        316.26...

    """
    if 1.5 * beta >= d or beta <= 92:  # 1.5β is a bit arbitrary, β ≤ 92 is the crossover point
        cost = RR(0.1839 * beta * log(beta, 2) - 0.995 * beta + 16.25 + log(64, 2))
    else:
        cost = RR(0.125 * beta * log(beta, 2) - 0.547 * beta + 10.4 + log(64, 2))

    repeat = svp_repeat(beta, d)

    return LLL(d, B) + repeat * ZZ(2) ** cost


def ABLR21(beta, d, B=None):
    """
    Enumeration cost according to [C:ABLR21]_.

    :param beta: Block size ≥ 2.
    :param d: Lattice dimension.
    :param B: Bit-size of entries.

    EXAMPLE::

        >>> from math import log
        >>> import estimator.reduction as RC
        >>> log(RC.ABLR21(500, 1024), 2.0)
        278.20...

    """
    if 1.5 * beta >= d or beta <= 97:  # 1.5β is a bit arbitrary, β ≤ 97 is the crossover point
        cost = RR(0.1839 * beta * log(beta, 2) - 1.077 * beta + 29.12 + log(64, 2))
    else:
        cost = RR(0.1250 * beta * log(beta, 2) - 0.654 * beta + 25.84 + log(64, 2))

    repeat = svp_repeat(beta, d)

    return LLL(d, B) + repeat * ZZ(2) ** cost
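
# A brief comparison (exposition only, derived from the doctests above): at
# β = 500, d = 1024 the [C:ABLR21]_ fit gives ≈2^278.2 versus ≈2^316.3 for
# [C:ABFKSW20]_, i.e. the improved enumeration saves ≈2^38 operations at this
# size, while both remain far above the ≈2^175.4 sieving cost of BDGL16().
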


def ADPS16(beta, d, B=None, mode="classical"):
    """
    Runtime estimation from [USENIX:ADPS16]_.

    :param beta: Block size ≥ 2.
    :param d: Lattice dimension.
    :param B: Bit-size of entries.
    :param mode: One of "classical", "quantum" or "paranoid".

    EXAMPLE::

        >>> from math import log
        >>> import estimator.reduction as RC
        >>> log(RC.ADPS16(500, 1024), 2.0)
        146.0
        >>> log(RC.ADPS16(500, 1024, mode="quantum"), 2.0)
        132.5
        >>> log(RC.ADPS16(500, 1024, mode="paranoid"), 2.0)
        103.75

    """
    if mode not in ("classical", "quantum", "paranoid"):
        raise ValueError(f"Mode {mode} not understood.")

    c = {
        "classical": 0.2920,
        "quantum": 0.2650,  # paper writes 0.262 but this isn't right, see above
        "paranoid": 0.2075,
    }

    c = c[mode]

    return ZZ(2) ** RR(c * beta)


def d4f(beta):
    """
    Dimensions "for free" following [EC:Ducas18]_.

    :param beta: Block size ≥ 2.

    If β' is output by this function then sieving is expected to be required up to dimension β - β'.

    EXAMPLE::

        >>> import estimator.reduction as RC
        >>> RC.d4f(500)
        42.597...

    """
    return max(float(beta * log(4 / 3.0) / log(beta / (2 * pi * e))), 0.0)


# These are not asymptotic expressions but compress the data in [AC:AGPS20]_ which covers up to
# β = 1024
NN_AGPS = {
    "all_pairs-classical": {"a": 0.4215069316613415, "b": 20.1669683097337},
    "all_pairs-dw": {"a": 0.3171724396445732, "b": 25.29828951733785},
    "all_pairs-g": {"a": 0.3155285835002801, "b": 22.478746811528048},
    "all_pairs-ge19": {"a": 0.3222895263943544, "b": 36.11746438609666},
    "all_pairs-naive_classical": {"a": 0.4186251294633655, "b": 9.899382654377058},
    "all_pairs-naive_quantum": {"a": 0.31401512556555794, "b": 7.694659515948326},
    "all_pairs-t_count": {"a": 0.31553282515234704, "b": 20.878594142502994},
    "list_decoding-classical": {"a": 0.2988026130564745, "b": 26.011121212891872},
    "list_decoding-dw": {"a": 0.26944796385592995, "b": 28.97237346443934},
    "list_decoding-g": {"a": 0.26937450988892553, "b": 26.925140365395972},
    "list_decoding-ge19": {"a": 0.2695210400018704, "b": 35.47132142280775},
    "list_decoding-naive_classical": {"a": 0.2973130399197453, "b": 21.142124058689426},
    "list_decoding-naive_quantum": {"a": 0.2674316807758961, "b": 18.720680589028465},
    "list_decoding-t_count": {"a": 0.26945736714156543, "b": 25.913746774011887},
    "random_buckets-classical": {"a": 0.35586144233444716, "b": 23.082527816636638},
    "random_buckets-dw": {"a": 0.30704199612690264, "b": 25.581968903639485},
    "random_buckets-g": {"a": 0.30610964725102385, "b": 22.928235564044563},
    "random_buckets-ge19": {"a": 0.31089687599538407, "b": 36.02129978813208},
    "random_buckets-naive_classical": {"a": 0.35448283789554513, "b": 15.28878540793908},
    "random_buckets-naive_quantum": {"a": 0.30211421791887644, "b": 11.151745013027089},
    "random_buckets-t_count": {"a": 0.30614770082829745, "b": 21.41830142853265},
}
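
# Reading this table (exposition only): for cost model ``nn`` the [AC:AGPS20]_
# gate count of one sieving call in dimension β is 2^(a·β + b), e.g.
#
#   >>> a, b = NN_AGPS["list_decoding-classical"].values()   # doctest-style sketch
#   >>> 0.2988 * 500 + 26.01   # log2 gates for one classical BDGL sieve at β = 500
#   175.4...
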


def Kyber(beta, d, B=None, nn="classical", C=5.46):
    """
    Runtime estimation from [Kyber20]_ and [AC:AGPS20]_.

    :param beta: Block size ≥ 2.
    :param d: Lattice dimension.
    :param B: Bit-size of entries.
    :param nn: Nearest neighbor cost model. The default "classical" picks the classical
        "list_decoding" metric of [SODA:BDGL16]_; "quantum" picks its "depth × width" metric.
        Kyber itself uses "all_pairs".
    :param C: Progressive overhead lim_{β → ∞} ∑_{i ≤ β} 2^{0.292 i + o(i)}/2^{0.292 β + o(β)}.

    EXAMPLE::

        >>> from math import log
        >>> import estimator.reduction as RC
        >>> log(RC.Kyber(500, 1024), 2.0)
        174.16...
        >>> log(RC.Kyber(500, 1024, nn="list_decoding-ge19"), 2.0)
        170.23...

    """

    if beta < 20:  # the model goes haywire for tiny block sizes
        return CheNgu12(beta, d, B)

    if nn == "classical":
        nn = "list_decoding-classical"
    elif nn == "quantum":
        nn = "list_decoding-dw"

    svp_calls = C * max(d - beta, 1)
    # We do not round β' to the nearest integer, to ensure the cost is continuously increasing
    # with β, which rounding can violate.
    beta_ = beta - d4f(beta)
    gate_count = 2 ** (NN_AGPS[nn]["a"] * beta_ + NN_AGPS[nn]["b"])
    return LLL(d, B=B) + svp_calls * gate_count
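
# Where C = 5.46 comes from (a derivation added for exposition): progressive
# BKZ runs sieves in every dimension up to β, and with per-dimension cost
# 2^(0.292·i) the total is a geometric series, so the overhead factor is
#
#   >>> 1 / (1 - 2 ** -0.292)   # doctest-style sketch
#   5.46...
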


def cost(cost_model, beta, d, B=None, predicate=None, **kwds):
    """
    Return a cost dictionary for computing a vector of norm `δ^{d-1} Vol(Λ)^{1/d}` using the
    provided lattice reduction algorithm.

    :param cost_model: Cost model to evaluate, e.g. one of the functions above.
    :param beta: Block size ≥ 2.
    :param d: Lattice dimension.
    :param B: Bit-size of entries.
    :param predicate: if ``False`` the cost will be infinity.

    EXAMPLE::

        >>> import estimator.reduction as RC
        >>> RC.cost(RC.ABLR21, 120, 500)
        rop: ≈2^68.9, red: ≈2^68.9, δ: 1.008435, β: 120, d: 500
        >>> RC.cost(RC.ABLR21, 120, 500, predicate=False)
        rop: ≈2^inf, red: ≈2^inf, δ: 1.008435, β: 120, d: 500

    """
    from .cost import Cost

    cost = cost_model(beta, d, B)
    delta_ = delta(beta)
    cost = Cost(rop=cost, red=cost, delta=delta_, beta=beta, d=d, **kwds)
    cost.register_impermanent(rop=True, red=True, delta=False, beta=False, d=False)
    if predicate is not None and not predicate:
        cost["red"] = oo
        cost["rop"] = oo
    return cost
402
estimator_new/schemes.py
Normal file
402
estimator_new/schemes.py
Normal file
@@ -0,0 +1,402 @@
from .nd import NoiseDistribution, stddevf
from .lwe_parameters import LWEParameters

#
# Kyber
#
#
# https://pq-crystals.org/kyber/data/kyber-specification-round3-20210804.pdf
# Table 1, Page 11, we are ignoring the compression
#
# https://eprint.iacr.org/2020/1308.pdf
# Table 2, page 27, disagrees on Kyber 512

Kyber512 = LWEParameters(
    n=2 * 256,
    q=3329,
    Xs=NoiseDistribution.CenteredBinomial(3),
    Xe=NoiseDistribution.CenteredBinomial(3),
    m=2 * 256,
    tag="Kyber 512",
)

Kyber768 = LWEParameters(
    n=3 * 256,
    q=3329,
    Xs=NoiseDistribution.CenteredBinomial(2),
    Xe=NoiseDistribution.CenteredBinomial(2),
    m=3 * 256,
    tag="Kyber 768",
)

Kyber1024 = LWEParameters(
    n=4 * 256,
    q=3329,
    Xs=NoiseDistribution.CenteredBinomial(2),
    Xe=NoiseDistribution.CenteredBinomial(2),
    m=4 * 256,
    tag="Kyber 1024",
)

#
# Saber
#
#
# https://www.esat.kuleuven.be/cosic/pqcrypto/saber/files/saberspecround3.pdf
# Table 1, page 11
#
# https://eprint.iacr.org/2020/1308.pdf
# Table 2, page 27, agrees

LightSaber = LWEParameters(
    n=2 * 256,
    q=8192,
    Xs=NoiseDistribution.CenteredBinomial(5),
    Xe=NoiseDistribution.UniformMod(7),
    m=2 * 256,
    tag="LightSaber",
)

Saber = LWEParameters(
    n=3 * 256,
    q=8192,
    Xs=NoiseDistribution.CenteredBinomial(4),
    Xe=NoiseDistribution.UniformMod(7),
    m=3 * 256,
    tag="Saber",
)

FireSaber = LWEParameters(
    n=4 * 256,
    q=8192,
    Xs=NoiseDistribution.CenteredBinomial(3),
    Xe=NoiseDistribution.UniformMod(7),
    m=4 * 256,
    tag="FireSaber",
)

NTRUHPS2048509Enc = LWEParameters(
    n=508,
    q=2048,
    Xe=NoiseDistribution.SparseTernary(508, 2048 / 16 - 1),
    Xs=NoiseDistribution.UniformMod(3),
    m=508,
    tag="NTRUHPS2048509Enc",
)

NTRUHPS2048677Enc = LWEParameters(
    n=676,
    q=2048,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.SparseTernary(676, 2048 / 16 - 1),
    m=676,
    tag="NTRUHPS2048677Enc",
)

NTRUHPS4096821Enc = LWEParameters(
    n=820,
    q=4096,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.SparseTernary(820, 4096 / 16 - 1),
    m=820,
    tag="NTRUHPS4096821Enc",
)

NTRUHRSS701Enc = LWEParameters(
    n=700,
    q=8192,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.UniformMod(3),
    m=700,
    tag="NTRUHRSS701",
)

NISTPQC_R3 = (
    Kyber512,
    Kyber768,
    Kyber1024,
    LightSaber,
    Saber,
    FireSaber,
    NTRUHPS2048509Enc,
    NTRUHPS2048677Enc,
    NTRUHPS4096821Enc,
    NTRUHRSS701Enc,
)
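
# A hedged usage sketch (illustrative; assumes the package is importable as
# ``estimator``, matching the doctests in reduction.py, and that ``LWE.estimate``
# is available as used in new_scripts.py):
#
#   >>> from estimator import LWE, schemes
#   >>> for params in schemes.NISTPQC_R3:
#   ...     _ = LWE.estimate(params)   # prints a cost summary per attack
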

HESv111024128error = LWEParameters(
    n=1024,
    q=2 ** 27,
    Xs=NoiseDistribution.DiscreteGaussian(3.0),
    Xe=NoiseDistribution.DiscreteGaussian(3.0),
    m=1024,
    tag="HESv11error",
)

HESv111024128ternary = LWEParameters(
    n=1024,
    q=2 ** 27,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.DiscreteGaussian(3.0),
    m=1024,
    tag="HESv11ternary",
)

HESv11 = (HESv111024128error, HESv111024128ternary)


# FHE schemes

# TFHE
# https://tfhe.github.io/tfhe/security_and_params.html
TFHE630 = LWEParameters(
    n=630,
    q=2 ** 32,
    Xs=NoiseDistribution.UniformMod(2),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=2 ** (-15) * 2 ** 32),
    tag="TFHE630",
)

TFHE1024 = LWEParameters(
    n=1024,
    q=2 ** 32,
    Xs=NoiseDistribution.UniformMod(2),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=2 ** (-25) * 2 ** 32),
    tag="TFHE1024",
)

# https://eprint.iacr.org/2018/421.pdf
# Table 3, page 55
TFHE16_500 = LWEParameters(
    n=500,
    q=2 ** 32,
    Xs=NoiseDistribution.UniformMod(2),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=2.43 * 10 ** (-5) * 2 ** 32),
    tag="TFHE16_500",
)

TFHE16_1024 = LWEParameters(
    n=1024,
    q=2 ** 32,
    Xs=NoiseDistribution.UniformMod(2),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.73 * 10 ** (-9) * 2 ** 32),
    tag="TFHE16_1024",
)

# https://eprint.iacr.org/2018/421.pdf
# Table 4, page 55
TFHE20_612 = LWEParameters(
    n=612,
    q=2 ** 32,
    Xs=NoiseDistribution.UniformMod(2),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=2 ** (-15) * 2 ** 32),
    tag="TFHE20_612",
)

TFHE20_1024 = LWEParameters(
    n=1024,
    q=2 ** 32,
    Xs=NoiseDistribution.UniformMod(2),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=2 ** (-26) * 2 ** 32),
    tag="TFHE20_1024",
)

# FHEW
# https://eprint.iacr.org/2014/816.pdf
# page 14
FHEW = LWEParameters(
    n=500,
    q=2 ** 32,
    Xs=NoiseDistribution.UniformMod(2),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=2 ** (-15) * 2 ** 32),
    tag="FHEW",
)

# SEAL

# v2.0
# https://www.microsoft.com/en-us/research/wp-content/uploads/2016/09/sealmanual.pdf
# Table 3, page 19

SEAL20_1024 = LWEParameters(
    n=1024,
    q=2 ** 48 - 2 ** 20 + 1,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19),
    tag="SEAL20_1024",
)

SEAL20_2048 = LWEParameters(
    n=2048,
    q=2 ** 94 - 2 ** 20 + 1,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19),
    tag="SEAL20_2048",
)

SEAL20_4096 = LWEParameters(
    n=4096,
    q=2 ** 190 - 2 ** 30 + 1,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19),
    tag="SEAL20_4096",
)

SEAL20_8192 = LWEParameters(
    n=8192,
    q=2 ** 383 - 2 ** 33 + 1,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19),
    tag="SEAL20_8192",
)

SEAL20_16384 = LWEParameters(
    n=16384,
    q=2 ** 767 - 2 ** 56 + 1,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19),
    tag="SEAL20_16384",
)

# v2.2
# https://www.microsoft.com/en-us/research/wp-content/uploads/2017/06/sealmanual_v2.2.pdf
# Table 3, page 20
SEAL22_2048 = LWEParameters(
    n=2048,
    q=2 ** 60 - 2 ** 14 + 1,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19),
    tag="SEAL22_2048",
)

SEAL22_4096 = LWEParameters(
    n=4096,
    q=2 ** 116 - 2 ** 18 + 1,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19),
    tag="SEAL22_4096",
)

SEAL22_8192 = LWEParameters(
    n=8192,
    q=2 ** 226 - 2 ** 26 + 1,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19),
    tag="SEAL22_8192",
)

SEAL22_16384 = LWEParameters(
    n=16384,
    q=2 ** 435 - 2 ** 33 + 1,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19),
    tag="SEAL22_16384",
)

SEAL22_32768 = LWEParameters(
    n=32768,
    q=2 ** 889 - 2 ** 54 - 2 ** 53 - 2 ** 52 + 1,
    Xs=NoiseDistribution.UniformMod(3),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19),
    tag="SEAL22_32768",
)

# The following are not parameters of actual schemes
# but useful for benchmarking

# HElib
# https://eprint.iacr.org/2017/047.pdf
# Table 1, page 6
# 80-bit security
HElib80_1024 = LWEParameters(
    n=1024,
    q=2 ** 47,
    Xs=NoiseDistribution.SparseTernary(n=1024, p=32),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.2),
    tag="HElib80_1024",
)

HElib80_2048 = LWEParameters(
    n=2048,
    q=2 ** 87,
    Xs=NoiseDistribution.SparseTernary(n=2048, p=32),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.2),
    tag="HElib80_2048",
)

HElib80_4096 = LWEParameters(
    n=4096,
    q=2 ** 167,
    Xs=NoiseDistribution.SparseTernary(n=4096, p=32),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.2),
    tag="HElib80_4096",
)

# 120-bit security
HElib120_1024 = LWEParameters(
    n=1024,
    q=2 ** 38,
    Xs=NoiseDistribution.SparseTernary(n=1024, p=32),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.2),
    tag="HElib120_1024",
)

HElib120_2048 = LWEParameters(
    n=2048,
    q=2 ** 70,
    Xs=NoiseDistribution.SparseTernary(n=2048, p=32),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.2),
    tag="HElib120_2048",
)

HElib120_4096 = LWEParameters(
    n=4096,
    q=2 ** 134,
    Xs=NoiseDistribution.SparseTernary(n=4096, p=32),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=3.2),
    tag="HElib120_4096",
)


# Test parameters from CHHS
# https://eprint.iacr.org/2019/1114.pdf
# Table 4, page 18
CHHS_1024_25 = LWEParameters(
    n=1024,
    q=2 ** 25,
    Xs=NoiseDistribution.SparseTernary(n=1024, p=32),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=stddevf(8)),
    tag="CHHS_1024_25",
)

CHHS_2048_38 = LWEParameters(
    n=2048,
    q=2 ** 38,
    Xs=NoiseDistribution.SparseTernary(n=2048, p=32),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=stddevf(8)),
    tag="CHHS_2048_38",
)

CHHS_2048_45 = LWEParameters(
    n=2048,
    q=2 ** 45,
    Xs=NoiseDistribution.SparseTernary(n=2048, p=32),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=stddevf(8)),
    tag="CHHS_2048_45",
)

CHHS_4096_67 = LWEParameters(
    n=4096,
    q=2 ** 67,
    Xs=NoiseDistribution.SparseTernary(n=4096, p=32),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=stddevf(8)),
    tag="CHHS_4096_67",
)

CHHS_4096_82 = LWEParameters(
    n=4096,
    q=2 ** 82,
    Xs=NoiseDistribution.SparseTernary(n=4096, p=32),
    Xe=NoiseDistribution.DiscreteGaussian(stddev=stddevf(8)),
    tag="CHHS_4096_82",
)
72
estimator_new/simulator.py
Normal file
72
estimator_new/simulator.py
Normal file
@@ -0,0 +1,72 @@
# -*- coding: utf-8 -*-
"""
Simulate lattice reduction on the rows of::

    ⌜ ξI A 0 ⌝
    ǀ 0 qI 0 |
    ⌞ 0 c τ ⌟

where

- ξI ∈ ZZ^{n × n},
- A ∈ ZZ_q^{n × m},
- qI ∈ ZZ^{m × m},
- τ ∈ ZZ and
- d = m + n + 1.

The last row is optional.
"""

from sage.all import RR, log


def CN11(d, n, q, beta, xi=1, tau=1):
    """
    Reduced lattice shape following the Chen–Nguyen BKZ simulator as implemented in fpylll.

    :param d: Lattice dimension.
    :param n: Number of `q` vectors.
    :param q: Modulus `q`.
    :param beta: Block size β.
    :param xi: Scaling factor ξ for identity part.
    :param tau: Kannan factor τ.

    """
    from fpylll import BKZ
    from fpylll.tools.bkz_simulator import simulate

    if tau is not None:
        r = [q ** 2] * (d - n - 1) + [xi ** 2] * n + [tau ** 2]
    else:
        r = [q ** 2] * (d - n) + [xi ** 2] * n

    return simulate(r, BKZ.EasyParam(beta))[0]


def GSA(d, n, q, beta, xi=1, tau=1):
    """
    Reduced lattice shape following the Geometric Series Assumption [Schnorr03]_.

    :param d: Lattice dimension.
    :param n: Number of `q` vectors.
    :param q: Modulus `q`.
    :param beta: Block size β.
    :param xi: Scaling factor ξ for identity part.
    :param tau: Kannan factor τ.

    """
    from .reduction import delta as deltaf

    if tau is not None:
        log_vol = RR(log(q, 2) * (d - n - 1) + log(xi, 2) * n + log(tau, 2))
    else:
        log_vol = RR(log(q, 2) * (d - n) + log(xi, 2) * n)

    delta = deltaf(beta)
    r_log = [(d - 1 - 2 * i) * RR(log(delta, 2)) + log_vol / d for i in range(d)]
    r = [2 ** (2 * r_) for r_ in r_log]
    return r


def normalize(name):
    if str(name).upper() == "CN11":
        return CN11
    elif str(name).upper() == "GSA":
        return GSA
    else:
        return name


def plot_gso(r, *args, **kwds):
    from sage.all import line

    return line([(i, log(r_, 2) / 2.0) for i, r_ in enumerate(r)], *args, **kwds)
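
# A hedged usage sketch (illustrative, mirroring the doctest conventions used
# elsewhere): compare the GSA prediction against the CN11 simulator (the
# latter needs fpylll) for a q-ary lattice and plot both profiles:
#
#   >>> import estimator.simulator as Simulator
#   >>> r_gsa = Simulator.GSA(d=512, n=256, q=3329, beta=100)
#   >>> r_cn11 = Simulator.CN11(d=512, n=256, q=3329, beta=100)
#   >>> Simulator.plot_gso(r_gsa) + Simulator.plot_gso(r_cn11)   # Sage plot object
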
295
estimator_new/util.py
Normal file
295
estimator_new/util.py
Normal file
@@ -0,0 +1,295 @@
from multiprocessing import Pool
from functools import partial

from sage.all import ceil, floor

from .io import Logging


class local_minimum_base:
    """
    An iterator context for finding a local minimum using binary search.

    We use the immediate neighborhood of a point to decide the next direction to go into (gradient
    descent style), so the algorithm is not plain binary search (see the ``update()`` function).

    .. note :: We combine an iterator and a context to give the caller access to the result.
    """

    def __init__(
        self,
        start,
        stop,
        smallerf=lambda x, best: x <= best,
        suppress_bounds_warning=False,
        log_level=5,
    ):
        """
        Create a fresh local minimum search context.

        :param start: starting point
        :param stop: end point (exclusive)
        :param smallerf: a function to decide if ``lhs`` is smaller than ``rhs``.
        :param suppress_bounds_warning: do not warn if a boundary is picked as optimal

        """
        if stop < start:
            raise ValueError(f"Incorrect bounds {start} > {stop}.")

        self._suppress_bounds_warning = suppress_bounds_warning
        self._log_level = log_level
        self._start = start
        self._stop = stop - 1
        self._initial_bounds = (start, stop - 1)
        self._smallerf = smallerf
        # abs(self._direction) == 2: binary search step
        # abs(self._direction) == 1: gradient descent direction
        self._direction = -1  # going down
        self._last_x = None
        self._next_x = self._stop
        self._best = (None, None)
        self._all_x = set()

    def __enter__(self):
        """ """
        return self

    def __exit__(self, type, value, traceback):
        """ """
        pass

    def __iter__(self):
        """ """
        return self

    def __next__(self):
        abort = False
        if self._next_x is None:
            abort = True  # we're told to abort
        elif self._next_x in self._all_x:
            abort = True  # we're looping
        elif self._next_x < self._initial_bounds[0] or self._initial_bounds[1] < self._next_x:
            abort = True  # we're stepping out of bounds

        if not abort:
            self._last_x = self._next_x
            self._next_x = None
            return self._last_x

        if self._best[0] in self._initial_bounds and not self._suppress_bounds_warning:
            # We warn the user if the optimal solution is at the edge and thus possibly not optimal.
            Logging.log(
                "bins",
                self._log_level,
                f'warning: "optimal" solution {self._best[0]} matches a bound ∈ {self._initial_bounds}.',
            )

        raise StopIteration

    @property
    def x(self):
        return self._best[0]

    @property
    def y(self):
        return self._best[1]

    def update(self, res):
        """

        TESTS:

        We cache old inputs in ``_all_x`` to prevent infinite loops::

            >>> from estimator.util import binary_search
            >>> from estimator.cost import Cost
            >>> f = lambda x, log_level=1: Cost(rop=1) if x >= 19 else Cost(rop=2)
            >>> binary_search(f, 10, 30, "x")
            rop: 1

        """
        Logging.log("bins", self._log_level, f"({self._last_x}, {repr(res)})")

        self._all_x.add(self._last_x)

        # We got nothing yet
        if self._best[0] is None:
            self._best = self._last_x, res

        # We found something better
        if res is not False and self._smallerf(res, self._best[1]):
            # store it
            self._best = self._last_x, res
            # if it's the result of a long jump, figure out the next direction
            if abs(self._direction) != 1:
                self._direction = -1
                self._next_x = self._last_x - 1
            # going down worked, so let's keep on doing that.
            elif self._direction == -1:
                self._direction = -2
                self._stop = self._last_x
                self._next_x = ceil((self._start + self._stop) / 2)
            # going up worked, so let's keep on doing that.
            elif self._direction == 1:
                self._direction = 2
                self._start = self._last_x
                self._next_x = floor((self._start + self._stop) / 2)
        else:
            # going downwards didn't help, let's try up
            if self._direction == -1:
                self._direction = 1
                self._next_x = self._last_x + 2
            # going up didn't help either, so we stop
            elif self._direction == 1:
                self._next_x = None
            # it got no better in a long jump, halve the search space and try again
            elif self._direction == -2:
                self._start = self._last_x
                self._next_x = ceil((self._start + self._stop) / 2)
            elif self._direction == 2:
                self._stop = self._last_x
                self._next_x = floor((self._start + self._stop) / 2)

        # We are repeating ourselves, time to stop
        if self._next_x == self._last_x:
            self._next_x = None


class local_minimum(local_minimum_base):
    """
    An iterator context for finding a local minimum using binary search.

    We use the neighborhood of a point to decide the next direction to go into (gradient descent
    style), so the algorithm is not plain binary search (see the ``update()`` function).

    We also zoom out by a factor ``precision``, find an approximate local minimum and then
    search the neighborhood for the smallest value.

    .. note :: We combine an iterator and a context to give the caller access to the result.

    """

    def __init__(
        self,
        start,
        stop,
        precision=1,
        smallerf=lambda x, best: x <= best,
        suppress_bounds_warning=False,
        log_level=5,
    ):
        """
        Create a fresh local minimum search context.

        :param start: starting point
        :param stop: end point (exclusive)
        :param precision: only consider every ``precision``-th value in the main loop
        :param smallerf: a function to decide if ``lhs`` is smaller than ``rhs``.
        :param suppress_bounds_warning: do not warn if a boundary is picked as optimal

        """
        self._precision = precision
        self._orig_bounds = (start, stop)
        start = ceil(start / precision)
        stop = floor(stop / precision)
        local_minimum_base.__init__(self, start, stop, smallerf, suppress_bounds_warning, log_level)

    def __next__(self):
        x = local_minimum_base.__next__(self)
        return x * self._precision

    @property
    def x(self):
        return self._best[0] * self._precision

    @property
    def neighborhood(self):
        """
        An iterator over the neighborhood of the currently best value.
        """
        start, stop = self._orig_bounds

        for x in range(max(start, self.x - self._precision), min(stop, self.x + self._precision)):
            yield x
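
# A hedged usage sketch (illustrative; ``f`` is a hypothetical objective
# returning comparable costs): the coarse pass steps by ``precision``, the
# second pass refines around the coarse minimum, mirroring binary_search()
# below:
#
#   >>> with local_minimum(40, 512, precision=8) as it:
#   ...     for beta in it:
#   ...         it.update(f(beta))
#   ...     for beta in it.neighborhood:
#   ...         it.update(f(beta))
#   >>> it.x, it.y   # best input and its cost
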


def binary_search(
    f, start, stop, param, step=1, smallerf=lambda x, best: x <= best, log_level=5, *args, **kwds
):
    """
    Search for the best value in the interval [start, stop] depending on the given comparison function.

    :param f: function to call
    :param start: start of range to search
    :param stop: stop of range to search (inclusive)
    :param param: the parameter to modify when calling ``f``
    :param smallerf: comparison is performed by evaluating ``smallerf(current, best)``
    :param step: initially only consider every ``step``-th value
    """
    with local_minimum(start, stop + 1, step, smallerf=smallerf, log_level=log_level) as it:
        for x in it:
            kwds_ = dict(kwds)
            kwds_[param] = x
            it.update(f(*args, **kwds_))

        for x in it.neighborhood:
            kwds_ = dict(kwds)
            kwds_[param] = x
            it.update(f(*args, **kwds_))

        return it.y


def _batch_estimatef(f, x, log_level=0, f_repr=None):
    y = f(x)
    if f_repr is None:
        f_repr = repr(f)
    Logging.log("batch", log_level, f"f: {f_repr}")
    Logging.log("batch", log_level, f"x: {x}")
    Logging.log("batch", log_level, f"f(x): {repr(y)}")
    return y


def f_name(f):
    try:
        return f.__name__
    except AttributeError:
        return repr(f)


def batch_estimate(params, algorithm, jobs=1, log_level=0, **kwds):
    from .lwe_parameters import LWEParameters

    if isinstance(params, LWEParameters):
        params = (params,)
    try:
        iter(algorithm)
    except TypeError:
        algorithm = (algorithm,)

    tasks = []

    for x in params:
        for f in algorithm:
            tasks.append((partial(f, **kwds), x, log_level, f_name(f)))

    if jobs == 1:
        res = {}
        for f, x, lvl, f_repr in tasks:
            y = _batch_estimatef(f, x, lvl, f_repr)
            res[f_repr, x] = y
    else:
        pool = Pool(jobs)
        res = pool.starmap(_batch_estimatef, tasks)
        res = dict([((f_repr, x), res[i]) for i, (f, x, _, f_repr) in enumerate(tasks)])

    ret = dict()
    for f, x in res:
        ret[x] = ret.get(x, dict())
        ret[x][f] = res[f, x]

    return ret
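
# A hedged usage sketch (illustrative; assumes an ``estimator`` package layout
# matching the doctests above and a hypothetical attack function such as a
# primal-uSVP estimate exposed by the LWE module):
#
#   >>> from estimator import LWE, schemes
#   >>> res = batch_estimate([schemes.Kyber512, schemes.Kyber768], LWE.primal_usvp, jobs=2)
#   >>> res   # {params: {function-name: cost}}
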
150
new_scripts.py
Normal file
150
new_scripts.py
Normal file
@@ -0,0 +1,150 @@
## for now, run using sage new_scripts.py
from estimator_new import *
from math import log2


def estimate_lwe_nocrash(params):
    """
    Retrieve an estimate using the Lattice Estimator for a given set of input parameters.

    :param params: the input LWE parameters
    """
    try:
        estimate = LWE.estimate(params)
    except Exception:
        # a failed estimate is signalled by returning 0
        estimate = 0
    return estimate


def get_security_level(estimate, dp=2):
    """
    Get the security level λ from a Lattice Estimator output.

    :param estimate: the Lattice Estimator output
    :param dp: the number of decimal places to consider
    """
    attack_costs = []
    for key in estimate.keys():
        attack_costs.append(estimate[key]["rop"])
    # get the security level correct to 'dp' decimal places
    security_level = round(log2(min(attack_costs)), dp)
    return security_level
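
# e.g. (hypothetical numbers, for illustration): for an estimate like
# {"usvp": {"rop": 2 ** 140.3, ...}, "bdd": {"rop": 2 ** 138.7, ...}} the
# cheapest attack dominates and get_security_level() returns 138.7.
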


def inequality(x, y):
    """
    A utility function which compresses the conditions x < y and x > y into a single condition
    via a multiplier.

    :param x: the LHS of the inequality
    :param y: the RHS of the inequality
    """
    if x <= y:
        return 1

    if x > y:
        return -1


def automated_param_select_n(params, target_security=128):
    """
    A function used to generate the smallest value of n which allows for target_security bits
    of security, for the input values of (params.Xe.stddev, params.q).

    :param params: the input LWE parameters
    :param target_security: the target number of bits of security, 128 is default

    EXAMPLE:
    sage: X = automated_param_select_n(Kyber512, target_security = 128)
    sage: X
    456
    """
    # get an initial estimate
    costs = estimate_lwe_nocrash(params)
    security_level = get_security_level(costs, 2)
    # determine if we are above or below the target security level
    z = inequality(security_level, target_security)

    while z * security_level < z * target_security and params.n > 80:
        params = params.updated(n=params.n + z * 8)
        costs = estimate_lwe_nocrash(params)
        security_level = get_security_level(costs, 2)

        if params.Xe.stddev < 0:
            print("target security level is unattainable")
            break

    # final estimate (we went too far in the above loop)
    if security_level < target_security:
        params = params.updated(n=params.n - z * 8)
        costs = estimate_lwe_nocrash(params)
        security_level = get_security_level(costs, 2)

    print(
        "the finalised parameters are n = {}, sd = {}, log2(q) = {}, with a security level of {}-bits".format(
            params.n, params.Xe.stddev, log2(params.q), security_level
        )
    )

    # final sanity check so we don't return insecure (or inf) parameters
    # TODO: figure out inf in new estimator
    if security_level < target_security:  # or security_level == oo:
        params = params.updated(n=None)

    return params


def automated_param_select_sd(params, target_security=128):
    """
    A function used to generate the smallest value of sd which allows for target_security bits
    of security, for the input values of (params.n, params.q).

    :param params: the input LWE parameters
    :param target_security: the target number of bits of security, 128 is default

    EXAMPLE:
    sage: X = automated_param_select_sd(Kyber512, target_security = 128)
    sage: X
    456
    """
    # if sd is None:
    #     pick some random sd which gets us close (based on concrete_LWE_params)
    #     sd = round(n * 80 / (target_security * (-25)))

    #     make sure sd satisfies q * sd > 1
    #     sd = max(sd, -(log(q,2) - 2))

    # sd_ = (2 ** sd) * q
    # alpha = sqrt(2 * pi) * sd_ / RR(q)

    # get an initial estimate
    costs = estimate_lwe_nocrash(params)
    security_level = get_security_level(costs, 2)
    # determine if we are above or below the target security level
    z = inequality(security_level, target_security)

    while z * security_level < z * target_security:  # and sd > -log(q,2):
        Xe_new = ND.DiscreteGaussian(params.Xe.stddev + z * 0.5)
        params = params.updated(Xe=Xe_new)
        costs = estimate_lwe_nocrash(params)
        security_level = get_security_level(costs, 2)

        if params.Xe.stddev > log2(params.q):
            print("target security level is unattainable")
            return None

    # final estimate (we went too far in the above loop)
    if security_level < target_security:
        Xe_new = ND.DiscreteGaussian(params.Xe.stddev - z * 0.5)
        params = params.updated(Xe=Xe_new)
        costs = estimate_lwe_nocrash(params)
        security_level = get_security_level(costs, 2)

    print(
        "the finalised parameters are n = {}, sd = {}, log2(q) = {}, with a security level of {}-bits".format(
            params.n, params.Xe.stddev, log2(params.q), security_level
        )
    )

    return params


params = Kyber512

# x = estimate_lwe_nocrash(params)
# y = get_security_level(x, 2)
# print(y)
# z1 = automated_param_select_n(Kyber512, 128)
# print(z1)
# z2 = automated_param_select_sd(Kyber512, 128)
# print(z2)