Mirror of https://github.com/0xPARC/plonkathon.git (synced 2026-01-09 13:48:00 -05:00)
[cleanup] Make py_plonk the root directory
@@ -1,62 +0,0 @@
# Implementation of 99% fault tolerant consensus as described by Leslie Lamport
# on page 391 of https://people.eecs.berkeley.edu/~luca/cs174/byzantine.pdf

import random
from network import Network

TIMEOUT = 10
LATENCY = 5

# A node in the network (not including the "commander")
class Node():

    def __init__(self, network, id, honest=True):
        self.seen = {}
        self.id = id
        self.network = network
        self.network.add_node(self)
        self.honest = honest

    # Upon receiving a message...
    def on_receive(self, msg):
        if self.id == 0 and len(msg) == 1:
            print('Seen', msg[0], self.network.time, 'already seen', sorted(self.seen.keys()))
        # Only proceed if v not in V_i...
        # Byzantine nodes ignore this rule 5% of the time
        if (msg[0] in self.seen) and (self.honest or random.random() < 0.95):
            return
        # Timeout logic (see page 399)
        # Byzantine nodes ignore this rule
        if (len(msg) * TIMEOUT < self.network.time) and self.honest:
            return
        self.seen[msg[0]] = True
        new_msg = msg + [self.id]
        # Broadcast v:0:j1...jk:i
        # Byzantine nodes delay broadcast to split honest receiving nodes by timeout
        self.network.broadcast(new_msg, at=self.network.time if self.honest else self.network.time + int(TIMEOUT / 1.5))

    def choice(self):
        return max(self.seen.keys())

def test():
    n = Network(LATENCY * 2)
    nodes = [Node(n, i, i % 4 == 0) for i in range(20)]
    for i in range(30):
        for _ in range(2):
            z = random.randrange(20)
            n.send_to([1000 + i], z, at=5 + i*2)
    for i in range(21 * LATENCY):
        n.tick()
        if i % 10 == 0:
            print("Value sets", [sorted(node.seen.keys()) for node in nodes])
    countz = {}
    maxval = ""
    for node in nodes:
        if node.honest:
            k = str(sorted(node.seen.keys()))
            countz[k] = countz.get(k, 0) + 1
            if countz[k] > countz.get(maxval, 0):
                maxval = k
    print("Most popular: %s" % maxval, "with %d agreeing" % countz[maxval])

if __name__ == '__main__':
    test()
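The `network` module imported above is not part of this diff. For reference, here is a minimal sketch of the interface the simulation appears to assume (`time`, `add_node`, `send_to`, `broadcast`, `tick`); this is a hypothetical reconstruction, not the original module:

```
# Hypothetical reconstruction of the missing `network` module
import random

class Network():
    def __init__(self, latency):
        self.latency = latency
        self.nodes = []
        self.queue = {}   # delivery_time -> list of (node, msg)
        self.time = 0

    def add_node(self, node):
        self.nodes.append(node)

    def send_to(self, msg, node_id, at=0):
        # Deliver msg to one node after a random network delay
        when = max(at, self.time) + 1 + random.randrange(self.latency)
        self.queue.setdefault(when, []).append((self.nodes[node_id], msg))

    def broadcast(self, msg, at=0):
        # Deliver msg to every node, each with its own random delay
        for node in self.nodes:
            when = max(at, self.time) + 1 + random.randrange(self.latency)
            self.queue.setdefault(when, []).append((node, msg))

    def tick(self):
        # Deliver everything scheduled for the current time step
        for node, msg in self.queue.pop(self.time, []):
            node.on_receive(msg)
        self.time += 1
```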
LICENSE
@@ -1,21 +0,0 @@
The MIT License (MIT)

Copyright (c) 2015-2018 Vitalik Buterin

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
README.md
@@ -1,15 +1,65 @@
-# Research
+# Py_plonk

-This repository is used mainly for code related to specific research questions, mostly written by @vbuterin. It is not meant as a general research repository for academic papers.
+Py_plonk is a simple Python implementation of the PLONK protocol as described in https://eprint.iacr.org/2019/953.pdf (see also https://vitalik.ca/general/2019/09/22/plonk.html), aiming to be nearly compatible with the implementation at https://zkrepl.dev. Py_plonk includes:

-An exception to this is the `papers` folder, which contains the LaTeX files for various academic papers.
+* A simple programming language for describing circuits, which it can compile into the forms needed for a PLONK proof (the QL, QR, QM, QO, QC, S1, S2, S3 polynomials)
+* A prover that can generate proofs for this language, given a list of variable assignments
+* A verifier that can verify these proofs

-## Contribute
+Full compatibility is achieved in some cases: for simple programs, py_plonk is capable of outputting verification keys that _exactly_ match the https://zkrepl.dev output. See the tests in test.py for some examples.

-While contributions are welcome, maintaining this repository is not an active priority. The code in this repository is offered as is, without active support.
+This implementation is intended for educational use, and to help reproduce and verify verification keys that are generated by other software. **IT HAS NOT BEEN AUDITED AND PROBABLY HAS BUGS; DO NOT USE IT BY ITSELF IN PRODUCTION.**

-If you find spelling errors or have suggestions or comments, please feel free to open an issue.
+Many features are missing. The parts of PLONK that are responsible for ensuring strong privacy are left out (they are easy to add, but they would increase complexity and reduce the educational value of this implementation).

-## License
+### Example

-[MIT](LICENSE) © 2015-2018 Vitalik Buterin
+Here is a program that lets you prove that you know two small numbers that multiply to a given number (in our example we'll use 91) without revealing what those numbers are:

+```
+n public
+pb0 === pb0 * pb0
+pb1 === pb1 * pb1
+pb2 === pb2 * pb2
+pb3 === pb3 * pb3
+qb0 === qb0 * qb0
+qb1 === qb1 * qb1
+qb2 === qb2 * qb2
+qb3 === qb3 * qb3
+pb01 <== pb0 + 2 * pb1
+pb012 <== pb01 + 4 * pb2
+p <== pb012 + 8 * pb3
+qb01 <== qb0 + 2 * qb1
+qb012 <== qb01 + 4 * qb2
+q <== qb012 + 8 * qb3
+n <== p * q
+```

+Generating the verification key:

+```
+setup = utils.Setup.from_file(SETUP_FILENAME)
+vk = make_verification_key(setup, 16, string_containing_the_above_code)
+```

+A setup file is included in this repo. The second argument is the group order; it should be a power of two, at least 8, and at least the number of lines of code in the program (the example above is 16 lines long, hence 16).

+Proving:

+```
+assignments = compiler.fill_variable_assignments(eqs, {
+    'pb3': 1, 'pb2': 1, 'pb1': 0, 'pb0': 1,
+    'qb3': 0, 'qb2': 1, 'qb1': 1, 'qb0': 1,
+})
+proof = prover.prove_from_witness(setup, 16, eqs, assignments)
+```

+`compiler.fill_variable_assignments` is a convenience method that executes the program to fill in any variables that you do not specify yourself. In this case, you only specify the bits of the two factors (7 and 13); the program can compute all the other intermediate and final variables for you.

+`prove_from_witness` generates a proof, taking the full set of assignments as input.

+Verifying:

+```
+assert verifier.verify_proof(setup, 16, vk, proof, [91], optimized=True)
+```
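One gap in the README snippets above: `eqs` is never constructed. Presumably it is the circuit description in whatever form the compiler and prover expect; a hedged guess, reusing the program text from the verification-key step:

```
# Hypothetical: `eqs` is the same circuit description that was passed
# to make_verification_key above
eqs = string_containing_the_above_code
```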
@@ -1,32 +0,0 @@
from random import randint, shuffle, choice
from poly_utils import PrimeField

MODULUS = 0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001
PRIMITIVE_ROOT = 7

assert pow(PRIMITIVE_ROOT, (MODULUS - 1) // 2, MODULUS) != 1
assert pow(PRIMITIVE_ROOT, MODULUS - 1, MODULUS) == 1

primefield = PrimeField(MODULUS)

WIDTH = 8

ROOT_OF_UNITY = pow(PRIMITIVE_ROOT, (MODULUS - 1) // WIDTH, MODULUS)
DOMAIN = [pow(ROOT_OF_UNITY, i, MODULUS) for i in range(WIDTH)]

def check(f):
    result = 0
    r = randint(0, MODULUS)
    rn2 = pow(r, WIDTH // 2, MODULUS)

    for i in range(WIDTH):
        summand = f[i] * ((-1)**i * rn2 - 1)
        summand = primefield.div(summand, DOMAIN[i * (WIDTH // 2 - 1) % WIDTH] * (r - DOMAIN[i]))
        result += summand

    return result % MODULUS

fc = [randint(0, MODULUS) for i in range(4)]
f = [primefield.eval_poly_at(fc, x) for x in DOMAIN]

print(check(f))
@@ -1,207 +0,0 @@
# Creates an object that includes convenience operations for numbers
# and polynomials in some prime field
class PrimeField():
    def __init__(self, modulus):
        # Quick Fermat base-2 primality check on the modulus
        assert pow(2, modulus, modulus) == 2
        self.modulus = modulus

    def add(self, x, y):
        return (x + y) % self.modulus

    def sub(self, x, y):
        return (x - y) % self.modulus

    def mul(self, x, y):
        return (x * y) % self.modulus

    def exp(self, x, p):
        return pow(x, p, self.modulus)

    # Modular inverse using the extended Euclidean algorithm
    def inv(self, a):
        if a == 0:
            return 0
        lm, hm = 1, 0
        low, high = a % self.modulus, self.modulus
        while low > 1:
            r = high // low
            nm, new = hm - lm * r, high - low * r
            lm, low, hm, high = nm, new, lm, low
        return lm % self.modulus

    # Batch-invert a list of values at the cost of a single inversion
    def multi_inv(self, values):
        partials = [1]
        for i in range(len(values)):
            partials.append(self.mul(partials[-1], values[i] or 1))
        inv = self.inv(partials[-1])
        outputs = [0] * len(values)
        for i in range(len(values), 0, -1):
            outputs[i-1] = self.mul(partials[i-1], inv) if values[i-1] else 0
            inv = self.mul(inv, values[i-1] or 1)
        return outputs

    def div(self, x, y):
        return self.mul(x, self.inv(y))

    # Evaluate a polynomial at a point
    def eval_poly_at(self, p, x):
        y = 0
        power_of_x = 1
        for i, p_coeff in enumerate(p):
            y += power_of_x * p_coeff
            power_of_x = (power_of_x * x) % self.modulus
        return y % self.modulus

    # Arithmetic for polynomials
    def add_polys(self, a, b):
        return [((a[i] if i < len(a) else 0) + (b[i] if i < len(b) else 0))
                % self.modulus for i in range(max(len(a), len(b)))]

    def sub_polys(self, a, b):
        return [((a[i] if i < len(a) else 0) - (b[i] if i < len(b) else 0))
                % self.modulus for i in range(max(len(a), len(b)))]

    def mul_by_const(self, a, c):
        return [(x * c) % self.modulus for x in a]

    def mul_polys(self, a, b):
        o = [0] * (len(a) + len(b) - 1)
        for i, aval in enumerate(a):
            for j, bval in enumerate(b):
                o[i+j] += aval * bval
        return [x % self.modulus for x in o]

    def div_polys(self, a, b):
        assert len(a) >= len(b)
        a = [x for x in a]
        o = []
        apos = len(a) - 1
        bpos = len(b) - 1
        diff = apos - bpos
        while diff >= 0:
            quot = self.div(a[apos], b[bpos])
            o.insert(0, quot)
            for i in range(bpos, -1, -1):
                a[diff+i] -= b[i] * quot
            apos -= 1
            diff -= 1
        return [x % self.modulus for x in o]

    def mod_polys(self, a, b):
        return self.sub_polys(a, self.mul_polys(b, self.div_polys(a, b)))[:len(b)-1]

    # Build a polynomial from a few coefficients
    def sparse(self, coeff_dict):
        o = [0] * (max(coeff_dict.keys()) + 1)
        for k, v in coeff_dict.items():
            o[k] = v % self.modulus
        return o

    # Build a polynomial that returns 0 at all specified xs
    def zpoly(self, xs):
        root = [1]
        for x in xs:
            root.insert(0, 0)
            for j in range(len(root)-1):
                root[j] -= root[j+1] * x
        return [x % self.modulus for x in root]

    # Given p+1 x and y values with no errors, recovers the original
    # polynomial of degree at most p.
    # Lagrange interpolation works roughly in the following way.
    # 1. Suppose you have a set of points, eg. x = [1, 2, 3], y = [2, 5, 10]
    # 2. For each x, generate a polynomial which equals its corresponding
    #    y coordinate at that point and 0 at all other points provided.
    # 3. Add these polynomials together.
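    # Worked example: lagrange_interp([1, 2, 3], [2, 5, 10]) returns
    # [1, 0, 1], i.e. the coefficients (lowest order first) of x^2 + 1.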
    def lagrange_interp(self, xs, ys):
        # Generate master numerator polynomial, eg. (x - x1) * (x - x2) * ... * (x - xn)
        root = self.zpoly(xs)
        assert len(root) == len(ys) + 1
        # print(root)
        # Generate per-value numerator polynomials, eg. for x=x2,
        # (x - x1) * (x - x3) * ... * (x - xn), by dividing the master
        # polynomial back by each x coordinate
        nums = [self.div_polys(root, [-x, 1]) for x in xs]
        # Generate denominators by evaluating numerator polys at each x
        denoms = [self.eval_poly_at(nums[i], xs[i]) for i in range(len(xs))]
        invdenoms = self.multi_inv(denoms)
        # Generate output polynomial, which is the sum of the per-value numerator
        # polynomials rescaled to have the right y values
        b = [0 for y in ys]
        for i in range(len(xs)):
            yslice = self.mul(ys[i], invdenoms[i])
            for j in range(len(ys)):
                if nums[i][j] and ys[i]:
                    b[j] += nums[i][j] * yslice
        return [x % self.modulus for x in b]

    # Optimized evaluation of a 4-coefficient (degree-3) polynomial
    def eval_quartic(self, p, x):
        xsq = x * x % self.modulus
        xcb = xsq * x
        return (p[0] + p[1] * x + p[2] * xsq + p[3] * xcb) % self.modulus

    # Optimized version of lagrange_interp restricted to 4-point inputs
    def lagrange_interp_4(self, xs, ys):
        x01, x02, x03, x12, x13, x23 = \
            xs[0] * xs[1], xs[0] * xs[2], xs[0] * xs[3], xs[1] * xs[2], xs[1] * xs[3], xs[2] * xs[3]
        m = self.modulus
        eq0 = [-x12 * xs[3] % m, (x12 + x13 + x23), -xs[1]-xs[2]-xs[3], 1]
        eq1 = [-x02 * xs[3] % m, (x02 + x03 + x23), -xs[0]-xs[2]-xs[3], 1]
        eq2 = [-x01 * xs[3] % m, (x01 + x03 + x13), -xs[0]-xs[1]-xs[3], 1]
        eq3 = [-x01 * xs[2] % m, (x01 + x02 + x12), -xs[0]-xs[1]-xs[2], 1]
        e0 = self.eval_poly_at(eq0, xs[0])
        e1 = self.eval_poly_at(eq1, xs[1])
        e2 = self.eval_poly_at(eq2, xs[2])
        e3 = self.eval_poly_at(eq3, xs[3])
        e01 = e0 * e1
        e23 = e2 * e3
        invall = self.inv(e01 * e23)
        inv_y0 = ys[0] * invall * e1 * e23 % m
        inv_y1 = ys[1] * invall * e0 * e23 % m
        inv_y2 = ys[2] * invall * e01 * e3 % m
        inv_y3 = ys[3] * invall * e01 * e2 % m
        return [(eq0[i] * inv_y0 + eq1[i] * inv_y1 + eq2[i] * inv_y2 + eq3[i] * inv_y3) % m for i in range(4)]

    # Optimized version of lagrange_interp restricted to 2-point inputs
    def lagrange_interp_2(self, xs, ys):
        m = self.modulus
        eq0 = [-xs[1] % m, 1]
        eq1 = [-xs[0] % m, 1]
        e0 = self.eval_poly_at(eq0, xs[0])
        e1 = self.eval_poly_at(eq1, xs[1])
        invall = self.inv(e0 * e1)
        inv_y0 = ys[0] * invall * e1
        inv_y1 = ys[1] * invall * e0
        return [(eq0[i] * inv_y0 + eq1[i] * inv_y1) % m for i in range(2)]

    # Batched version of lagrange_interp_4: interpolates many 4-point sets
    # at once, sharing the cost of inversion via multi_inv
    def multi_interp_4(self, xsets, ysets):
        data = []
        invtargets = []
        for xs, ys in zip(xsets, ysets):
            x01, x02, x03, x12, x13, x23 = \
                xs[0] * xs[1], xs[0] * xs[2], xs[0] * xs[3], xs[1] * xs[2], xs[1] * xs[3], xs[2] * xs[3]
            m = self.modulus
            eq0 = [-x12 * xs[3] % m, (x12 + x13 + x23), -xs[1]-xs[2]-xs[3], 1]
            eq1 = [-x02 * xs[3] % m, (x02 + x03 + x23), -xs[0]-xs[2]-xs[3], 1]
            eq2 = [-x01 * xs[3] % m, (x01 + x03 + x13), -xs[0]-xs[1]-xs[3], 1]
            eq3 = [-x01 * xs[2] % m, (x01 + x02 + x12), -xs[0]-xs[1]-xs[2], 1]
            e0 = self.eval_quartic(eq0, xs[0])
            e1 = self.eval_quartic(eq1, xs[1])
            e2 = self.eval_quartic(eq2, xs[2])
            e3 = self.eval_quartic(eq3, xs[3])
            data.append([ys, eq0, eq1, eq2, eq3])
            invtargets.extend([e0, e1, e2, e3])
        invalls = self.multi_inv(invtargets)
        o = []
        for (i, (ys, eq0, eq1, eq2, eq3)) in enumerate(data):
            invallz = invalls[i*4:i*4+4]
            inv_y0 = ys[0] * invallz[0] % m
            inv_y1 = ys[1] * invallz[1] % m
            inv_y2 = ys[2] * invallz[2] % m
            inv_y3 = ys[3] * invallz[3] % m
            o.append([(eq0[i] * inv_y0 + eq1[i] * inv_y1 + eq2[i] * inv_y2 + eq3[i] * inv_y3) % m for i in range(4)])
        # assert o == [self.lagrange_interp_4(xs, ys) for xs, ys in zip(xsets, ysets)]
        return o
@@ -1,95 +0,0 @@
try:
    from hashlib import blake2s
except ImportError:
    from pyblake2 import blake2s
blake = lambda x: blake2s(x).digest()
from py_ecc.optimized_bn128 import G1, G2, neg, add, multiply, FQ, FQ2, FQ12, pairing, \
    normalize, field_modulus, b, b2, is_on_curve, curve_order, final_exponentiate

def compress_G1(pt):
    x, y = normalize(pt)
    return x.n + 2**255 * (y.n % 2)

def decompress_G1(p):
    if p == 0:
        return (FQ(1), FQ(1), FQ(0))
    x = p % 2**255
    y_mod_2 = p // 2**255
    y = pow((x**3 + b.n) % field_modulus, (field_modulus+1)//4, field_modulus)
    assert pow(y, 2, field_modulus) == (x**3 + b.n) % field_modulus
    if y % 2 != y_mod_2:
        y = field_modulus - y
    return (FQ(x), FQ(y), FQ(1))
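# Compression packs x and the parity of y into a single integer: bit 255
# stores y % 2 and the lower bits store x. The square root above is computed
# as pow(a, (p+1)//4, p), which works because bn128's field_modulus is 3 mod 4.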
# 16th root of unity
hex_root = FQ2([21573744529824266246521972077326577680729363968861965890554801909984373949499,
                16854739155576650954933913186877292401521110422362946064090026408937773542853])

assert hex_root ** 8 != FQ2([1,0])
assert hex_root ** 16 == FQ2([1,0])

def sqrt_fq2(x):
    y = x ** ((field_modulus ** 2 + 15) // 32)
    while y**2 != x:
        y *= hex_root
    return y

cache = {}

def hash_to_G2(m):
    if m in cache:
        return cache[m]
    k2 = m
    while 1:
        k1 = blake(k2)
        k2 = blake(k1)
        x1 = int.from_bytes(k1, 'big') % field_modulus
        x2 = int.from_bytes(k2, 'big') % field_modulus
        x = FQ2([x1, x2])
        xcb = x**3 + b2
        if xcb ** ((field_modulus ** 2 - 1) // 2) == FQ2([1,0]):
            break
    y = sqrt_fq2(xcb)
    o = multiply((x, y, FQ2([1,0])), 2*field_modulus-curve_order)
    cache[m] = o
    return o
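# hash_to_G2 uses try-and-increment: keep re-hashing until x**3 + b2 is a
# square (the Euler-criterion check above), take its square root, then
# multiply by 2*field_modulus - curve_order to clear the cofactor so the
# result lands in the order-curve_order subgroup of the twist.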
def compress_G2(pt):
    assert is_on_curve(pt, b2)
    x, y = normalize(pt)
    return (x.coeffs[0] + 2**255 * (y.coeffs[0] % 2), x.coeffs[1])

def decompress_G2(p):
    x1 = p[0] % 2**255
    y1_mod_2 = p[0] // 2**255
    x2 = p[1]
    x = FQ2([x1, x2])
    if x == FQ2([0, 0]):
        return FQ2([1,0]), FQ2([1,0]), FQ2([0,0])
    y = sqrt_fq2(x**3 + b2)
    if y.coeffs[0] % 2 != y1_mod_2:
        y = y * -1
    assert is_on_curve((x, y, FQ2([1,0])), b2)
    return x, y, FQ2([1,0])

def sign(m, k):
    return compress_G2(multiply(hash_to_G2(m), k))

def privtopub(k):
    return compress_G1(multiply(G1, k))

def verify(m, pub, sig):
    return final_exponentiate(pairing(decompress_G2(sig), G1, False) * \
        pairing(hash_to_G2(m), neg(decompress_G1(pub)), False)) == FQ12.one()

def aggregate_sigs(sigs):
    o = FQ2([1,0]), FQ2([1,0]), FQ2([0,0])
    for s in sigs:
        o = add(o, decompress_G2(s))
    return compress_G2(o)

def aggregate_pubs(pubs):
    o = FQ(1), FQ(1), FQ(0)
    for p in pubs:
        o = add(o, decompress_G1(p))
    return compress_G1(o)
@@ -1,513 +0,0 @@
try:
    from hashlib import blake2s
except ImportError:
    from pyblake2 import blake2s
blake = lambda x: blake2s(x).digest()
import bls
import random
from bls import decompress_G1, aggregate_pubs, verify, sign, privtopub
from simpleserialize import deepcopy, serialize, to_dict

SHARD_COUNT = 20
ATTESTER_COUNT = 32
DEFAULT_BALANCE = 20000

class AggregateVote():
    fields = {
        'shard_id': 'int16',
        'shard_block_hash': 'hash32',
        'signer_bitmask': 'bytes',
        'aggregate_sig': ['int256']
    }
    defaults = {
        'shard_id': 0,
        'shard_block_hash': b'\x00'*32,
        'signer_bitmask': b'',
        'aggregate_sig': [0,0],
    }

    def __init__(self, **kwargs):
        for k in self.fields.keys():
            assert k in kwargs or k in self.defaults
            setattr(self, k, kwargs.get(k, self.defaults.get(k)))

class Block():

    fields = {
        # Hash of the parent block
        'parent_hash': 'hash32',
        # Number of skips (for the full PoS mechanism)
        'skip_count': 'int64',
        # Randao commitment reveal
        'randao_reveal': 'hash32',
        # Bitmask of who participated in the block notarization committee
        'attestation_bitmask': 'bytes',
        # Their aggregate sig
        'attestation_aggregate_sig': ['int256'],
        # Shard aggregate votes
        'shard_aggregate_votes': [AggregateVote],
        # Reference to main chain block
        'main_chain_ref': 'hash32',
        # Hash of the state
        'state_hash': 'bytes',
        # Signature from signer
        'sig': ['int256']
    }

    defaults = {
        'parent_hash': b'\x00'*32,
        'skip_count': 0,
        'randao_reveal': b'\x00'*32,
        'attestation_bitmask': b'',
        'attestation_aggregate_sig': [0,0],
        'shard_aggregate_votes': [],
        'main_chain_ref': b'\x00'*32,
        'state_hash': b'\x00'*32,
        'sig': [0,0]
    }

    def __init__(self, **kwargs):
        for k in self.fields.keys():
            assert k in kwargs or k in self.defaults
            setattr(self, k, kwargs.get(k, self.defaults.get(k)))

    def sign(self, key):
        self.sig = [0,0]
        self.sig = list(sign(serialize(self), key))

    def verify(self, pub):
        zig = self.sig
        self.sig = [0,0]
        o = verify(serialize(self), pub, tuple(zig))
        self.sig = zig
        return o
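# Signing convention: sign() and verify() first zero out the sig field, so
# the signature is always over the serialized block with sig = [0, 0].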
def get_shuffling(seed, validator_count, sample=None):
    assert validator_count <= 16777216
    rand_max = 16777216 - 16777216 % validator_count
    o = list(range(validator_count)); source = seed
    i = 0
    maxvalue = sample if sample is not None else validator_count
    while i < maxvalue:
        source = blake(source)
        for pos in range(0, 30, 3):
            m = int.from_bytes(source[pos:pos+3], 'big')
            remaining = validator_count - i
            if remaining == 0:
                break
            if validator_count < rand_max:
                replacement_pos = (m % remaining) + i
                o[i], o[replacement_pos] = o[replacement_pos], o[i]
                i += 1
    return o[:maxvalue]
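# Example: get_shuffling(seed, 100, 5) deterministically returns the first 5
# elements of a blake-driven Fisher-Yates shuffle of range(100), so every node
# computing it from the same seed selects the same committee.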
class ValidatorRecord():
    fields = {
        # The validator's public key
        'pubkey': 'int256',
        # What shard the validator's balance will be sent to after withdrawal
        'return_shard': 'int16',
        # And what address
        'return_address': 'address',
        # The validator's current RANDAO beacon commitment
        'randao_commitment': 'hash32',
        # Current balance
        'balance': 'int64',
        # Dynasty where the validator can (be inducted | be removed | withdraw)
        'switch_dynasty': 'int64'
    }
    defaults = {}

    def __init__(self, **kwargs):
        for k in self.fields.keys():
            assert k in kwargs or k in self.defaults
            setattr(self, k, kwargs.get(k, self.defaults.get(k)))

class PartialCrosslinkRecord():

    fields = {
        # What shard the crosslink is being made for
        'shard_id': 'int16',
        # Hash of the block
        'shard_block_hash': 'hash32',
        # Which of the eligible voters are voting for it (as a bitmask)
        'voter_bitmask': 'bytes'
    }
    defaults = {}

    def __init__(self, **kwargs):
        for k in self.fields.keys():
            assert k in kwargs or k in self.defaults, k
            setattr(self, k, kwargs.get(k, self.defaults.get(k)))

class ActiveState():

    fields = {
        # Block height
        'height': 'int64',
        # Global RANDAO beacon state
        'randao': 'hash32',
        # Which validators have made FFG votes this epoch (as a bitmask)
        'ffg_voter_bitmask': 'bytes',
        # Deltas to validator balances (to be processed at end of epoch)
        'balance_deltas': ['int48'],
        # Data about crosslinks-in-progress attempted in this epoch
        'partial_crosslinks': [PartialCrosslinkRecord],
        # Total number of skips (used to determine minimum timestamp)
        'total_skip_count': 'int64'
    }
    defaults = {'height': 0, 'randao': b'\x00'*32,
                'ffg_voter_bitmask': b'', 'balance_deltas': [],
                'partial_crosslinks': [], 'total_skip_count': 0}

    def __init__(self, **kwargs):
        for k in self.fields.keys():
            assert k in kwargs or k in self.defaults
            setattr(self, k, kwargs.get(k, self.defaults.get(k)))

class CrosslinkRecord():
    fields = {
        # What epoch the crosslink was submitted in
        'epoch': 'int64',
        # The block hash
        'hash': 'hash32'
    }
    defaults = {'epoch': 0, 'hash': b'\x00'*32}

    def __init__(self, **kwargs):
        for k in self.fields.keys():
            assert k in kwargs or k in self.defaults
            setattr(self, k, kwargs.get(k, self.defaults.get(k)))

class CrystallizedState():
    fields = {
        # List of active validators
        'active_validators': [ValidatorRecord],
        # List of joined but not yet inducted validators
        'queued_validators': [ValidatorRecord],
        # List of removed validators pending withdrawal
        'exited_validators': [ValidatorRecord],
        # The permutation of validators used to determine who cross-links
        # what shard in this epoch
        'current_shuffling': ['int24'],
        # The current epoch
        'current_epoch': 'int64',
        # The last justified epoch
        'last_justified_epoch': 'int64',
        # The last finalized epoch
        'last_finalized_epoch': 'int64',
        # The current dynasty
        'dynasty': 'int64',
        # The next shard that assignment for cross-linking will start from
        'next_shard': 'int16',
        # The current FFG checkpoint
        'current_checkpoint': 'hash32',
        # Records about the most recent crosslink for each shard
        'crosslink_records': [CrosslinkRecord],
        # Total balance of deposits
        'total_deposits': 'int256'
    }
    defaults = {'active_validators': [],
                'queued_validators': [],
                'exited_validators': [],
                'current_shuffling': [],
                'current_epoch': 0,
                'last_justified_epoch': 0,
                'last_finalized_epoch': 0,
                'dynasty': 0,
                'next_shard': 0,
                'current_checkpoint': b'\x00'*32,
                'crosslink_records': [],
                'total_deposits': 0}

    def __init__(self, **kwargs):
        for k in self.fields.keys():
            assert k in kwargs or k in self.defaults
            setattr(self, k, kwargs.get(k, self.defaults.get(k)))

def get_crosslink_aggvote_msg(shard_id, shard_block_hash, crystallized_state):
    return shard_id.to_bytes(2, 'big') + \
        shard_block_hash + \
        crystallized_state.current_checkpoint + \
        crystallized_state.current_epoch.to_bytes(8, 'big') + \
        crystallized_state.last_justified_epoch.to_bytes(8, 'big')

def get_attesters_and_signer(crystallized_state, active_state, skip_count):
    attestation_count = min(len(crystallized_state.active_validators), ATTESTER_COUNT)
    indices = get_shuffling(active_state.randao, len(crystallized_state.active_validators),
                            attestation_count + skip_count + 1)
    return indices[:attestation_count], indices[-1]

def get_shard_attesters(crystallized_state, shard_id):
    vc = len(crystallized_state.active_validators)
    return crystallized_state.current_shuffling[(vc * shard_id) // SHARD_COUNT: (vc * (shard_id + 1)) // SHARD_COUNT]

# Get rewards and vote data
def process_ffg_deposits(crystallized_state, ffg_voter_bitmask):
    total_validators = len(crystallized_state.active_validators)
    finality_distance = crystallized_state.current_epoch - crystallized_state.last_finalized_epoch
    online_reward = 6 if finality_distance <= 2 else 0
    offline_penalty = 3 * finality_distance
    total_vote_count = 0
    total_vote_deposits = 0
    deltas = [0] * total_validators
    for i in range(total_validators):
        if ffg_voter_bitmask[i // 8] & (128 >> (i % 8)):
            total_vote_deposits += crystallized_state.active_validators[i].balance
            deltas[i] += online_reward
            total_vote_count += 1
        else:
            deltas[i] -= offline_penalty
    print('Total voted: %d of %d validators (%.2f%%), %d of %d deposits (%.2f%%)' %
          (total_vote_count, total_validators, total_vote_count * 100 / total_validators,
           total_vote_deposits, crystallized_state.total_deposits, total_vote_deposits * 100 / crystallized_state.total_deposits))
    print('FFG online reward: %d, offline penalty: %d' % (online_reward, offline_penalty))
    print('Total deposit change from FFG: %d' % sum(deltas))
    # Check if we need to justify and finalize
    justify = total_vote_deposits * 3 >= crystallized_state.total_deposits * 2
    finalize = False
    if justify:
        print('Justifying last epoch')
        if crystallized_state.last_justified_epoch == crystallized_state.current_epoch - 1:
            finalize = True
            print('Finalizing last epoch')
    return deltas, total_vote_count, total_vote_deposits, justify, finalize
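# Bitmask convention: validator (or committee position) i corresponds to bit
# (128 >> (i % 8)) of byte i // 8, i.e. the most significant bit of each byte
# holds the lowest index. Example: bytearray(b'\xa0') marks positions 0 and 2.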
# Process rewards from crosslinks
def process_crosslinks(crystallized_state, crosslinks):
    # Find the most popular crosslink in each shard
    main_crosslink = {}
    for c in crosslinks:
        vote_count = 0
        mask = bytearray(c.voter_bitmask)
        for byte in mask:
            for j in range(8):
                vote_count += (byte >> j) % 2
        if vote_count > main_crosslink.get(c.shard_id, (b'', 0, b''))[1]:
            main_crosslink[c.shard_id] = (c.shard_block_hash, vote_count, mask)
    # Adjust crosslinks
    new_crosslink_records = [x for x in crystallized_state.crosslink_records]
    deltas = [0] * len(crystallized_state.active_validators)
    # Process shard by shard...
    for shard in range(SHARD_COUNT):
        indices = get_shard_attesters(crystallized_state, shard)
        # Get info about the dominant crosslink for this shard
        h, votes, mask = main_crosslink.get(shard, (b'', 0, bytearray((len(indices)+7)//8)))
        # Calculate rewards for participants and penalties for non-participants
        crosslink_distance = crystallized_state.current_epoch - crystallized_state.crosslink_records[shard].epoch
        online_reward = 3 if crosslink_distance <= 2 else 0
        offline_penalty = crosslink_distance * 2
        # Go through participants and evaluate rewards/penalties, crediting
        # the validator's global index (indices maps committee position to it)
        for i, index in enumerate(indices):
            if mask[i//8] & (128 >> (i % 8)):
                deltas[index] += online_reward
            else:
                deltas[index] -= offline_penalty
        print('Shard %d: most recent crosslink %d, reward: (%d, %d), votes: %d of %d (%.2f%%)'
              % (shard, crystallized_state.crosslink_records[shard].epoch, online_reward, -offline_penalty,
                 votes, len(indices), votes * 100 / len(indices)))
        # New crosslink
        if votes * 3 >= len(indices) * 2:
            new_crosslink_records[shard] = CrosslinkRecord(hash=h, epoch=crystallized_state.current_epoch)
            print('New crosslink %s' % hex(int.from_bytes(h, 'big')))
    print('Total deposit change from crosslinks: %d' % sum(deltas))
    return deltas, new_crosslink_records

def process_balance_deltas(crystallized_state, balance_deltas):
    deltas = [0] * len(crystallized_state.active_validators)
    # Each entry packs (validator_index << 24) + delta, with the low 24 bits
    # interpreted as a signed two's-complement value
    for i in balance_deltas:
        if i % 16777216 < 8388608:
            deltas[i >> 24] += i & 16777215
        else:
            deltas[i >> 24] += (i & 16777215) - 16777216
    print('Total deposit change from deltas: %d' % sum(deltas))
    return deltas

def get_incremented_validator_sets(crystallized_state, new_active_validators):
    new_active_validators = [v for v in new_active_validators]
    new_exited_validators = [v for v in crystallized_state.exited_validators]
    i = 0
    while i < len(new_active_validators):
        if new_active_validators[i].balance <= DEFAULT_BALANCE // 2:
            new_exited_validators.append(new_active_validators.pop(i))
        elif new_active_validators[i].switch_dynasty == crystallized_state.dynasty + 1:
            new_exited_validators.append(new_active_validators.pop(i))
        else:
            i += 1
    induct = min(len(crystallized_state.queued_validators), len(crystallized_state.active_validators) // 30 + 1)
    for i in range(induct):
        if crystallized_state.queued_validators[i].switch_dynasty > crystallized_state.dynasty + 1:
            induct = i
            break
        new_active_validators.append(crystallized_state.queued_validators[i])
    new_queued_validators = crystallized_state.queued_validators[induct:]
    return new_queued_validators, new_active_validators, new_exited_validators

def process_attestations(validator_set, attestation_indices, attestation_bitmask, msg, aggregate_sig):
    # Verify the attestations of the parent
    pubs = []
    balance_deltas = []
    assert len(attestation_bitmask) == (len(attestation_indices) + 7) // 8
    for i, index in enumerate(attestation_indices):
        if attestation_bitmask[i//8] & (128 >> (i % 8)):
            pubs.append(validator_set[index].pubkey)
            balance_deltas.append((index << 24) + 1)
    assert len(balance_deltas) <= 128
    assert verify(msg, aggregate_pubs(pubs), aggregate_sig)
    print('Verified aggregate sig')
    return balance_deltas

def update_ffg_and_crosslink_progress(crystallized_state, crosslinks, ffg_voter_bitmask, votes):
    # Verify the attestations of crosslink hashes
    crosslink_votes = {vote.shard_block_hash + vote.shard_id.to_bytes(2, 'big'):
                       vote.voter_bitmask for vote in crosslinks}
    new_ffg_bitmask = bytearray(ffg_voter_bitmask)
    total_voters = 0
    for vote in votes:
        attestation = get_crosslink_aggvote_msg(vote.shard_id, vote.shard_block_hash, crystallized_state)
        indices = get_shard_attesters(crystallized_state, vote.shard_id)
        votekey = vote.shard_block_hash + vote.shard_id.to_bytes(2, 'big')
        if votekey not in crosslink_votes:
            crosslink_votes[votekey] = bytearray((len(indices) + 7) // 8)
        bitmask = crosslink_votes[votekey]
        pubs = []
        for i, index in enumerate(indices):
            if vote.signer_bitmask[i//8] & (128 >> (i % 8)):
                pubs.append(crystallized_state.active_validators[index].pubkey)
                if new_ffg_bitmask[index//8] & (128 >> (index % 8)) == 0:
                    new_ffg_bitmask[index//8] ^= 128 >> (index % 8)
                    bitmask[i//8] ^= 128 >> (i % 8)
                    total_voters += 1
        assert verify(attestation, aggregate_pubs(pubs), vote.aggregate_sig)
        crosslink_votes[votekey] = bitmask
        print('Verified aggregate vote')
    new_crosslinks = [PartialCrosslinkRecord(shard_id=int.from_bytes(h[32:], 'big'),
                                             shard_block_hash=h[:32], voter_bitmask=crosslink_votes[h])
                      for h in sorted(crosslink_votes.keys())]
    return new_crosslinks, new_ffg_bitmask, total_voters

def compute_state_transition(parent_state, parent_block, block, verify_sig=True):
    crystallized_state, active_state = parent_state
    # Initialize a new epoch if needed
    if active_state.height % SHARD_COUNT == 0:
        print('Processing epoch transition')
        # Process rewards from FFG/crosslink votes
        new_validator_records = deepcopy(crystallized_state.active_validators)
        # Who voted in the last epoch
        ffg_voter_bitmask = bytearray(active_state.ffg_voter_bitmask)
        # Balance changes, and total vote counts for FFG
        deltas1, total_vote_count, total_vote_deposits, justify, finalize = \
            process_ffg_deposits(crystallized_state, ffg_voter_bitmask)
        # Balance changes, and total vote counts for crosslinks
        deltas2, new_crosslink_records = process_crosslinks(crystallized_state, active_state.partial_crosslinks)
        # Process other balance deltas
        deltas3 = process_balance_deltas(crystallized_state, active_state.balance_deltas)
        for i, v in enumerate(new_validator_records):
            v.balance += deltas1[i] + deltas2[i] + deltas3[i]
        total_deposits = crystallized_state.total_deposits + sum(deltas1 + deltas2 + deltas3)
        print('New total deposits: %d' % total_deposits)

        if finalize:
            new_queued_validators, new_active_validators, new_exited_validators = \
                get_incremented_validator_sets(crystallized_state, new_validator_records)
        else:
            new_queued_validators, new_active_validators, new_exited_validators = \
                crystallized_state.queued_validators, crystallized_state.active_validators, crystallized_state.exited_validators

        crystallized_state = CrystallizedState(
            queued_validators=new_queued_validators,
            active_validators=new_active_validators,
            exited_validators=new_exited_validators,
            current_shuffling=get_shuffling(active_state.randao, len(new_active_validators)),
            last_justified_epoch=crystallized_state.current_epoch if justify else crystallized_state.last_justified_epoch,
            last_finalized_epoch=crystallized_state.current_epoch-1 if finalize else crystallized_state.last_finalized_epoch,
            dynasty=crystallized_state.dynasty + (1 if finalize else 0),
            next_shard=0,
            current_epoch=crystallized_state.current_epoch + 1,
            crosslink_records=new_crosslink_records,
            total_deposits=total_deposits
        )
        # Reset the active state
        active_state = ActiveState(height=active_state.height,
                                   randao=active_state.randao,
                                   ffg_voter_bitmask=bytearray((len(crystallized_state.active_validators) + 7) // 8),
                                   balance_deltas=[],
                                   partial_crosslinks=[],
                                   total_skip_count=active_state.total_skip_count)
    # Process the block-by-block stuff

    # Determine who the attesters and the main signer are
    attestation_indices, main_signer = \
        get_attesters_and_signer(crystallized_state, active_state, block.skip_count)

    # Verify attestations
    balance_deltas = process_attestations(crystallized_state.active_validators,
                                          attestation_indices,
                                          block.attestation_bitmask,
                                          serialize(parent_block),
                                          block.attestation_aggregate_sig)
    # Reward the main signer
    balance_deltas.append((main_signer << 24) + len(balance_deltas))

    # Verify the main signature
    if verify_sig:
        assert block.verify(crystallized_state.active_validators[main_signer].pubkey)
        print('Verified main sig')

    # Update crosslink records
    new_crosslink_records, new_ffg_bitmask, voters = \
        update_ffg_and_crosslink_progress(crystallized_state, active_state.partial_crosslinks,
                                          active_state.ffg_voter_bitmask, block.shard_aggregate_votes)
    balance_deltas.append((main_signer << 24) + voters)

    o = ActiveState(height=active_state.height + 1,
                    randao=(int.from_bytes(active_state.randao, 'big') ^
                            int.from_bytes(block.randao_reveal, 'big')).to_bytes(32, 'big'),
                    total_skip_count=active_state.total_skip_count + block.skip_count,
                    partial_crosslinks=new_crosslink_records,
                    ffg_voter_bitmask=new_ffg_bitmask,
                    balance_deltas=active_state.balance_deltas + balance_deltas)

    return crystallized_state, o

def mk_genesis_state_and_block(pubkeys):
    c = CrystallizedState(
        active_validators=[ValidatorRecord(
            pubkey=pub,
            return_shard=0,
            return_address=blake(pub.to_bytes(32, 'big'))[-20:],
            randao_commitment=b'\x55'*32,
            balance=DEFAULT_BALANCE,
            switch_dynasty=9999999999999999999
        ) for pub in pubkeys],
        queued_validators=[],
        exited_validators=[],
        current_shuffling=get_shuffling(b'\x35'*32, len(pubkeys)),
        current_epoch=1,
        last_justified_epoch=0,
        last_finalized_epoch=0,
        dynasty=1,
        next_shard=0,
        current_checkpoint=blake(b'insert EOS constitution here'),
        crosslink_records=[CrosslinkRecord(hash=b'\x00'*32, epoch=0) for i in range(SHARD_COUNT)],
        total_deposits=DEFAULT_BALANCE*len(pubkeys))
    a = ActiveState(height=1,
                    randao=b'\x45'*32,
                    ffg_voter_bitmask=bytearray((len(c.active_validators) + 7) // 8),
                    balance_deltas=[],
                    partial_crosslinks=[],
                    total_skip_count=0)
    b = Block(parent_hash=b'\x00'*32,
              skip_count=0,
              randao_reveal=b'\x00'*32,
              attestation_bitmask=b'',
              attestation_aggregate_sig=[0,0],
              shard_aggregate_votes=[],
              main_chain_ref=b'\x00'*32,
              state_hash=blake(serialize(c))+blake(serialize(a)),
              sig=[0,0]
              )
    return c, a, b
@@ -1,55 +0,0 @@
from hashlib import blake2s
import binascii

def hash(x): return blake2s(x).digest()

zerohashes = [b'\x00' * 32]
for i in range(1, 32):
    zerohashes.append(hash(zerohashes[i-1] + zerohashes[i-1]))

# Add a value to a Merkle tree by using the algo
# that stores a branch of sub-roots
def add_value(branch, index, value):
    i = 0
    while (index+1) % 2**(i+1) == 0:
        i += 1
    for j in range(0, i):
        value = hash(branch[j] + value)
        # branch[j] = zerohashes[j]
    branch[i] = value
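# Example: after add_value(branch, 0, v0) and add_value(branch, 1, v1),
# branch[1] == hash(v0 + v1), the root of the first two leaves; the branch
# stores at most one sub-root per tree level.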
# Compute a Merkle root the dumb way
def merkle_root(values):
    for h in range(32):
        if len(values) % 2 == 1:
            values.append(zerohashes[h])
        values = [hash(values[i] + values[i+1]) for i in range(0, len(values), 2)]
    return values[0]

def get_root_from_branch(branch, size):
    r = b'\x00' * 32
    for h in range(32):
        if (size >> h) % 2 == 1:
            r = hash(branch[h] + r)
        else:
            r = hash(r + zerohashes[h])
    return r

def branch_by_branch(values):
    branch = zerohashes[::]
    # Construct the tree using the branch-based algo
    for index, value in enumerate(values):
        add_value(branch, index, value)
    # Return the root
    return get_root_from_branch(branch, len(values))

testdata = [(i + 2**255).to_bytes(32, 'big') for i in range(10000)]

# The Merkle root algo assumes trailing all-zero leaves
assert merkle_root(testdata[:5]) == merkle_root(testdata[:5] + [b'\x00' * 32] * 5)

# Verify equivalence of the simple all-at-once method and the progressive method
assert branch_by_branch(testdata[:1]) == merkle_root(testdata[:1])
assert branch_by_branch(testdata[:2]) == merkle_root(testdata[:2])
assert branch_by_branch(testdata[:3]) == merkle_root(testdata[:3])
assert branch_by_branch(testdata[:5049]) == merkle_root(testdata[:5049])
@@ -1,88 +0,0 @@
def serialize(val, typ=None):
    if typ is None and hasattr(val, 'fields'):
        typ = type(val)
    if typ in ('hash32', 'address'):
        assert len(val) == (20 if typ == 'address' else 32)
        return val
    elif isinstance(typ, str) and typ[:3] == 'int':
        length = int(typ[3:])
        assert length % 8 == 0
        return val.to_bytes(length // 8, 'big')
    elif typ == 'bytes':
        return len(val).to_bytes(4, 'big') + val
    elif isinstance(typ, list):
        assert len(typ) == 1
        sub = b''.join([serialize(x, typ[0]) for x in val])
        return len(sub).to_bytes(4, 'big') + sub
    elif isinstance(typ, type):
        sub = b''.join([serialize(getattr(val, k), typ.fields[k]) for k in sorted(typ.fields.keys())])
        return len(sub).to_bytes(4, 'big') + sub
    raise Exception("Cannot serialize", val, typ)
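# Worked example of the 4-byte big-endian length prefix:
# serialize([3, 4], ['int16']) == b'\x00\x00\x00\x04' + b'\x00\x03' + b'\x00\x04'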
def _deserialize(data, start, typ):
    if typ in ('hash32', 'address'):
        length = 20 if typ == 'address' else 32
        assert len(data) >= start + length
        return data[start: start+length], start+length
    elif isinstance(typ, str) and typ[:3] == 'int':
        length = int(typ[3:])
        assert length % 8 == 0
        assert len(data) >= start + length // 8
        return int.from_bytes(data[start: start+length//8], 'big'), start+length//8
    elif typ == 'bytes':
        length = int.from_bytes(data[start:start+4], 'big')
        assert len(data) >= start + 4 + length
        return data[start+4: start+4+length], start+4+length
    elif isinstance(typ, list):
        assert len(typ) == 1
        length = int.from_bytes(data[start:start+4], 'big')
        pos, o = start + 4, []
        while pos < start + 4 + length:
            result, pos = _deserialize(data, pos, typ[0])
            o.append(result)
        assert pos == start + 4 + length
        return o, pos
    elif isinstance(typ, type):
        length = int.from_bytes(data[start:start+4], 'big')
        values = {}
        pos = start + 4
        for k in sorted(typ.fields.keys()):
            values[k], pos = _deserialize(data, pos, typ.fields[k])
        assert pos == start + 4 + length
        return typ(**values), pos
    raise Exception("Cannot deserialize", typ)

def deserialize(data, typ):
    return _deserialize(data, 0, typ)[0]

def eq(x, y):
    if hasattr(x, 'fields') and hasattr(y, 'fields'):
        for f in x.fields:
            if not eq(getattr(x, f), getattr(y, f)):
                print('Unequal:', x, y, f, getattr(x, f), getattr(y, f))
                return False
        return True
    else:
        return x == y

def deepcopy(x):
    if hasattr(x, 'fields'):
        vals = {}
        for f in x.fields.keys():
            vals[f] = deepcopy(getattr(x, f))
        return x.__class__(**vals)
    elif isinstance(x, list):
        return [deepcopy(y) for y in x]
    else:
        return x

def to_dict(x):
    if hasattr(x, 'fields'):
        vals = {}
        for f in x.fields.keys():
            vals[f] = to_dict(getattr(x, f))
        return vals
    elif isinstance(x, list):
        return [to_dict(y) for y in x]
    else:
        return x
@@ -1,53 +0,0 @@
from bls import G1, G2, hash_to_G2, compress_G1, compress_G2, \
    decompress_G1, decompress_G2, normalize, multiply, \
    sign, privtopub, aggregate_sigs, aggregate_pubs, verify

from simpleserialize import serialize, deserialize, eq

# NOTE: this import targets an older ActiveState schema (one with a
# `checkpoints` field and a CheckpointRecord class) than the full_pos
# version elsewhere in this diff
from full_pos import ActiveState, CheckpointRecord

for x in (1, 5, 124, 735, 127409812145, 90768492698215092512159, 0):
    print('Testing with privkey %d' % x)
    p1 = multiply(G1, x)
    p2 = multiply(G2, x)
    msg = str(x).encode('utf-8')
    msghash = hash_to_G2(msg)
    assert normalize(decompress_G1(compress_G1(p1))) == normalize(p1)
    assert normalize(decompress_G2(compress_G2(p2))) == normalize(p2)
    assert normalize(decompress_G2(compress_G2(msghash))) == normalize(msghash)
    sig = sign(msg, x)
    pub = privtopub(x)
    assert verify(msg, pub, sig)

print('Testing signature aggregation')
msg = b'cow'
keys = [1, 5, 124, 735, 127409812145, 90768492698215092512159, 0]
sigs = [sign(msg, k) for k in keys]
pubs = [privtopub(k) for k in keys]
aggsig = aggregate_sigs(sigs)
aggpub = aggregate_pubs(pubs)
assert verify(msg, aggpub, aggsig)

print('Testing basic serialization')

assert serialize(5, 'int8') == b'\x05'
assert deserialize(b'\x05', 'int8') == 5
assert serialize(2**32-3, 'int40') == b'\x00\xff\xff\xff\xfd'
assert deserialize(b'\x00\xff\xff\xff\xfd', 'int40') == 2**32-3
assert serialize(b'\x35'*20, 'address') == b'\x35'*20
assert deserialize(b'\x35'*20, 'address') == b'\x35'*20
assert serialize(b'\x35'*32, 'hash32') == b'\x35'*32
assert deserialize(b'\x35'*32, 'hash32') == b'\x35'*32
assert serialize(b'cow', 'bytes') == b'\x00\x00\x00\x03cow'
assert deserialize(b'\x00\x00\x00\x03cow', 'bytes') == b'cow'

print('Testing advanced serialization')

s = ActiveState()
ds = deserialize(serialize(s, type(s)), type(s))
assert eq(s, ds)
s = ActiveState(checkpoints=[CheckpointRecord(checkpoint_hash=b'\x55'*32, bitmask=b'31337dawg')],
                height=555, randao=b'\x88'*32, balance_deltas=[5,7,9,579] + [3] * 333)
ds = deserialize(serialize(s, type(s)), type(s))
assert eq(s, ds)
@@ -1,97 +0,0 @@
from full_pos import blake, mk_genesis_state_and_block, compute_state_transition, \
    get_attesters_and_signer, Block, get_crosslink_aggvote_msg, AggregateVote, \
    SHARD_COUNT, ATTESTER_COUNT, get_shard_attesters
import random
import bls
from simpleserialize import serialize, deserialize, eq, deepcopy
import time

privkeys = [int.from_bytes(blake(str(i).encode('utf-8'))[:4], 'big') for i in range(100000)]
print('Generated privkeys')
keymap = {}
for i, k in enumerate(privkeys):
    keymap[bls.privtopub(k)] = k
    if i % 50 == 0:
        print("Generated %d keys" % i)

def mock_make_child(parent_state, parent, skips, attester_share=0.8, crosslink_shards=[]):
    crystallized_state, active_state = parent_state
    parent_attestation = serialize(parent)
    validator_count = len(crystallized_state.active_validators)
    indices, main_signer = get_attesters_and_signer(crystallized_state, active_state, skips)
    print('Selected indices: %r' % indices)
    print('Selected main signer: %d' % main_signer)
    # Randomly pick indices to include
    bitfield = [1 if random.random() < attester_share else 0 for i in indices]
    # Attestations
    sigs = [bls.sign(parent_attestation, keymap[crystallized_state.active_validators[indices[i]].pubkey])
            for i in range(len(indices)) if bitfield[i]]
    attestation_aggregate_sig = bls.aggregate_sigs(sigs)
    print('Aggregated sig')
    attestation_bitmask = bytearray((len(bitfield)-1) // 8 + 1)
    for i, b in enumerate(bitfield):
        attestation_bitmask[i//8] ^= (128 >> (i % 8)) * b
    print('Aggregate bitmask:', bin(int.from_bytes(attestation_bitmask, 'big')))
    # Randomly pick indices to include for crosslinks
    shard_aggregate_votes = []
    for shard, crosslinker_share in crosslink_shards:
        print('Making crosslink in shard %d' % shard)
        indices = get_shard_attesters(crystallized_state, shard)
        print('Indices: %r' % indices)
        bitfield = [1 if random.random() < crosslinker_share else 0 for i in indices]
        bitmask = bytearray((len(bitfield)+7) // 8)
        for i, b in enumerate(bitfield):
            bitmask[i//8] ^= (128 >> (i % 8)) * b
        print('Bitmask:', bin(int.from_bytes(bitmask, 'big')))
        shard_block_hash = blake(bytes([shard]))
        crosslink_attestation_hash = get_crosslink_aggvote_msg(shard, shard_block_hash, crystallized_state)
        sigs = [bls.sign(crosslink_attestation_hash, keymap[crystallized_state.active_validators[indices[i]].pubkey])
                for i in range(len(indices)) if bitfield[i]]
        v = AggregateVote(shard_id=shard,
                          shard_block_hash=shard_block_hash,
                          signer_bitmask=bitmask,
                          aggregate_sig=list(bls.aggregate_sigs(sigs)))
        shard_aggregate_votes.append(v)
    print('Added %d shard aggregate votes' % len(crosslink_shards))
    # State calculations
    o = Block(parent_hash=blake(parent_attestation),
              skip_count=skips,
              randao_reveal=blake(str(random.random()).encode('utf-8')),
              attestation_bitmask=attestation_bitmask,
              attestation_aggregate_sig=list(attestation_aggregate_sig),
              shard_aggregate_votes=shard_aggregate_votes,
              main_chain_ref=b'\x00'*32,
              state_hash=b'\x00'*64)
    print('Generated preliminary block header')
    new_crystallized_state, new_active_state = \
        compute_state_transition((crystallized_state, active_state), parent, o, verify_sig=False)
    print('Calculated state transition')
    if crystallized_state == new_crystallized_state:
        o.state_hash = blake(parent.state_hash[:32] + blake(serialize(new_active_state)))
    else:
        o.state_hash = blake(blake(serialize(new_crystallized_state)) + blake(serialize(new_active_state)))
    # Main signature
    o.sign(keymap[crystallized_state.active_validators[main_signer].pubkey])
    print('Signed')
    return o, new_crystallized_state, new_active_state

c, a, block = mk_genesis_state_and_block(keymap.keys())
print('Generated genesis state')
print('Crystallized state length:', len(serialize(c)))
print('Active state length:', len(serialize(a)))
print('Block size:', len(serialize(block)))
block2, c2, a2 = mock_make_child((c, a), block, 0, 0.8, [])
t = time.time()
assert compute_state_transition((c, a), block, block2)
print("Normal block (basic attestation only) processed in %.4f sec" % (time.time() - t))
print('Verified a block!')
block3, c3, a3 = mock_make_child((c2, a2), block2, 0, 0.8, [(0, 0.75)])
print('Verified a block with a committee!')
while a3.height % SHARD_COUNT > 0:
    block3, c3, a3 = mock_make_child((c3, a3), block3, 0, 0.8, [(a3.height, 0.6 + 0.02 * a3.height)])
    print('Height: %d' % a3.height)
print('FFG bitmask:', bin(int.from_bytes(a3.ffg_voter_bitmask, 'big')))
block4, c4, a4 = mock_make_child((c3, a3), block3, 1, 0.55, [])
t = time.time()
assert compute_state_transition((c3, a3), block3, block4)
print("Epoch transition processed in %.4f sec" % (time.time() - t))
binary_fft.py
@@ -1,243 +0,0 @@
def log2(x):
    return 0 if x <= 1 else 1 + log2(x // 2)

# Carry-less ("binary polynomial") multiplication over GF(2)
def raw_mul(a, b):
    if a*b == 0:
        return 0
    o = 0
    for i in range(log2(b) + 1):
        if b & (1<<i):
            o ^= a<<i
    return o

# Remainder of binary-polynomial division of a by b
def raw_mod(a, b):
    blog = log2(b)
    alog = log2(a)
    while alog >= blog:
        if a & (1<<alog):
            a ^= (b << (alog - blog))
        alog -= 1
    return a
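# Examples: raw_mul(0b11, 0b11) == 0b101 (XOR replaces addition, so there are
# no carries), and raw_mod(0b101, 0b111) == 0b10.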
class BinaryField():
    def __init__(self, modulus):
        self.modulus = modulus
        self.height = log2(self.modulus)
        self.order = 2**self.height - 1
        # Find a multiplicative generator and build log/antilog tables
        for base in range(2, modulus - 1):
            powers = [1]
            while (len(powers) == 1 or powers[-1] != 1) and len(powers) < self.order + 2:
                powers.append(raw_mod(raw_mul(powers[-1], base), self.modulus))
            powers.pop()
            if len(powers) == self.order:
                self.cache = powers
                self.invcache = [None] * (self.order + 1)
                for i, p in enumerate(powers):
                    self.invcache[p] = i
                return
        raise Exception("Bad modulus")
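    # Example: BinaryField(19) builds GF(16) with modulus x^4 + x + 1 (0b10011);
    # mul/div/inv then reduce to log/antilog table lookups.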
|
||||
def add(self, x, y):
|
||||
return x ^ y
|
||||
|
||||
sub = add
|
||||
|
||||
def mul(self, x, y):
|
||||
return 0 if x*y == 0 else self.cache[(self.invcache[x] + self.invcache[y]) % self.order]
|
||||
|
||||
def sqr(self, x):
|
||||
return 0 if x == 0 else self.cache[(self.invcache[x] * 2) % self.order]
|
||||
|
||||
def div(self, x, y):
|
||||
return 0 if x == 0 else self.cache[(self.invcache[x] - self.invcache[y]) % self.order]
|
||||
|
||||
def inv(self, x):
|
||||
return self.cache[(self.order - self.invcache[x]) % self.order]
|
||||
|
||||
def exp(self, x, p):
|
||||
return 1 if p == 0 else 0 if x == 0 else self.cache[(self.invcache[x] * p) % self.order]
|
||||
|
||||
def multi_inv(self, values):
|
||||
partials = [1]
|
||||
for i in range(len(values)):
|
||||
partials.append(self.mul(partials[-1], values[i] or 1))
|
||||
inv = self.inv(partials[-1])
|
||||
outputs = [0] * len(values)
|
||||
for i in range(len(values), 0, -1):
|
||||
outputs[i-1] = self.mul(partials[i-1], inv) if values[i-1] else 0
|
||||
inv = self.mul(inv, values[i-1] or 1)
|
||||
return outputs
|
||||
|
||||
def div(self, x, y):
|
||||
return self.mul(x, self.inv(y))
|
||||
|
||||
    # Evaluate a polynomial at a point
    def eval_poly_at(self, p, x):
        y = 0
        power_of_x = 1
        for i, p_coeff in enumerate(p):
            y ^= self.mul(power_of_x, p_coeff)
            power_of_x = self.mul(power_of_x, x)
        return y

    # Arithmetic for polynomials
    def add_polys(self, a, b):
        return [((a[i] if i < len(a) else 0) ^ (b[i] if i < len(b) else 0))
                for i in range(max(len(a), len(b)))]

    sub_polys = add_polys

    def mul_by_const(self, a, c):
        return [self.mul(x, c) for x in a]

    def mul_polys(self, a, b):
        o = [0] * (len(a) + len(b) - 1)
        for i, aval in enumerate(a):
            for j, bval in enumerate(b):
                o[i+j] ^= self.mul(a[i], b[j])
        return o

    def div_polys(self, a, b):
        assert len(a) >= len(b)
        a = [x for x in a]
        o = []
        apos = len(a) - 1
        bpos = len(b) - 1
        diff = apos - bpos
        while diff >= 0:
            quot = self.div(a[apos], b[bpos])
            o.insert(0, quot)
            for i in range(bpos, -1, -1):
                a[diff+i] ^= self.mul(b[i], quot)
            apos -= 1
            diff -= 1
        return o

    # Build a polynomial that returns 0 at all specified xs
    def zpoly(self, xs):
        root = [1]
        for x in xs:
            root.insert(0, 0)
            for j in range(len(root)-1):
                root[j] ^= self.mul(root[j+1], x)
        return root

    # Given p+1 y values and x values with no errors, recovers the original
    # degree-p polynomial.
    # Lagrange interpolation works roughly in the following way.
    # 1. Suppose you have a set of points, eg. x = [1, 2, 3], y = [2, 5, 10]
    # 2. For each x, generate a polynomial which equals its corresponding
    #    y coordinate at that point and 0 at all other points provided.
    # 3. Add these polynomials together.
    def lagrange_interp(self, xs, ys):
        # Generate master numerator polynomial, eg. (x - x1) * (x - x2) * ... * (x - xn)
        root = self.zpoly(xs)
        assert len(root) == len(ys) + 1
        # print(root)
        # Generate per-value numerator polynomials, eg. for x=x2,
        # (x - x1) * (x - x3) * ... * (x - xn), by dividing the master
        # polynomial back by each x coordinate
        nums = [self.div_polys(root, [x, 1]) for x in xs]
        # Generate denominators by evaluating numerator polys at each x
        denoms = [self.eval_poly_at(nums[i], xs[i]) for i in range(len(xs))]
        invdenoms = self.multi_inv(denoms)
        # Generate output polynomial, which is the sum of the per-value numerator
        # polynomials rescaled to have the right y values
        b = [0 for y in ys]
        for i in range(len(xs)):
            yslice = self.mul(ys[i], invdenoms[i])
            for j in range(len(ys)):
                if nums[i][j] and ys[i]:
                    b[j] ^= self.mul(nums[i][j], yslice)
        return b
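
# A quick sanity sketch for lagrange_interp (illustrative; uses the small
# field GF(2**10) with modulus 1033, the same field the test script later
# in this repo uses): interpolate three points and check the round trip.
_demo_field = BinaryField(1033)
_demo_xs, _demo_ys = [1, 2, 3], [5, 9, 11]
_demo_poly = _demo_field.lagrange_interp(_demo_xs, _demo_ys)
assert all(_demo_field.eval_poly_at(_demo_poly, x) == y
           for x, y in zip(_demo_xs, _demo_ys))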

def _simple_ft(field, vals):
    assert len(vals) == 2**field.height
    return [field.eval_poly_at(vals, i) for i in range(2**field.height)]

# Returns `evens` and `odds` such that:
# poly(x) = evens(x^2+kx) + x * odds(x^2+kx)
# poly(x+k) = evens(x^2+kx) + (x+k) * odds(x^2+kx)
#
# Note that this satisfies two other invariants:
#
# poly(x+k) - poly(x) = k * odds(x^2+kx)
# poly(x)*(x+k) - poly(x+k)*x = k * evens(x^2+kx)
def cast(field, poly, k):
    if len(poly) <= 2:
        return ([poly[0]], [poly[1] if len(poly) == 2 else 0])
    mod_power = 2
    while mod_power * 2 < len(poly):
        mod_power *= 2
    half_mod_power = mod_power // 2
    k_to_half_mod_power = field.exp(k, half_mod_power)
    low = poly + [0] * (mod_power * 2 - len(poly))
    high = low[len(low)-half_mod_power:]
    low = low[:len(low)-mod_power] + [low[i] ^ field.mul(low[i+half_mod_power], k_to_half_mod_power) for i in range(len(low)-mod_power, len(low)-half_mod_power)]
    high = low[len(low)-half_mod_power:] + high
    low = low[:len(low)-mod_power] + [low[i] ^ field.mul(low[i+half_mod_power], k_to_half_mod_power) for i in range(len(low)-mod_power, len(low)-half_mod_power)]
    low_cast = cast(field, low, k)
    high_cast = cast(field, high, k)
    return (low_cast[0] + high_cast[0], low_cast[1] + high_cast[1])

# Returns a polynomial p2 such that p2(x) = poly(x^2+kx)
def compose(field, poly, k):
    if len(poly) == 1:
        return poly + [0]
    mod_power = 1
    while mod_power * 2 < len(poly):
        mod_power *= 2
    k_to_mod_power = field.exp(k, mod_power)
    low = compose(field, poly[:mod_power], k) + [0] * mod_power * 3
    high = compose(field, poly[mod_power:], k) + [0] * mod_power * 3
    return [low[i] ^ field.mul(high[i-mod_power], k_to_mod_power) ^ high[i-2*mod_power] for i in range(mod_power*4)]

# Equivalent to [field.eval_poly_at(poly, x) for x in domain]
def fft(field, poly, domain):
    # Base case: constant polynomials
    if len(domain) == 1:
        return [poly[0]]
    # Split the domain into two cosets A and B, where for x in A, x+offset is in B
    offset = domain[1]
    # Get evens, odds such that:
    # poly(x) = evens(x^2+offset*x) + x * odds(x^2+offset*x)
    # poly(x+k) = evens(x^2+offset*x) + (x+k) * odds(x^2+offset*x)
    evens, odds = cast(field, poly, offset)
    # The smaller domain D = [x**2 - offset*x for x in A] = [x**2 - offset*x for x in B]
    casted_domain = [field.mul(x, offset ^ x) for x in domain][::2]
    # Two half-size sub-problems over the smaller domain, recovering
    # evaluations of evens and odds over the smaller domain
    even_points = fft(field, evens, casted_domain)
    odd_points = fft(field, odds, casted_domain)
    # Combine the evaluations of evens and odds into evaluations of poly
    L = [e ^ field.mul(d, o) for d,e,o in zip(domain[::2], even_points, odd_points)]
    R = [e ^ field.mul(d, o) for d,e,o in zip(domain[1::2], even_points, odd_points)]
    return [R[i//2] if i%2 else L[i//2] for i in range(len(domain))]

# The inverse of fft; does the steps backwards
def invfft(field, vals, domain):
    # Base case: constant polynomials
    if len(domain) == 1:
        return [vals[0]]
    # Split the domain into two cosets A and B, where for x in A, x+offset is in B
    offset = domain[1]
    # Compute the evaluations of the evens and odds polynomials using the invariants:
    # poly(x+k) - poly(x) = k * odds(x^2+kx)
    # poly(x)*(x+k) - poly(x+k)*x = k * evens(x^2+kx)
    L, R = vals[::2], vals[1::2]
    even_points = [field.div(field.mul(l, d ^ offset) ^ field.mul(r, d), offset) for d, l, r in zip(domain[::2], L, R)]
    odd_points = [field.div(l ^ r, offset) for d, l, r in zip(domain[::2], L, R)]
    # The smaller domain D = [x**2 - offset*x for x in A] = [x**2 - offset*x for x in B]
    casted_domain = [field.mul(x, offset ^ x) for x in domain][::2]
    # Two half-size problems over the smaller domains, recovering
    # the polynomials evens and odds
    evens = invfft(field, even_points, casted_domain)
    odds = invfft(field, odd_points, casted_domain)
    # Given evens and odds where poly(x) = evens(x^2+offset*x) + x * odds(x^2+offset*x),
    # recover poly
    composed_evens = compose(field, evens, offset) + [0]
    composed_odds = compose(field, odds, offset) + [0]
    o = [composed_evens[i] ^ composed_odds[i-1] for i in range(len(vals))]
    return o
@@ -1,357 +0,0 @@
def log2(x):
    o = 0
    while x > 1:
        x //= 2
        o += 1
    return o

def is_power_of_2(x):
    return x > 0 and x&(x-1) == 0

def raw_mul(a, b):
    if a*b == 0:
        return 0
    o = 0
    for i in range(log2(b) + 1):
        if b & (1<<i):
            o ^= a<<i
    return o

def raw_mod(a, b):
    blog = log2(b)
    alog = log2(a)
    while alog >= blog:
        if a & (1<<alog):
            a ^= (b << (alog - blog))
        alog -= 1
    return a

class BinaryField():
    def __init__(self, modulus):
        self.modulus = modulus
        self.height = log2(self.modulus)
        self.order = 2**self.height - 1
        for base in range(2, min(modulus - 1, 80)):
            powers = [1]
            while (len(powers) == 1 or powers[-1] != 1) and len(powers) < self.order + 2:
                powers.append(raw_mod(raw_mul(powers[-1], base), self.modulus))
            powers.pop()
            if len(powers) == self.order:
                # Cache doubled so mul/div can index past self.order without a modulo
                self.cache = powers + powers
                self.invcache = [None] * (self.order + 1)
                for i, p in enumerate(powers):
                    self.invcache[p] = i
                return
        raise Exception("Bad modulus")

    def add(self, x, y):
        return x ^ y

    sub = add

    def mul(self, x, y):
        return 0 if x*y == 0 else self.cache[self.invcache[x] + self.invcache[y]]

    def sqr(self, x):
        return 0 if x == 0 else self.cache[(self.invcache[x] * 2) % self.order]

    def div(self, x, y):
        return 0 if x == 0 else self.cache[self.invcache[x] + self.order - self.invcache[y]]

    def inv(self, x):
        assert x != 0
        return self.cache[(self.order - self.invcache[x]) % self.order]

    def exp(self, x, p):
        return 1 if p == 0 else 0 if x == 0 else self.cache[(self.invcache[x] * p) % self.order]

    # Batch inversion (Montgomery's trick): all inverses with a single
    # call to inv(), with zero inputs passed through as zero outputs
    def multi_inv(self, values):
        partials = [1]
        for i in range(len(values)):
            partials.append(self.mul(partials[-1], values[i] or 1))
        inv = self.inv(partials[-1])
        outputs = [0] * len(values)
        for i in range(len(values), 0, -1):
            outputs[i-1] = self.mul(partials[i-1], inv) if values[i-1] else 0
            inv = self.mul(inv, values[i-1] or 1)
        return outputs

    # Note: this redefinition (via inv) supersedes the table-based div above
    def div(self, x, y):
        return self.mul(x, self.inv(y))
    # Evaluate a polynomial at a point
    def eval_poly_at(self, p, x):
        y = 0
        power_of_x = 1
        for i, p_coeff in enumerate(p):
            y ^= self.mul(power_of_x, p_coeff)
            power_of_x = self.mul(power_of_x, x)
        return y

    # Arithmetic for polynomials
    def add_polys(self, a, b):
        return [((a[i] if i < len(a) else 0) ^ (b[i] if i < len(b) else 0))
                for i in range(max(len(a), len(b)))]

    sub_polys = add_polys

    def mul_by_const(self, a, c):
        return [self.mul(x, c) for x in a]

    def mul_polys(self, a, b):
        o = [0] * (len(a) + len(b) - 1)
        for i, aval in enumerate(a):
            for j, bval in enumerate(b):
                o[i+j] ^= self.mul(a[i], b[j])
        return o

    def div_polys(self, a, b):
        assert len(a) >= len(b)
        a = [x for x in a]
        o = []
        apos = len(a) - 1
        bpos = len(b) - 1
        diff = apos - bpos
        while diff >= 0:
            quot = self.div(a[apos], b[bpos])
            o.insert(0, quot)
            for i in range(bpos, -1, -1):
                a[diff+i] ^= self.mul(b[i], quot)
            apos -= 1
            diff -= 1
        return o

    # Build a polynomial that returns 0 at all specified xs
    def zpoly(self, xs):
        root = [1]
        for x in xs:
            root.insert(0, 0)
            for j in range(len(root)-1):
                root[j] ^= self.mul(root[j+1], x)
        return root

    # Given p+1 y values and x values with no errors, recovers the original
    # degree-p polynomial.
    # Lagrange interpolation works roughly in the following way.
    # 1. Suppose you have a set of points, eg. x = [1, 2, 3], y = [2, 5, 10]
    # 2. For each x, generate a polynomial which equals its corresponding
    #    y coordinate at that point and 0 at all other points provided.
    # 3. Add these polynomials together.
    def lagrange_interp(self, xs, ys):
        # Generate master numerator polynomial, eg. (x - x1) * (x - x2) * ... * (x - xn)
        root = self.zpoly(xs)
        assert len(root) == len(ys) + 1
        # print(root)
        # Generate per-value numerator polynomials, eg. for x=x2,
        # (x - x1) * (x - x3) * ... * (x - xn), by dividing the master
        # polynomial back by each x coordinate
        nums = [self.div_polys(root, [x, 1]) for x in xs]
        # Generate denominators by evaluating numerator polys at each x
        denoms = [self.eval_poly_at(nums[i], xs[i]) for i in range(len(xs))]
        invdenoms = self.multi_inv(denoms)
        # Generate output polynomial, which is the sum of the per-value numerator
        # polynomials rescaled to have the right y values
        b = [0 for y in ys]
        for i in range(len(xs)):
            yslice = self.mul(ys[i], invdenoms[i])
            for j in range(len(ys)):
                if nums[i][j] and ys[i]:
                    b[j] ^= self.mul(nums[i][j], yslice)
        return b

def _simple_ft(field, domain, poly):
    return [field.eval_poly_at(poly, i) for i in domain]

# Returns `evens` and `odds` such that:
# poly(x) = evens(x**2+kx) + x * odds(x**2+kx)
# poly(x+k) = evens(x**2+kx) + (x+k) * odds(x**2+kx)
#
# Note that this satisfies two other invariants:
#
# poly(x+k) - poly(x) = k * odds(x**2+kx)
# poly(x)*(x+k) - poly(x+k)*x = k * evens(x**2+kx)
def cast(field, poly, k):
    if len(poly) <= 2:
        return ([poly[0]], [poly[1] if len(poly) == 2 else 0])
    assert is_power_of_2(len(poly))
    mod_power = len(poly)//2
    half_mod_power = mod_power // 2
    k_to_half_mod_power = field.exp(k, half_mod_power)
    # Calculate low = poly % (x**2 - k*x)**half_mod_power
    # and high = poly // (x**2 - k*x)**half_mod_power
    # Note that (x**2 - k*x)**n = x**2n - k**n * x**n in binary fields
    low_and_high = poly[::]
    for i in range(mod_power, half_mod_power*3):
        low_and_high[i] ^= field.mul(low_and_high[i+half_mod_power], k_to_half_mod_power)
    for i in range(half_mod_power, mod_power):
        low_and_high[i] ^= field.mul(low_and_high[i+half_mod_power], k_to_half_mod_power)
    # Recursively compute two half-size sub-problems, low and high
    low_cast = cast(field, low_and_high[:mod_power], k)
    high_cast = cast(field, low_and_high[mod_power:], k)
    # Combine the results
    return (low_cast[0] + high_cast[0], low_cast[1] + high_cast[1])
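
# Quick sanity sketch for the invariant above (illustrative; assumes the
# small field GF(2**10) with modulus 1033): for every x,
# poly(x) == evens(x**2+kx) + x * odds(x**2+kx), where x**2 + k*x factors
# as x * (x + k) because addition is XOR in a binary field.
_cf = BinaryField(1033)
_cpoly = [3, 1, 4, 1, 5, 9, 2, 6]     # length must be a power of two
_ck = 7
_cevens, _codds = cast(_cf, _cpoly, _ck)
for _cx in range(16):
    _cs = _cf.mul(_cx, _cx ^ _ck)     # x**2 + k*x == x * (x + k)
    assert _cf.eval_poly_at(_cpoly, _cx) == (
        _cf.eval_poly_at(_cevens, _cs) ^ _cf.mul(_cx, _cf.eval_poly_at(_codds, _cs)))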

# Returns a polynomial p2 such that p2(x) = poly(x**2+kx)
def compose(field, poly, k):
    if len(poly) == 2:
        return [poly[0], field.mul(poly[1], k), poly[1], 0]
    if len(poly) == 1:
        return poly + [0]
    # Largest mod_power=2**k such that mod_power >= len(poly)/2
    assert is_power_of_2(len(poly))
    mod_power = len(poly)//2
    k_to_mod_power = field.exp(k, mod_power)
    # Recursively compute two half-size sub-problems, the bottom and top half
    # of the polynomial
    low = compose(field, poly[:mod_power], k)
    high = compose(field, poly[mod_power:], k)
    # Combine them together, multiplying the top one by (x**2-k*x)**n
    # Note that (x**2 - k*x)**n = x**2n - k**n * x**n in binary fields
    o = [0] * len(poly) * 2
    for i, (L, H) in enumerate(zip(low, high)):
        o[i] ^= L
        o[i+mod_power] ^= field.mul(H, k_to_mod_power)
        o[i+2*mod_power] ^= H
    return o
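
# Sanity sketch for compose (illustrative; same small field as above):
# the result p2 satisfies p2(x) == poly(x**2 + k*x) for every x.
_cmf = BinaryField(1033)
_cmpoly = [1, 2, 3, 4]
_cmk = 5
_cmp2 = compose(_cmf, _cmpoly, _cmk)
for _cmx in range(16):
    _cms = _cmf.mul(_cmx, _cmx ^ _cmk)   # x**2 + k*x == x * (x + k)
    assert _cmf.eval_poly_at(_cmp2, _cmx) == _cmf.eval_poly_at(_cmpoly, _cms)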

# Equivalent to [field.eval_poly_at(poly, x) for x in domain]
# Special thanks to www.math.clemson.edu/~sgao/papers/GM10.pdf for insights
# though this algorithm is not exactly identical to any algorithm in the paper
def fft(field, domain, poly):
    # Base case: constant polynomials
    # if len(domain) == 1:
    #     return [poly[0]]
    if len(domain) <= 8:
        return _simple_ft(field, domain, poly)
    # Split the domain into two cosets A and B, where for x in A, x+offset is in B
    offset = domain[1]
    # Get evens, odds such that:
    # poly(x) = evens(x**2+offset*x) + x * odds(x**2+offset*x)
    # poly(x+k) = evens(x**2+offset*x) + (x+k) * odds(x**2+offset*x)
    evens, odds = cast(field, poly, offset)
    # The smaller domain D = [x**2 - offset*x for x in A] = [x**2 - offset*x for x in B]
    casted_domain = [field.mul(x, offset ^ x) for x in domain[::2]]
    # Two half-size sub-problems over the smaller domain, recovering
    # evaluations of evens and odds over the smaller domain
    even_points = fft(field, casted_domain, evens)
    odd_points = fft(field, casted_domain, odds)
    # Combine the evaluations of evens and odds into evaluations of poly
    o = []
    for i in range(len(domain)//2):
        o.append(even_points[i] ^ field.mul(domain[i*2], odd_points[i]))
        o.append(even_points[i] ^ field.mul(domain[i*2+1], odd_points[i]))
    return o

# The inverse of fft; does the steps backwards
def invfft(field, domain, vals):
    # Base case: constant polynomials
    if len(domain) == 1:
        return [vals[0]]
    # if len(domain) <= 4:
    #     return field.lagrange_interp(domain, vals)
    # Split the domain into two cosets A and B, where for x in A, x+offset is in B
    offset = domain[1]
    # Compute the evaluations of the evens and odds polynomials using the invariants:
    # poly(x+k) - poly(x) = k * odds(x**2+kx)
    # poly(x)*(x+k) - poly(x+k)*x = k * evens(x**2+kx)
    even_points = [0] * (len(vals)//2)
    odd_points = [0] * (len(vals)//2)
    for i in range(len(domain)//2):
        p_of_x, p_of_x_plus_k = vals[i*2], vals[i*2+1]
        x = domain[i*2]
        even_points[i] = field.div(field.mul(p_of_x, x ^ offset) ^ field.mul(p_of_x_plus_k, x), offset)
        odd_points[i] = field.div(p_of_x ^ p_of_x_plus_k, offset)
    casted_domain = [field.mul(x, offset ^ x) for x in domain[::2]]
    # Two half-size problems over the smaller domains, recovering
    # the polynomials evens and odds
    evens = invfft(field, casted_domain, even_points)
    odds = invfft(field, casted_domain, odd_points)
    # Given evens and odds where poly(x) = evens(x**2+offset*x) + x * odds(x**2+offset*x),
    # recover poly
    composed_evens = compose(field, evens, offset) + [0]
    composed_odds = [0] + compose(field, odds, offset)
    o = [composed_evens[i] ^ composed_odds[i] for i in range(len(vals))]
    return o

# shift_polys[i][j] is the 2**j degree coefficient of the polynomial that evaluates to [1,1...1, 0,0....0] with 2**(i-1) ones and 2**(i-1) zeroes
shift_polys = [[], [1], [32755, 32755], [52774, 60631, 8945], [38902, 5560, 44524, 12194], [55266, 46488, 60321, 5401, 40130], [21827, 32224, 51565, 15072, 8277, 64379], [59460, 15452, 60370, 24737, 20321, 35516, 39606], [42623, 56997, 25925, 15351, 16625, 47045, 38250, 17462], [7575, 27410, 32434, 22187, 28933, 15447, 37964, 38186, 4776], [39976, 61188, 42456, 2155, 6178, 34033, 52305, 14913, 2896, 48908], [6990, 12021, 36054, 16198, 17011, 14018, 58553, 13272, 25318, 5288, 21429], [16440, 34925, 14360, 22561, 43883, 36645, 7613, 26531, 8597, 59502, 61283, 53412]]

def invfft2(field, vals):
    if len(vals) == 1:
        return [vals[0]]
    L = invfft2(field, vals[:len(vals)//2])
    R = shift(field, invfft2(field, vals[len(vals)//2:]), len(vals)//2)
    o = [0] * len(vals)
    for j, (l, r) in enumerate(zip(L, R)):
        o[j] ^= l
        for i, coeff in enumerate(shift_polys[log2(len(vals))]):
            o[2**i+j] ^= field.mul(l ^ r, coeff)
    # print(vals, o)
    return o

# def invfft(field, domain, vals): return invfft2(field, vals)

# Multiplies two polynomials using the FFT method
def mul(field, domain, p1, p2):
    assert len(p1) <= len(domain) and len(p2) <= len(domain)
    values1 = fft(field, domain, p1)
    values2 = fft(field, domain, p2)
    values3 = [field.mul(v1, v2) for v1, v2 in zip(values1, values2)]
    return invfft(field, domain, values3)
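
# Sanity sketch for FFT-based multiplication (illustrative; small field):
# it should agree with the schoolbook mul_polys on zero-padded inputs.
_mf = BinaryField(1033)
_mdomain = list(range(16))
_mp1 = [1, 2, 3] + [0] * 5            # pad to a power-of-two length for cast
_mp2 = [4, 5] + [0] * 6
_mprod = mul(_mf, _mdomain, _mp1, _mp2)
_mref = _mf.mul_polys([1, 2, 3], [4, 5])
assert _mprod[:len(_mref)] == _mref and not any(_mprod[len(_mref):])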

# Generates the polynomial `p(x) = (x - xs[0]) * (x - xs[1]) * ...`
def zpoly(field, xs):
    if len(xs) == 0:
        # print([1], domain, xs)
        return [1]
    if len(xs) == 1:
        # print([xs[0], 1], domain, xs)
        return [xs[0], 1]
    domain = list(range(2**log2(max(xs)+1) * 2))
    offset = domain[1]
    zL = zpoly(field, xs[::2])
    zR = zpoly(field, xs[1::2])
    o = mul(field, domain, zL, zR)
    # print(o, domain, xs)
    return o

# Returns q(x) = p(x + k)
def shift(field, poly, k):
    if len(poly) == 1:
        return poly
    # Largest mod_power=2**k such that mod_power >= len(poly)/2
    assert is_power_of_2(len(poly))
    mod_power = len(poly)//2
    k_to_mod_power = field.exp(k, mod_power)
    # Calculate low = poly % (x+k)**mod_power
    # and high = poly // (x+k)**mod_power
    # Note that (x+k)**n = x**n + k**n for power-of-two powers in binary fields
    low_and_high = poly[::]
    for i in range(mod_power):
        low_and_high[i] ^= field.mul(low_and_high[i+mod_power], k_to_mod_power)
    return shift(field, low_and_high[:mod_power], k) + shift(field, low_and_high[mod_power:], k)
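
# Sanity sketch for shift (illustrative; small field): the returned q
# satisfies q(x) == poly(x + k), where + is XOR in a binary field.
_sf = BinaryField(1033)
_spoly = [7, 1, 3, 2]
_sk = 9
_sq = shift(_sf, _spoly, _sk)
for _sx in range(16):
    assert _sf.eval_poly_at(_sq, _sx) == _sf.eval_poly_at(_spoly, _sx ^ _sk)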

# Interpolates the polynomial where `p(xs[i]) = vals[i]`
def interpolate(field, xs, vals):
    domain_size = 2**(log2(max(xs)) + 1)
    assert domain_size * 2 <= 2**field.height
    domain = list(range(domain_size))
    big_domain = list(range(domain_size * 2))
    z = zpoly(field, [x for x in domain if x not in xs])
    # print("z = ", z)
    z_values = fft(field, big_domain, z)
    # print("z_values = ", z_values)
    p_times_z_values = [0] * len(domain)
    for v, d in zip(vals, xs):
        p_times_z_values[d] = field.mul(v, z_values[d])
    # print("p_times_z_values = ", p_times_z_values)
    p_times_z = invfft(field, domain, p_times_z_values)
    # print("p_times_z = ", p_times_z)
    shifted_p_times_z_values = fft(field, big_domain, p_times_z)[domain_size:]
    # print("shifted_p_times_z_values =", shifted_p_times_z_values)
    shifted_p_values = [field.div(x, y) for x,y in zip(shifted_p_times_z_values, z_values[domain_size:])]
    # print("shifted_p_values =", shifted_p_values)
    shifted_p = invfft(field, domain, shifted_p_values)
    return shift(field, shifted_p, domain_size)
@@ -1,164 +0,0 @@
# import fast_binary_fft as b
import binary_fft as b
f = b.BinaryField(65579)
from hashlib import sha256
def hash(x): return sha256(x).digest()

log2 = b.log2

# Alternative optimized/simplified inverse-FFT implementation

# shift_polys[i][j] is the 2**j degree coefficient of the polynomial that evaluates to [1,1...1, 0,0....0] with 2**(i-1) ones and 2**(i-1) zeroes
shift_polys = [[], [1], [32755, 32755], [52774, 60631, 8945], [38902, 5560, 44524, 12194], [55266, 46488, 60321, 5401, 40130], [21827, 32224, 51565, 15072, 8277, 64379], [59460, 15452, 60370, 24737, 20321, 35516, 39606], [42623, 56997, 25925, 15351, 16625, 47045, 38250, 17462], [7575, 27410, 32434, 22187, 28933, 15447, 37964, 38186, 4776], [39976, 61188, 42456, 2155, 6178, 34033, 52305, 14913, 2896, 48908], [6990, 12021, 36054, 16198, 17011, 14018, 58553, 13272, 25318, 5288, 21429], [16440, 34925, 14360, 22561, 43883, 36645, 7613, 26531, 8597, 59502, 61283, 53412]]

def invfft2(field, vals):
    if len(vals) == 1:
        return [vals[0]]
    L = invfft2(field, vals[:len(vals)//2])
    R = b.shift(field, invfft2(field, vals[len(vals)//2:]), len(vals)//2)
    o = [0] * len(vals)
    for j, (l, r) in enumerate(zip(L, R)):
        o[j] ^= l
        for i, coeff in enumerate(shift_polys[log2(len(vals))]):
            o[2**i+j] ^= field.mul(l ^ r, coeff)
    # print(vals, o)
    return o

# Alternative simplified but less efficient FFT implementation
def p_mod_shift(field, poly):
    shift_poly = shift_polys[log2(len(poly))]
    half_height = len(poly)//2
    low = poly[::]
    high = []
    while len(high) < half_height:
        high.insert(0, field.div(low[-1], shift_poly[-1]))
        for i, coeff in enumerate(shift_poly[:-1]):
            low[-half_height-1+2**i] ^= field.mul(high[0], coeff)
        low.pop()
    return high, low

def fft2(field, poly):
    # if len(poly) == 1:
    #     return [poly[0]]
    if len(poly) <= 8:
        return b._simple_ft(field, list(range(len(poly))), poly)
    # p(x) = high(x) * s(x) + low(x)
    high, low = p_mod_shift(field, poly)
    # p(x) = g(x) * (s(x)+1) + h(x) * s(x)
    g, h = low, [x^y for x,y in zip(high, low)]
    # print(poly, g, h)
    g_values = fft2(field, g)
    h_values = fft2(field, b.shift(field, h, len(poly)//2))
    return g_values + h_values

def eval_polynomial_at(poly, x):
    return f.eval_poly_at(poly, x)

def interpolate(xs, values):
    if xs == list(range(len(xs))):
        return invfft2(f, values)
    else:
        return b.interpolate(f, xs, values)

def next_power_of_two(n):
    return 2**log2(n-1) * 2

def multi_evaluate(xs, poly):
    # degree = next_power_of_two(max(xs)+1)
    extended_values = b.fft(f, list(range(next_power_of_two(max(xs)+1))), poly)
    # extended_values = fft2(f, poly + [0] * (degree - len(poly)))
    return [extended_values[x] for x in xs]

# Note: sound only when len(values) is a power of two, so that
# extended_length == len(values) and the tree is perfectly balanced
def get_merkle_root(values):
    extended_length = 2**log2(len(values))
    tree = [None] * extended_length + values + [b'\x00'*32] * (extended_length - len(values))
    for i in range(extended_length - 1, 0, -1):
        tree[i] = hash(tree[i*2] + tree[i*2+1])
    return tree[1]

def int_to_bytes(i, n):
    return i.to_bytes(n, 'little')

def bytes_to_int(b):
    return int.from_bytes(b, 'little')

def integer_squareroot(n: int) -> int:
    """
    The largest integer ``x`` such that ``x**2`` is less than or equal to ``n``.
    """
    assert n >= 0
    x = n
    y = (x + 1) // 2
    while y < x:
        x = y
        y = (x + n // x) // 2
    return x

# Minimal stand-ins so the annotations below are subscriptable
# (List[int], List[Bytes32], List[List[Bytes32]]) without importing typing
Bytes32 = 0
List = {int: 1, Bytes32: 2, 2: 2}
FIELD_ELEMENT_BITS = 16
ZERO_HASH = b'\x00' * 32

def fill(xs: List[int], values: List[int], length: int) -> List[int]:
    """
    Takes the minimal polynomial that returns values[i] at xs[i] and computes
    its outputs for all values in range(0, length)
    """
    poly = interpolate(xs, values)
    # o = values + multi_evaluate(list(range(len(values))), b.shift(f, poly, len(values)))
    # return o
    return multi_evaluate(list(range(length)), poly)
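
# Illustrative sketch: with xs == [0, 1, 2, 3] (a power-of-two prefix, so
# the fast invfft2 path is taken) fill() extends four known evaluations to
# all of range(8); the first four outputs should reproduce the inputs.
_fill_demo = fill([0, 1, 2, 3], [5, 9, 13, 2], 8)
assert _fill_demo[:4] == [5, 9, 13, 2]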

def fill_axis(xs: List[int], values: List[Bytes32], length: int) -> List[Bytes32]:
    """
    Interprets a series of 32-byte chunks as a series of ordered packages of field
    elements. For each i, treats the set of i'th field elements in each chunk as
    evaluations of a polynomial. Evaluates the polynomials on the extended domain
    range(0, length) and provides the 32-byte chunks that are the packages of the
    extended evaluations of every polynomial at each coordinate.
    """
    data = [[bytes_to_int(a[i: i + FIELD_ELEMENT_BITS//8]) for a in values] for i in range(0, 32, FIELD_ELEMENT_BITS//8)]
    newdata = [fill(xs, d, length) for d in data]
    return [b''.join([int_to_bytes(n[i], FIELD_ELEMENT_BITS//8) for n in newdata]) for i in range(length)]

def get_data_square(data: bytes) -> List[List[Bytes32]]:
    """
    Converts data into a 2**k x 2**k square, padding with zeroes if necessary.
    """
    print("Data length:", len(data))
    chunks = [data[i: i+32] for i in range(0, len(data), 32)]
    if len(chunks[-1]) < 32:
        chunks[-1] += b'\x00' * (32 - len(chunks[-1]))
    print("Chunks:", len(chunks))
    target_size = 4**(log2(len(chunks) - 1) // 2 + 1)
    chunks.extend([ZERO_HASH for i in range(len(chunks), target_size)])
    print("Extended to:", len(chunks))
    side_length = integer_squareroot(len(chunks))
    print("Side length:", side_length)
    return [chunks[i: i + side_length] for i in range(0, len(chunks), side_length)]

def extend_data_square(square: List[List[Bytes32]]) -> List[List[Bytes32]]:
    """
    Extends a 2**k x 2**k square to 2**(k+1) x 2**(k+1) using `fill_axis` to
    fill rows and columns.
    """
    L = len(square)
    # Extend each row
    square = [fill_axis(list(range(L)), row, L * 2) for row in square]
    # Flip rows and columns
    square = [[square[i][j] for i in range(len(square))] for j in range(len(square[0]))]
    # Extend each column
    square = [fill_axis(list(range(L)), row, L * 2) for row in square]
    # Flip back to row form
    square = [[square[i][j] for i in range(len(square))] for j in range(len(square[0]))]
    return square

def mk_data_root(data: bytes) -> Bytes32:
    """
    Computes the root of the package of rows and columns of a given piece of data.
    """
    square = extend_data_square(get_data_square(data))
    row_roots = [get_merkle_root(r) for r in square]
    transposed_square = [[square[i][j] for i in range(len(square))] for j in range(len(square))]
    column_roots = [get_merkle_root(r) for r in transposed_square]
    return hash(get_merkle_root(row_roots) + get_merkle_root(column_roots))
@@ -1,346 +0,0 @@
from libcpp.vector cimport vector

def is_power_of_2(int x):
    return x > 0 and x&(x-1) == 0

def log2(int x):
    cdef int o = 0
    while x > 1:
        x //= 2
        o += 1
    return o

def raw_mul(int a, int b):
    if a*b == 0:
        return 0
    cdef int o = 0
    for i in range(log2(b) + 1):
        if b & (1<<i):
            o ^= a<<i
    return o

def raw_mod(int a, int b):
    blog = log2(b)
    alog = log2(a)
    while alog >= blog:
        if a & (1<<alog):
            a ^= (b << (alog - blog))
        alog -= 1
    return a

cdef class BinaryField:

    cdef int modulus
    cdef int height
    cdef int order
    cdef vector[int] powers
    cdef vector[int] cache
    cdef vector[int] invcache

    def __init__(self, int modulus):
        self.modulus = modulus
        self.height = log2(self.modulus)
        self.order = 2**self.height - 1
        for base in range(2, min(modulus - 1, 80)):
            powers = [1]
            while (len(powers) == 1 or powers[-1] != 1) and len(powers) < self.order + 2:
                powers.append(raw_mod(raw_mul(powers[-1], base), self.modulus))
            powers.pop()
            if len(powers) == self.order:
                # Cache doubled so mul/div can index past self.order without a modulo
                self.cache = powers + powers
                self.invcache = [0] * (self.order + 1)
                for i, p in enumerate(powers):
                    self.invcache[p] = i
                return
        raise Exception("Bad modulus")

    def add(self, int x, int y):
        return x ^ y

    def sub(self, int x, int y):
        return x ^ y

    def mul(self, int x, int y):
        return 0 if x*y == 0 else self.cache[self.invcache[x] + self.invcache[y]]

    def sqr(self, int x):
        return 0 if x == 0 else self.cache[(self.invcache[x] * 2) % self.order]

    def div(self, int x, int y):
        return 0 if x == 0 else self.cache[self.invcache[x] + self.order - self.invcache[y]]

    def inv(self, int x):
        assert x != 0
        return self.cache[(self.order - self.invcache[x]) % self.order]

    def exp(self, int x, int p):
        return 1 if p == 0 else 0 if x == 0 else self.cache[(self.invcache[x] * p) % self.order]

    # Batch inversion (Montgomery's trick): all inverses with a single
    # call to inv(), with zero inputs passed through as zero outputs
    def multi_inv(self, vector[int] values):
        cdef vector[int] partials = [1]
        for i in range(len(values)):
            partials.push_back(self.mul(partials[len(partials)-1], values[i] or 1))
        cdef int inv = self.inv(partials[len(partials)-1])
        cdef vector[int] outputs = [0] * len(values)
        for i in range(len(values), 0, -1):
            outputs[i-1] = self.mul(partials[i-1], inv) if values[i-1] else 0
            inv = self.mul(inv, values[i-1] or 1)
        return outputs

    # Second definition of div (via inv), mirroring the pure-Python version
    def div(self, int x, int y):
        return self.mul(x, self.inv(y))

    # Evaluate a polynomial at a point
    def eval_poly_at(self, vector[int] p, int x):
        cdef int y = 0
        cdef int power_of_x = 1
        for i, p_coeff in enumerate(p):
            y ^= self.mul(power_of_x, p_coeff)
            power_of_x = self.mul(power_of_x, x)
        return y

    # Arithmetic for polynomials
    def add_polys(self, vector[int] a, vector[int] b):
        return [((a[i] if i < len(a) else 0) ^ (b[i] if i < len(b) else 0))
                for i in range(max(len(a), len(b)))]

    def mul_by_const(self, vector[int] a, int c):
        return [self.mul(x, c) for x in a]

    def mul_polys(self, vector[int] a, vector[int] b):
        cdef vector[int] o = [0] * (len(a) + len(b) - 1)
        for i, aval in enumerate(a):
            for j, bval in enumerate(b):
                o[i+j] ^= self.mul(a[i], b[j])
        return o

    def div_polys(self, vector[int] _a, vector[int] b):
        assert len(_a) >= len(b)
        cdef vector[int] a = [x for x in _a]
        cdef vector[int] o = []
        cdef int apos = len(a) - 1
        cdef int bpos = len(b) - 1
        cdef int diff = apos - bpos
        cdef int quot
        while diff >= 0:
            quot = self.div(a[apos], b[bpos])
            o = [quot] + o
            for i in range(bpos, -1, -1):
                a[diff+i] ^= self.mul(b[i], quot)
            apos -= 1
            diff -= 1
        return o

    # Build a polynomial that returns 0 at all specified xs
    def zpoly(self, vector[int] xs):
        cdef vector[int] root = [1]
        for x in xs:
            root = [0] + root
            for j in range(len(root)-1):
                root[j] ^= self.mul(root[j+1], x)
        return root

    # Given p+1 y values and x values with no errors, recovers the original
    # degree-p polynomial.
    # Lagrange interpolation works roughly in the following way.
    # 1. Suppose you have a set of points, eg. x = [1, 2, 3], y = [2, 5, 10]
    # 2. For each x, generate a polynomial which equals its corresponding
    #    y coordinate at that point and 0 at all other points provided.
    # 3. Add these polynomials together.
    def lagrange_interp(self, vector[int] xs, vector[int] ys):
        # Generate master numerator polynomial, eg. (x - x1) * (x - x2) * ... * (x - xn)
        cdef vector[int] root = self.zpoly(xs)
        assert len(root) == len(ys) + 1
        # print(root)
        # Generate per-value numerator polynomials, eg. for x=x2,
        # (x - x1) * (x - x3) * ... * (x - xn), by dividing the master
        # polynomial back by each x coordinate
        cdef vector[vector[int]] nums = [self.div_polys(root, [x, 1]) for x in xs]
        # Generate denominators by evaluating numerator polys at each x
        cdef vector[int] denoms = [self.eval_poly_at(nums[i], xs[i]) for i in range(len(xs))]
        cdef vector[int] invdenoms = self.multi_inv(denoms)
        # Generate output polynomial, which is the sum of the per-value numerator
        # polynomials rescaled to have the right y values
        cdef vector[int] b = [0 for y in ys]
        cdef int yslice
        for i in range(len(xs)):
            yslice = self.mul(ys[i], invdenoms[i])
            for j in range(len(ys)):
                if nums[i][j] and ys[i]:
                    b[j] ^= self.mul(nums[i][j], yslice)
        return b

def _simple_ft(field, vector[int] domain, vector[int] poly):
    cdef vector[int] o = [field.eval_poly_at(poly, i) for i in domain]
    return o

# Returns `evens` and `odds` such that:
# poly(x) = evens(x**2+kx) + x * odds(x**2+kx)
# poly(x+k) = evens(x**2+kx) + (x+k) * odds(x**2+kx)
#
# Note that this satisfies two other invariants:
#
# poly(x+k) - poly(x) = k * odds(x**2+kx)
# poly(x)*(x+k) - poly(x+k)*x = k * evens(x**2+kx)
def cast(field, poly, int k):
    if len(poly) <= 2:
        return ([poly[0]], [poly[1] if len(poly) == 2 else 0])
    assert is_power_of_2(len(poly))
    mod_power = len(poly)//2
    half_mod_power = mod_power // 2
    k_to_half_mod_power = field.exp(k, half_mod_power)
    # Calculate low = poly % (x**2 - k*x)**half_mod_power
    # and high = poly // (x**2 - k*x)**half_mod_power
    # Note that (x**2 - k*x)**n = x**2n - k**n * x**n in binary fields
    low_and_high = poly[::]
    for i in range(mod_power, half_mod_power*3):
        low_and_high[i] ^= field.mul(low_and_high[i+half_mod_power], k_to_half_mod_power)
    for i in range(half_mod_power, mod_power):
        low_and_high[i] ^= field.mul(low_and_high[i+half_mod_power], k_to_half_mod_power)
    # Recursively compute two half-size sub-problems, low and high
    low_cast = cast(field, low_and_high[:mod_power], k)
    high_cast = cast(field, low_and_high[mod_power:], k)
    # Combine the results
    return (low_cast[0] + high_cast[0], low_cast[1] + high_cast[1])

# Returns a polynomial p2 such that p2(x) = poly(x**2+kx)
def compose(field, poly, k):
    if len(poly) == 2:
        return [poly[0], field.mul(poly[1], k), poly[1], 0]
    if len(poly) == 1:
        return poly + [0]
    # Largest mod_power=2**k such that mod_power >= len(poly)/2
    assert is_power_of_2(len(poly))
    mod_power = len(poly)//2
    k_to_mod_power = field.exp(k, mod_power)
    # Recursively compute two half-size sub-problems, the bottom and top half
    # of the polynomial
    low = compose(field, poly[:mod_power], k)
    high = compose(field, poly[mod_power:], k)
    # Combine them together, multiplying the top one by (x**2-k*x)**n
    # Note that (x**2 - k*x)**n = x**2n - k**n * x**n in binary fields
    o = [0] * len(poly) * 2
    for i, (L, H) in enumerate(zip(low, high)):
        o[i] ^= L
        o[i+mod_power] ^= field.mul(H, k_to_mod_power)
        o[i+2*mod_power] ^= H
    return o

# Equivalent to [field.eval_poly_at(poly, x) for x in domain]
# Special thanks to www.math.clemson.edu/~sgao/papers/GM10.pdf for insights
# though this algorithm is not exactly identical to any algorithm in the paper
def fft(field, domain, poly):
    # Base case: constant polynomials
    # if len(domain) == 1:
    #     return [poly[0]]
    if len(domain) <= 8:
        return _simple_ft(field, domain, poly)
    # Split the domain into two cosets A and B, where for x in A, x+offset is in B
    offset = domain[1]
    # Get evens, odds such that:
    # poly(x) = evens(x**2+offset*x) + x * odds(x**2+offset*x)
    # poly(x+k) = evens(x**2+offset*x) + (x+k) * odds(x**2+offset*x)
    evens, odds = cast(field, poly, offset)
    # The smaller domain D = [x**2 - offset*x for x in A] = [x**2 - offset*x for x in B]
    casted_domain = [field.mul(x, offset ^ x) for x in domain[::2]]
    # Two half-size sub-problems over the smaller domain, recovering
    # evaluations of evens and odds over the smaller domain
    even_points = fft(field, casted_domain, evens)
    odd_points = fft(field, casted_domain, odds)
    # Combine the evaluations of evens and odds into evaluations of poly
    L = [e ^ field.mul(d, o) for d,e,o in zip(domain[::2], even_points, odd_points)]
    R = [e ^ field.mul(d, o) for d,e,o in zip(domain[1::2], even_points, odd_points)]
    return [R[i//2] if i%2 else L[i//2] for i in range(len(domain))]

# The inverse of fft; does the steps backwards
def invfft(field, domain, vals):
    # Base case: constant polynomials
    if len(domain) == 1:
        return [vals[0]]
    # if len(domain) <= 4:
    #     return field.lagrange_interp(domain, vals)
    # Split the domain into two cosets A and B, where for x in A, x+offset is in B
    offset = domain[1]
    # Compute the evaluations of the evens and odds polynomials using the invariants:
    # poly(x+k) - poly(x) = k * odds(x**2+kx)
    # poly(x)*(x+k) - poly(x+k)*x = k * evens(x**2+kx)
    L, R = vals[::2], vals[1::2]
    even_points = [field.div(field.mul(l, d ^ offset) ^ field.mul(r, d), offset) for d, l, r in zip(domain[::2], L, R)]
    odd_points = [field.div(l ^ r, offset) for d, l, r in zip(domain[::2], L, R)]
    # The smaller domain D = [x**2 - offset*x for x in A] = [x**2 - offset*x for x in B]
    casted_domain = [field.mul(x, offset ^ x) for x in domain][::2]
    # Two half-size problems over the smaller domains, recovering
    # the polynomials evens and odds
    evens = invfft(field, casted_domain, even_points)
    odds = invfft(field, casted_domain, odd_points)
    # Given evens and odds where poly(x) = evens(x**2+offset*x) + x * odds(x**2+offset*x),
    # recover poly
    composed_evens = compose(field, evens, offset) + [0]
    composed_odds = compose(field, odds, offset) + [0]
    o = [composed_evens[i] ^ composed_odds[i-1] for i in range(len(vals))]
    return o

# Multiplies two polynomials using the FFT method
def mul(field, domain, p1, p2):
    assert len(p1) <= len(domain) and len(p2) <= len(domain)
    values1 = fft(field, domain, p1)
    values2 = fft(field, domain, p2)
    values3 = [field.mul(v1, v2) for v1, v2 in zip(values1, values2)]
    return invfft(field, domain, values3)

# Generates the polynomial `p(x) = (x - xs[0]) * (x - xs[1]) * ...`
def zpoly(field, xs):
    if len(xs) == 0:
        # print([1], domain, xs)
        return [1]
    if len(xs) == 1:
        # print([xs[0], 1], domain, xs)
        return [xs[0], 1]
    # max(xs)+1 as in the pure-Python version, so the domain is large enough
    domain = list(range(2**log2(max(xs)+1) * 2))
    offset = domain[1]
    zL = zpoly(field, xs[::2])
    zR = zpoly(field, xs[1::2])
    o = mul(field, domain, zL, zR)
    # print(o, domain, xs)
    return o

# Returns q(x) = p(x + k)
def shift(field, poly, k):
    if len(poly) == 1:
        return poly
    # Largest mod_power=2**k such that mod_power >= len(poly)/2
    mod_power = 1
    while mod_power * 2 < len(poly):
        mod_power *= 2
    k_to_mod_power = field.exp(k, mod_power)
    # Calculate low = poly % (x+k)**mod_power
    # and high = poly // (x+k)**mod_power
    # Note that (x+k)**n = x**n + k**n for power-of-two powers in binary fields
    high = poly[mod_power:] + [0] * (2 * mod_power - len(poly))
    low = [poly[i] ^ field.mul(poly[i + mod_power], k_to_mod_power) for i in range(mod_power)]
    return shift(field, low, k) + shift(field, high, k)

# Interpolates the polynomial where `p(xs[i]) = vals[i]`
def interpolate(field, xs, vals):
    domain_size = 2**(log2(max(xs)) + 1)
    assert domain_size * 2 <= 2**field.height
    domain = list(range(domain_size))
    big_domain = list(range(domain_size * 2))
    z = zpoly(field, [x for x in domain if x not in xs])
    # print("z = ", z)
    z_values = fft(field, big_domain, z)
    # print("z_values = ", z_values)
    p_times_z_values = [0] * len(domain)
    for v, d in zip(vals, xs):
        p_times_z_values[d] = field.mul(v, z_values[d])
    # print("p_times_z_values = ", p_times_z_values)
    p_times_z = invfft(field, domain, p_times_z_values)
    # print("p_times_z = ", p_times_z)
    shifted_p_times_z_values = fft(field, big_domain, p_times_z)[domain_size:]
    # print("shifted_p_times_z_values =", shifted_p_times_z_values)
    shifted_p_values = [field.div(x, y) for x,y in zip(shifted_p_times_z_values, z_values[domain_size:])]
    # print("shifted_p_values =", shifted_p_values)
    shifted_p = invfft(field, domain, shifted_p_values)
    return shift(field, shifted_p, domain_size)
@@ -1,20 +0,0 @@
import binary_fft

bigf = binary_fft.BinaryField(1033)
poly = [x**9 % 1024 for x in range(1024)]
z = binary_fft.fft(bigf, range(1024), poly)
z2 = binary_fft._simple_ft(bigf, range(1024), poly)
assert z == z2
poly2 = binary_fft.invfft(bigf, range(1024), z)
assert poly2 == poly
print("Invfft and fft tests passed")
poly3 = [x**9 % 1024 for x in range(25)]
xs = [x*11 % 32 for x in range(25)]
ys = [bigf.eval_poly_at(poly3, x) for x in xs]
poly4 = binary_fft.interpolate(bigf, xs, ys)
assert poly4[:len(poly3)] == poly3
xs = [x*11 % 32 for x in range(1, 25)]
ys = [bigf.eval_poly_at(poly3, x) for x in xs]
poly5 = binary_fft.interpolate(bigf, xs, ys)
assert poly5[:len(poly3)] == poly3
print("Interpolation tests passed")
@@ -1,10 +0,0 @@
import ethereum_data_root as e
import sys
import os
from hashlib import sha256
datastream = b''.join([sha256(bytes([i])).digest() for i in range(256)])

if __name__ == '__main__':
    L = int(sys.argv[1])
    data = (datastream * (L // len(datastream) + 1))[:L]
    print(e.mk_data_root(data))
@@ -1,166 +0,0 @@
# The purpose of this script is to create an evolutionary
# model to study the equilibrium effects of Bitcoin Unlimited-style
# "emergent consensus". Note that the model is not yet quite
# complete as it does not take into account the benefits of
# mining "sister blocks" that steal transaction fees, though it
# does give a rough idea of what equilibrium behavior
# among the various miner policy dimensions (block accept size,
# override depth, block creation size) looks like
import random

# Block reward
REWARD = 1000
# Call this function to get a tx with the right fee
TX_FEE_DISTRIBUTION = lambda: (10000 // random.randrange(5, 250)) * 0.01
# TX_FEE_DISTRIBUTION = lambda: 20
# Propagation time
PROPTIME_FACTOR = 1

# List of tuples:
# (default limit, n-block limit, acceptance depth, creation limit)
strategies = []
for i in range(4):
    for j in range(4):
        strategies.append([2 + i * 2, 100, 3, 10 + j * 4])

class Block():
    def __init__(self, parent, size, fees, miner):
        self.hash = random.randrange(10**20)
        self.parent = parent
        self.score = 1 if self.parent is None else parent.score + 1
        self.miner = miner
        self.size = size
        self.fees = fees

class Miner():
    def __init__(self, strategy, id):
        self.limit, self.big_limit, self.accept_depth, self.creation_limit = strategy
        self.chain = {}
        self.big_chain = {}
        self.head = None
        self.big_head = None
        self.id = id
        self.future = {}
        self.children = {}
        self.created = 0

    def process_history(self, time):
        deletes = []
        for t in self.future:
            if t <= time:
                for b in self.future[t]:
                    self.process_block(b)
                deletes.append(t)
        for t in deletes:
            del self.future[t]

    def add_block(self, block, time):
        self.process_history(time)
        if time + int(block.size * PROPTIME_FACTOR) not in self.future:
            self.future[time + int(block.size * PROPTIME_FACTOR)] = [block]
        else:
            self.future[time + int(block.size * PROPTIME_FACTOR)].append(block)

    def process_block(self, block):
        if block.size <= self.limit and (block.parent is None or block.parent.hash in self.chain):
            self.chain[block.hash] = block
            if block.score > (self.head.score if self.head else 0):
                self.head = block
        if block.size <= self.big_limit and (block.parent is None or block.parent.hash in self.big_chain):
            self.big_chain[block.hash] = block
            if block.score > (self.big_head.score if self.big_head else 0):
                self.big_head = block
            if block.score > (self.head.score if self.head else 0) + self.accept_depth:
                self.head = block
                self.chain[block.hash] = block
        if block.parent and block.parent.hash not in self.chain and block.parent.hash not in self.big_chain:
            if block.parent.hash not in self.children:
                self.children[block.parent.hash] = [block]
            else:
                self.children[block.parent.hash].append(block)
        if block.hash in self.children:
            for c in self.children[block.hash]:
                self.process_block(c)
            del self.children[block.hash]

    def create_block(self, backlog, time):
        self.process_history(time)
        fees = sum(backlog[:self.creation_limit])
        # print('Creating block of size %d (fees %d, seq %d)' % (self.creation_limit, fees, self.head.score + 1 if self.head else 1))
        self.created += 1
        return Block(self.head, self.creation_limit, fees, self.id)


def simulate(strats):
    miners = [Miner(strat, i) for i, strat in enumerate(strats)]
    backlog = []
    for i in range(100000):
        if i % 10000 == 0:
            print('Progress %d' % i)
        backlog.append(TX_FEE_DISTRIBUTION())
        if random.random() < 0.01:
            backlog = sorted(backlog)[::-1]
        miner = random.choice(miners)
        b = miner.create_block(backlog, i)
        backlog = backlog[b.size:]
        for m in miners:
            m.add_block(b, i)
    rewards = [0] * len(miners)
    blocks = [0] * len(miners)
    h = miners[0].head
    sz = 0
    while h is not None:
        rewards[h.miner] += REWARD + h.fees
        blocks[h.miner] += 1
        h = h.parent
        sz += 1
    return rewards, blocks, [m.created for m in miners]

for r in range(200):
    tests = []
    for s in strategies:
        tests.append(s)
        tests.append((s[0] - 2, s[1], s[2], s[3]))
        tests.append((s[0] + 2, s[1], s[2], s[3]))
        tests.append((s[0], s[1], s[2] - 1, s[3]))
        tests.append((s[0], s[1], s[2] + 1, s[3]))
        tests.append((s[0], s[1], s[2], s[3] - 2))
        tests.append((s[0], s[1], s[2], s[3] + 2))
    NUM_TESTS = 7
    print('Starting simulation')
    results, blks, created = simulate(tests)
    for i, s in enumerate(strategies):
        base = results[i * NUM_TESTS]
        if results[i * NUM_TESTS + 1] < base < results[i * NUM_TESTS + 2]:
            print('Increasing base accept size beneficial at %r' % s)
            s[0] += 2
        if results[i * NUM_TESTS + 1] > base > results[i * NUM_TESTS + 2] and s[0] > 2:
            print('Decreasing base accept size beneficial at %r' % s)
            s[0] -= 2
        if results[i * NUM_TESTS + 3] < base < results[i * NUM_TESTS + 4]:
            print('Increasing override depth beneficial at %r' % s)
            s[2] += 1
        if results[i * NUM_TESTS + 3] > base > results[i * NUM_TESTS + 4] and s[2] > 1:
            print('Decreasing override depth beneficial at %r' % s)
            s[2] -= 1
        if results[i * NUM_TESTS + 5] < base < results[i * NUM_TESTS + 6]:
            print('Increasing creation size beneficial at %r' % s)
            s[3] += 2
        if results[i * NUM_TESTS + 5] > base > results[i * NUM_TESTS + 6] and s[3] > 2:
            print('Decreasing creation size beneficial at %r' % s)
            s[3] -= 2
    for s in strategies:
        print(s)
    print('Chain quality (per miner):', [(b * 100 // c) if c else 0 for b, c in zip(blks, created)])
    print('Chain quality (total, non-perturbed miners only):', sum(blks[::NUM_TESTS]) * 1.0 / sum(created[::NUM_TESTS]))
    if r % 20 == 0:
        print('Control round')
        results, blks, created = simulate(strategies)
        print('Chain quality (per miner):', [(b * 100 // c) if c else 0 for b, c in zip(blks, created)])
        print('Chain quality (total):', sum(blks) * 1.0 / sum(created))


# results = simulate(strategies)
# for s, r in zip(strategies, results):
#     print(s[0], s[3], r)
@@ -1,143 +0,0 @@
|
||||
# THIS IS EXPERIMENTAL CODE. DO NOT USE IN PRODUCTION!
|
||||
|
||||
from hashlib import sha256
|
||||
hash_to_int = lambda x: int.from_bytes(sha256(x).digest(), 'little')
|
||||
|
||||
from py_ecc import bn128 as curve
|
||||
POINT = tuple
|
||||
SIG2 = tuple
|
||||
SIG3 = tuple
|
||||
|
||||
def serialize_int(x):
|
||||
return x.to_bytes(32, 'little')
|
||||
|
||||
def serialize_point(pt):
|
||||
x, y = pt
|
||||
return x.n.to_bytes(32, 'little') + y.n.to_bytes(32, 'little')
|
||||
|
||||
def privtopub(key):
|
||||
return curve.multiply(curve.G1, key)

# Linear combination of a list of points and values
def lincomb(pts: list, values: list) -> POINT:
    o = curve.Z1
    for pt, value in zip(pts, values):
        o = curve.add(o, curve.multiply(pt, value))
    return o

# Make a Schnorr signature
def sign(msg: bytes, key: int) -> SIG2:
    r = hash_to_int(msg + serialize_int(key))
    R = curve.multiply(curve.G1, r)
    e = hash_to_int(serialize_point(R) + msg)
    s = (r - key * e) % curve.curve_order
    return (s, e)

# Verify a Schnorr signature
def verify(msg: bytes, KEY: POINT, sig: SIG2) -> bool:
    s, e = sig
    # Gs + Ke = G(r - ke) + Ke = Gr - Ke + Ke = R
    R = curve.add(curve.multiply(curve.G1, s), curve.multiply(KEY, e))
    new_e = hash_to_int(serialize_point(R) + msg)
    return new_e == e

# Make a 1-of-2 signature, knowing the first of 2 keys
def sign_firstof2(msg: bytes, key1: int, KEY2: POINT, BASE: POINT = curve.G1) -> SIG3:
    r1 = hash_to_int(msg + serialize_int(key1))
    R1 = curve.multiply(BASE, r1)
    e1 = hash_to_int(serialize_point(R1) + msg)
    s2 = hash_to_int(msg + serialize_int(key1) + b'\x01') % curve.curve_order
    R2 = curve.add(curve.multiply(BASE, s2), curve.multiply(KEY2, e1))
    new_e = hash_to_int(serialize_point(R2) + msg)
    s1 = (r1 - key1 * new_e) % curve.curve_order
    return (s1, s2, new_e)

# Make a 1-of-2 signature, knowing the second of 2 keys
def sign_secondof2(msg: bytes, KEY1: POINT, key2: int, BASE: POINT = curve.G1) -> SIG3:
    r2 = hash_to_int(msg + serialize_int(key2))
    R2 = curve.multiply(BASE, r2)
    e2 = hash_to_int(serialize_point(R2) + msg)
    s1 = hash_to_int(msg + serialize_int(key2) + b'\x01') % curve.curve_order
    R1 = curve.add(curve.multiply(BASE, s1), curve.multiply(KEY1, e2))
    new_e = hash_to_int(serialize_point(R1) + msg)
    s2 = (r2 - key2 * new_e) % curve.curve_order
    return (s1, s2, e2)

# Verify a 1-of-2 signature
def verify_1of2(msg: bytes, KEY1: POINT, KEY2: POINT, sig: SIG3, BASE: POINT = curve.G1) -> bool:
    s1, s2, e = sig
    R1 = curve.add(curve.multiply(BASE, s1), curve.multiply(KEY1, e))
    new_e = hash_to_int(serialize_point(R1) + msg)
    R2 = curve.add(curve.multiply(BASE, s2), curve.multiply(KEY2, new_e))
    newer_e = hash_to_int(serialize_point(R2) + msg)
    return newer_e == e

# Generate points C1, D1, C2, D2, which equal
# EITHER (A1*f, B1*f, A2*f, B2*f) OR (A2*f, B2*f, A1*f, B1*f)
# And generate a proof that this was done correctly
def prove_blind_and_swap(A1: POINT, B1: POINT, A2: POINT, B2: POINT, factor: int, swap=False):
    # Compute the blind-and-swap
    if not swap:
        C1, D1, C2, D2 = (curve.multiply(P, factor) for P in (A1, B1, A2, B2))
    else:
        C1, D1, C2, D2 = (curve.multiply(P, factor) for P in (A2, B2, A1, B1))
    # Fiat-Shamir to choose a random linear combination
    msg = b''.join(serialize_point(x) for x in (A1, B1, A2, B2, C1, C2, D1, D2))
    r = hash_to_int(msg + b'\x01')
    # Take that linear combination of the base
    BASE = lincomb((A1, B1, A2, B2), (1, r, r**2, r**3))
    # The PUB_NOSWAP point is the same linear combination of (C1, D1, C2, D2)
    # The PUB_WITHSWAP point is the same linear combination of (C2, D2, C1, D1)
    # If you are not swapping, then PUB_NOSWAP = factor * BASE
    # If you are swapping, then PUB_WITHSWAP = factor * BASE
    # So we have now transformed the problem into a 1-of-2 ring signature
    if not swap:
        PUB_WITHSWAP = lincomb((C2, D2, C1, D1), (1, r, r**2, r**3))
        proof = sign_firstof2(msg, factor, PUB_WITHSWAP, BASE)
    else:
        PUB_NOSWAP = lincomb((C1, D1, C2, D2), (1, r, r**2, r**3))
        proof = sign_secondof2(msg, PUB_NOSWAP, factor, BASE)
    return C1, D1, C2, D2, proof

# Verify a proof of a blind-and-swap operation
def verify_blind_and_swap(A1: POINT, B1: POINT, A2: POINT, B2: POINT,
                          C1: POINT, D1: POINT, C2: POINT, D2: POINT,
                          proof: SIG3):
    msg = b''.join(serialize_point(x) for x in (A1, B1, A2, B2, C1, C2, D1, D2))
    r = hash_to_int(msg + b'\x01')
    BASE = lincomb((A1, B1, A2, B2), (1, r, r**2, r**3))
    PUB_NOSWAP = lincomb((C1, D1, C2, D2), (1, r, r**2, r**3))
    PUB_WITHSWAP = lincomb((C2, D2, C1, D1), (1, r, r**2, r**3))
    return verify_1of2(msg, PUB_NOSWAP, PUB_WITHSWAP, proof, BASE)

def test():
    # Basic schnorr
    key1, key2 = 1337, 42069
    KEY1, KEY2 = privtopub(key1), privtopub(key2)
    sig = sign(b'cow', key1)
    assert verify(b'cow', KEY1, sig)
    print("Passed basic schnorr test")
    # 1-of-2 signatures
    BASE = curve.multiply(curve.G1, 1)
    firstof2_sig = sign_firstof2(b'cow', key1, KEY2, BASE)
    secondof2_sig = sign_secondof2(b'cow', KEY1, key2, BASE)
    assert verify_1of2(b'cow', KEY1, KEY2, firstof2_sig, BASE)
    assert verify_1of2(b'cow', KEY1, KEY2, secondof2_sig, BASE)
    print("Passed 1 of 2 signature test")
    # Blind-and-swap proofs
    A1, B1, A2, B2 = (curve.multiply(curve.G1, x) for x in (31337, 69042, 8675309, 299792458))
    factor = 5
    C1, D1, C2, D2, proof = prove_blind_and_swap(A1, B1, A2, B2, factor, False)
    FAKE_POINT = curve.multiply(curve.G1, 98765432123456789)
    assert (C1, D1, C2, D2) == tuple(curve.multiply(P, factor) for P in (A1, B1, A2, B2))
    assert verify_blind_and_swap(A1, B1, A2, B2, C1, D1, C2, D2, proof)
    assert not verify_blind_and_swap(A1, B1, A2, B2, C1, FAKE_POINT, C2, D2, proof)
    factor2 = 7
    E1, F1, E2, F2, proof = prove_blind_and_swap(C1, D1, C2, D2, factor2, True)
    assert (E1, F1, E2, F2) == tuple(curve.multiply(P, factor2) for P in (C2, D2, C1, D1))
    assert verify_blind_and_swap(C1, D1, C2, D2, E1, F1, E2, F2, proof)
    assert not verify_blind_and_swap(C1, D1, C2, D2, E1, F1, E2, FAKE_POINT, proof)
    print("Passed blind-and-swap test")

if __name__ == '__main__':
    test()
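
# A minimal self-contained sketch of the same sign/verify identity over a toy
# prime-order group: plain modular exponentiation stands in for curve
# multiplication, and the group parameters (p=23, q=11, g=2) are illustrative
# only, not taken from this file.
import hashlib as _hashlib

def _toy_hash_to_int(data: bytes) -> int:
    return int.from_bytes(_hashlib.sha256(data).digest(), 'little')

def _toy_schnorr_demo():
    p, q, g = 23, 11, 2   # g generates the order-11 subgroup of Z_23*
    key = 7
    KEY = pow(g, key, p)
    msg = b'cow'
    # Sign: R = g^r, e = H(R || msg), s = r - key*e (mod q)
    r = _toy_hash_to_int(msg + key.to_bytes(32, 'little')) % q
    R = pow(g, r, p)
    e = _toy_hash_to_int(R.to_bytes(32, 'little') + msg) % q
    s = (r - key * e) % q
    # Verify: g^s * KEY^e = g^(r - key*e) * g^(key*e) = g^r = R
    R2 = pow(g, s, p) * pow(KEY, e, p) % p
    assert _toy_hash_to_int(R2.to_bytes(32, 'little') + msg) % q == e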
@@ -1,53 +0,0 @@
import random, sys

def run(width, rounds, swaps_per_round, extract_positions):
    array = list(range(width))
    o = []
    for r in range(rounds):
        # Swap
        offsets = [(r*2+1)*i for i in range(swaps_per_round)]
        swap_indices = [(extract_positions[r-1] + offset) % width for offset in offsets]
        rotation = random.randrange(len(swap_indices))
        _buffer = [array[index] for index in swap_indices]
        _buffer = _buffer[rotation:] + _buffer[:rotation]
        for index, new_value in zip(swap_indices, _buffer):
            array[index] = new_value
        # Extract
        extraction_index = extract_positions[r]
        o.append(array[extraction_index])
        array[extraction_index] = width + r
    return o

def test(width, rounds, swaps_per_round, runs):
    extract_positions = [random.randrange(width) for _ in range(rounds + swaps_per_round)]
    outputs = [{} for _ in range(rounds)]
    for r in range(runs):
        output = run(width, rounds, swaps_per_round, extract_positions)
        if r % 10 == 0:
            print("Round {}".format(r))
        for store, val in zip(outputs, output):
            store[val] = store.get(val, 0) + 1
    thresholds = []
    for store in outputs:
        top_freqs = sorted(store.values(), reverse=True)
        # print(top_freqs)
        await_count = sum(top_freqs) * 0.2
        for i in range(len(top_freqs)):
            await_count -= top_freqs[i]
            if await_count <= 0:
                thresholds.append(i+1)
                break
        if thresholds[-1] == 1:
            print(top_freqs[:20])
        # print(thresholds[-1])
    return thresholds

if __name__ == '__main__':
    width, swaps_per_round, runs = int(sys.argv[1]), int(sys.argv[2]), 1000
    rounds = width * 4
    thresholds = test(width, rounds, swaps_per_round, runs)
    for i in range(0, rounds, 10):
        print("After {} rounds, need to DoS {} validators for 20% chance of killing proposer".format(i, thresholds[i]))
    second_half = thresholds[len(thresholds)//2:]
    print("Average of second half: {}".format(sum(second_half) / len(second_half)))
    print("Frequency of 1 in second half: {}".format(second_half.count(1) / len(second_half)))
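
# A standalone restatement of the threshold loop above: the "20%" number is
# the fewest positions an attacker must DoS to cover at least 20% of the
# probability mass over where the proposer ended up (toy frequencies below
# are illustrative).
def diffusion_threshold(freqs, fraction=0.2):
    top = sorted(freqs, reverse=True)
    need = sum(top) * fraction
    for i, f in enumerate(top):
        need -= f
        if need <= 0:
            return i + 1

assert diffusion_threshold([500, 300, 200]) == 1   # the top value alone covers 50%
assert diffusion_threshold([100] * 10) == 2        # two 10% buckets needed for 20%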
@@ -1,97 +0,0 @@
import copy, sys, random

LOG_LEVEL = [None]
FUNCTION = (lambda: 0).__class__

DEFAULTS = {
    'width': 256,
    'rounds': 512,
    'swaps_per_round': 15,
    'log_level': 1
}

def merge_probs(*probs):
    L = len(probs)
    #if isinstance(probs[0], list):
    #    return [sum(prob[i]) / len(probs)
    o = {}
    for prob in probs:
        for k, v in prob.items():
            o[k] = o.get(k, 0) + v / L
    return o

def simplify_dict(obj):
    o = {}
    for x in sorted(obj.keys(), key=lambda z: obj[z], reverse=True)[:4]:
        o[x] = round(obj[x], 3)
    if sum(o.values()) < 1:
        # Lump the remaining probability mass into 'other'
        o['other'] = round(1 - sum(o.values()), 3)
    return o

def log(contents, level):
    if level <= LOG_LEVEL[0]:
        if isinstance(contents, FUNCTION):
            contents = contents()
        print(contents)

def run(width, rounds, swaps_per_round):
    extract_positions = [random.randrange(width) for _ in range(rounds + swaps_per_round)]
    extract_offsets = [random.randrange(width//2) * 2 + 1 for _ in range(rounds + swaps_per_round)]
    array = [{i: 1} for i in range(width)]
    o = []
    if bin(width).count('1') != 1:
        raise Exception("Width must be a power of 2")
    if bin(swaps_per_round + 1).count('1') != 1:
        raise Exception("swaps_per_round must be a power of 2 minus 1")
    log_swaps = len(bin(swaps_per_round + 1)) - 3
    for r in range(rounds):
        log(" ------ ", 1)
        log("Round {}".format(r), 1)
        for depth in range(log_swaps):
            pivot = extract_positions[r - depth - 1]
            offset = extract_offsets[r - depth - 1]
            log("Depth {}: pivot {} offset {}".format(depth, pivot, offset), 2)
            for i in range(2**depth):
                L = (pivot + offset * i) % width
                R = (pivot + offset * (i + 2**depth)) % width
                log("Swapping {} and {}".format(L, R), 2)
                new_prob = merge_probs(array[L], array[R])
                array[L] = new_prob
                array[R] = new_prob
        extraction_index = extract_positions[r]
        o.append(array[extraction_index])
        array[extraction_index] = {width+r: 1}
        log("New index: {}, Pivot: {}, offset: {}".format(width+r, extraction_index, extract_offsets[r]), 2)
        log(lambda: [simplify_dict(x) for x in array], 3)
    return o

def test(width, rounds, swaps_per_round):
    thresholds = []
    outputs = run(width, rounds, swaps_per_round)
    for store in outputs:
        top_freqs = sorted(store.values(), reverse=True)
        await_count = sum(top_freqs) * 0.2
        for i in range(len(top_freqs)):
            await_count -= top_freqs[i]
            if await_count <= 0:
                thresholds.append(i+1)
                break
    return thresholds

if __name__ == '__main__':
    args = {}
    for x in sys.argv:
        if '=' in x:
            prefix, postfix = x[:x.index('=')], x[x.index('=')+1:]
            args[prefix] = int(postfix)
    for arg, val in DEFAULTS.items():
        if arg not in args:
            print("Arg {} defaulted to {}".format(arg, val))
            args[arg] = val
    LOG_LEVEL[0] = args['log_level']
    thresholds = test(args['width'], args['rounds'], args['swaps_per_round'])
    for i in range(0, args['rounds'], 10):
        print("After {} rounds, need to DoS {} validators for 20% chance of killing proposer".format(i, thresholds[i]))
    second_half = thresholds[len(thresholds)//2:]
    print("Average 20%-diffusion of proposer: {}".format(sum(second_half) / len(second_half)))
    print("Frequency of 20%-diffusion = 1: {}".format(second_half.count(1) / len(second_half)))
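
# A quick standalone check of the bookkeeping in merge_probs: merging two
# distributions gives each key half of its original mass (toy values).
def _merge_probs_demo():
    left, right = {'a': 1.0}, {'b': 0.5, 'c': 0.5}
    merged = {}
    for prob in (left, right):
        for k, v in prob.items():
            merged[k] = merged.get(k, 0) + v / 2
    assert merged == {'a': 0.5, 'b': 0.25, 'c': 0.25}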
@@ -1,256 +0,0 @@
from py_ecc import optimized_bls12_381 as b
from hashlib import sha256
from dataclasses import dataclass
from multicombs import lincomb
import time

# See page 25 and 29 of https://eprint.iacr.org/2020/1536.pdf and
# page 49-50 of https://eprint.iacr.org/2020/499.pdf

# ----------------------------------------------------------------------- #
# THIS IS AN EDUCATIONAL IMPLEMENTATION ONLY. DO NOT USE IN PRODUCTION!!! #
# ----------------------------------------------------------------------- #


BLS12_381_COFACTOR = 76329603384216526031706109802092473003


def hash(x):
    return sha256(x).digest()


# Creates the generator points. This is a public procedure that can be repeated
# by anyone, so it is NOT a trusted setup
def mk_generator_points(count):
    points = []
    x = b.FQ(1)
    while len(points) < count:
        y = (x ** 3 + b.b) ** ((b.field_modulus + 1) // 4)
        if b.is_on_curve((x, y, b.FQ(1)), b.b):
            points.append(b.multiply((x, y, b.FQ(1)), BLS12_381_COFACTOR))
        x += b.FQ(1)
    return points


# Commit to some polynomial
def commit(generator_points, poly):
    # Equivalent (but faster) to this:
    # reduce(
    #     b.add,
    #     [b.multiply(pt, cf) for pt, cf in zip(generator_points[:len(poly)], poly)],
    #     b.Z1
    # )
    return lincomb(generator_points[:len(poly)], poly, b.add, b.Z1)


# Returns True iff x is a power of two
def is_power_of_two(x):
    return x and (x & (x-1) == 0)


# Serializes an elliptic curve point. Used for Fiat-Shamir.
def serialize_point(pt):
    pt = b.normalize(pt)
    return pt[0].n.to_bytes(64, 'little') + pt[1].n.to_bytes(64, 'little')


# Returns the (left|right) half of something
def left_half(x):
    return x[:len(x)//2]


def right_half(x):
    return x[len(x)//2:]


# The data structure for a proof
@dataclass
class Proof:
    L: list
    R: list
    tip: int


# Prove that `commitment` actually is the commitment to a polynomial
# (it does not prove _which_ polynomial)
def prove(points, commitment, poly):
    assert is_power_of_two(len(poly))
    # Crop the base points to just what we need
    points = points[:len(poly)]
    # Left-side points for the proof
    L = []
    # Right-side points for the proof
    R = []
    # Fiat-Shamir randomness value
    r = hash(serialize_point(commitment))
    # log(n) rounds...
    while len(poly) > 1:
        # Generate the left-side and right-side points
        polyL, polyR = left_half(poly), right_half(poly)
        pointsL, pointsR = left_half(points), right_half(points)
        yL = commit(pointsR, polyL)
        yR = commit(pointsL, polyR)
        L.append(yL)
        R.append(yR)
        # Generate random coefficient for recombining the L and R and commitment
        r = hash(r + serialize_point(yL) + serialize_point(yR))
        a = int.from_bytes(r, 'little') % b.curve_order
        # print('a value: ', a)
        # Generate half-size polynomial and points for the next round
        poly = [(cL + cR * a) % b.curve_order for (cL, cR) in zip(polyL, polyR)]
        points = [b.add(b.multiply(pL, a), pR) for (pL, pR) in zip(pointsL, pointsR)]
        # print('intermediate commitment:', commit(points, poly))
    return Proof(L, R, poly[0])
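
# A scalar-only sketch of the invariant behind each folding round, under the
# assumption that we may model every base point by its (secret) discrete log
# g_i, so that a commitment is just sum(g_i * c_i) mod m (m is an illustrative
# prime, standing in for the curve order). The verifier's update below,
# new_commit = L + a*commit + a**2*R, is exactly this identity.
def _folding_invariant_demo():
    m = 2**61 - 1
    g = [5, 11, 17, 23]      # stand-ins for the base points
    c = [3, 1, 4, 1]         # polynomial coefficients
    C = sum(gi * ci for gi, ci in zip(g, c)) % m
    gL, gR = g[:2], g[2:]
    cL, cR = c[:2], c[2:]
    yL = sum(gi * ci for gi, ci in zip(gR, cL)) % m   # cross term, as in prove()
    yR = sum(gi * ci for gi, ci in zip(gL, cR)) % m
    a = 1234567                                        # challenge stand-in
    g2 = [(a * l + r) % m for l, r in zip(gL, gR)]     # folded "points"
    c2 = [(l + a * r) % m for l, r in zip(cL, cR)]     # folded polynomial
    assert sum(gi * ci for gi, ci in zip(g2, c2)) % m == (yL + a * C + a * a * yR) % m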


def verify(points, commitment, proof):
    # Crop the base points to just what we need
    points = points[:2**len(proof.L)]
    # Fiat-Shamir randomness value
    r = hash(serialize_point(commitment))
    # For verification, we need to generate the same random linear combination of
    # base points that the prover did. But because we don't need to use it until
    # the end, we do it more efficiently here: as we progress through the rounds,
    # we keep track of how many times each points[i] will appear in the final
    # result...
    points_coeffs = [1]
    # log(n) rounds, just like the prover...
    for i in range(len(proof.L)):
        r = hash(r + serialize_point(proof.L[i]) + serialize_point(proof.R[i]))
        # Generate random coefficient for recombining (same as the prover)
        a = int.from_bytes(r, 'little') % b.curve_order
        # print('a value: ', a)
        # Add L and R into the commitment, applying the appropriate coefficients
        commitment = b.add(
            proof.L[i],
            b.add(
                b.multiply(commitment, a),
                b.multiply(proof.R[i], a**2)
            )
        )
        # print('intermediate commitment:', commitment)
        # Update the coefficients (points_coeffs[i] = how many times points[i] will
        # appear in the single base point of the last round)
        points_coeffs = sum([[(x*a) % b.curve_order, x] for x in points_coeffs], [])
    # Finally, we do the linear combination
    combined_point = lincomb(points, points_coeffs, b.add, b.Z1)
    # Base case check: base_point * coefficient ?= commitment
    return b.eq(b.multiply(combined_point, proof.tip), commitment)
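
# A standalone check (same scalar model and illustrative modulus as the sketch
# above) that the points_coeffs expansion reproduces the prover's folded base
# point: after challenges a1, a2 the coefficient of points[i] is a product of
# challenges selected by the binary digits of i.
def _points_coeffs_demo():
    m = 2**61 - 1
    g = [5, 11, 17, 23]
    a1, a2 = 111, 222
    # Prover-side folding of the scalar "points"
    g1 = [(a1 * l + r) % m for l, r in zip(g[:2], g[2:])]
    final = (a2 * g1[0] + g1[1]) % m
    # Verifier-side coefficient tracking, exactly as in verify()
    coeffs = [1]
    for a in (a1, a2):
        coeffs = sum([[(x * a) % m, x] for x in coeffs], [])
    assert coeffs == [a1 * a2 % m, a1, a2, 1]
    assert sum(gi * ci for gi, ci in zip(g, coeffs)) % m == final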


# Prove that `commitment` actually is the commitment to a polynomial
# `p` such that `p(x) = y`
def prove_evaluation(points, commitment, poly, x, y):
    assert is_power_of_two(len(poly))
    # Crop the base points to just what we need. We add an additional base point,
    # which we will use to mix in the _evaluation_ of the polynomial.
    points, H = points[:len(poly)], points[len(poly)]
    # Alongside the base points, we track the powers of the x coordinate we are
    # proving an evaluation for. These points get manipulated in the same way as the
    # base points do.
    xpowers = [pow(x, i, b.curve_order) for i in range(len(poly))]
    # Left-side points for the proof
    L = []
    # Right-side points for the proof
    R = []
    # Fiat-Shamir randomness value
    r = hash(serialize_point(commitment) + x.to_bytes(32, 'little') + y.to_bytes(32, 'little'))
    # For security, we randomize H
    H = b.multiply(H, int.from_bytes(r, 'little') % b.curve_order)
    while len(poly) > 1:
        # Generate the left-side and right-side points, except we also mix in a similarly
        # constructed "commitment" that uses `H * powers of x` as its base instead of the
        # base points.
        polyL, polyR = left_half(poly), right_half(poly)
        pointsL, pointsR = left_half(points), right_half(points)
        xpowersL, xpowersR = left_half(xpowers), right_half(xpowers)
        yL = commit(pointsR, polyL)
        yR = commit(pointsL, polyR)
        L.append(b.add(yL, b.multiply(H, sum(xp * cf for xp, cf in zip(xpowersR, polyL)))))
        R.append(b.add(yR, b.multiply(H, sum(xp * cf for xp, cf in zip(xpowersL, polyR)))))
        # Generate random coefficient for recombining the L and R and commitment
        r = hash(r + serialize_point(L[-1]) + serialize_point(R[-1]))
        a = int.from_bytes(r, 'little') % b.curve_order
        # print('a value: ', a)
        # Generate half-size polynomial and points for the next round. Notice how we treat
        # the powers of x the same way that we do the base points
        poly = [(cL + cR * a) % b.curve_order for (cL, cR) in zip(polyL, polyR)]
        points = [b.add(b.multiply(pL, a), pR) for (pL, pR) in zip(pointsL, pointsR)]
        xpowers = [(xL * a + xR) % b.curve_order for (xL, xR) in zip(xpowersL, xpowersR)]
        # print('intermediate commitment:', b.add(commit(points, poly), b.multiply(H, sum(xp * cf for xp, cf in zip(xpowers, poly)))))
    return Proof(L, R, poly[0])


# Verify a proof of an evaluation made using the above protocol
def verify_evaluation(points, commitment, proof, x, y):
    # Crop the base points to just what we need. We add an additional base point,
    # which we will use to mix in the _evaluation_ of the polynomial.
    points, H = points[:2**len(proof.L)], points[2**len(proof.L)]
    # Powers of x, as in the prover (the verifier gets the size from the proof length)
    xpowers = [pow(x, i, b.curve_order) for i in range(2**len(proof.L))]
    # Fiat-Shamir randomness value
    r = hash(serialize_point(commitment) + x.to_bytes(32, 'little') + y.to_bytes(32, 'little'))
    # For security, we randomize H
    H = b.multiply(H, int.from_bytes(r, 'little') % b.curve_order)
    # We "mix in" H * the claimed evaluation P(x) = y. Notice that `H * P(x)` equals the
    # dot product of `H * powers of x` and the polynomial coefficients, so it has the
    # "same format" as the polynomial commitment itself. This allows us to verify the
    # evaluation using the same technique that we use to just prove that the commitment
    # is valid
    commitment = b.add(commitment, b.multiply(H, y))
    # Track the linear combination so we can generate the final-round point and xpower,
    # just as before
    points_coeffs = [1]
    for i in range(len(proof.L)):
        # Generate random coefficient for recombining (same as the prover)
        r = hash(r + serialize_point(proof.L[i]) + serialize_point(proof.R[i]))
        a = int.from_bytes(r, 'little') % b.curve_order
        # print('a value: ', a)
        # Add L and R into the commitment, applying the appropriate coefficients
        commitment = b.add(
            proof.L[i],
            b.add(
                b.multiply(commitment, a),
                b.multiply(proof.R[i], a**2)
            )
        )
        # print('intermediate commitment:', commitment)
        # Update the coefficients (as in basic verification above)
        points_coeffs = sum([[(c*a) % b.curve_order, c] for c in points_coeffs], [])
    # Finally, we do the linear combination; same one for base points and x powers
    combined_point = lincomb(points, points_coeffs, b.add, b.Z1)
    combined_x_powers = sum(p*c for p, c in zip(xpowers, points_coeffs))
    # Base case check: base_point * coefficient ?= commitment. Note that here we
    # have to also mix H * the combined xpower into the final base point
    return b.eq(
        b.add(
            b.multiply(combined_point, proof.tip),
            b.multiply(H, (proof.tip * combined_x_powers) % b.curve_order)
        ),
        commitment
    )


time_cache = [time.time()]


def get_time_delta():
    time_cache.append(time.time())
    return time_cache[-1] - time_cache[-2]


if __name__ == '__main__':
    get_time_delta()
    points = mk_generator_points(32)
    print("Generated generator points: {:.3f}s".format(get_time_delta()))
    poly = [3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5, 8, 9, 7, 9, 3]
    commitment = commit(points, poly)
    print("Simple commitment generated: {:.3f}s".format(get_time_delta()))
    proof = prove(points, commitment, poly)
    print("Proof generated: {:.3f}s".format(get_time_delta()))
    print(proof)
    assert verify(points, commitment, proof)
    print("Proof verified: {:.3f}s".format(get_time_delta()))
    proof2 = prove_evaluation(points, commitment, poly, 10, 3979853562951413)
    print("Evaluation proof generated: {:.3f}s".format(get_time_delta()))
    assert verify_evaluation(points, commitment, proof2, 10, 3979853562951413)
    print("Evaluation proof verified: {:.3f}s".format(get_time_delta()))
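
# A scalar sketch of the identity that makes the evaluation proof work: the
# dot product of the power vector (1, x, x^2, ...) with the coefficients is
# P(x), and folding the power vector with the same challenges as the points
# keeps it consistent with the folded polynomial (illustrative modulus).
def _evaluation_mixing_demo():
    m = 2**61 - 1
    poly = [3, 1, 4, 1]
    x = 10
    xpowers = [pow(x, i, m) for i in range(len(poly))]
    Px = sum(xp * cf for xp, cf in zip(xpowers, poly)) % m
    assert Px == 3 + 1*10 + 4*100 + 1*1000
    a = 987654321  # challenge stand-in
    poly2 = [(l + a * r) % m for l, r in zip(poly[:2], poly[2:])]
    xp2 = [(l * a + r) % m for l, r in zip(xpowers[:2], xpowers[2:])]
    # The folded dot product equals crossL + a*P(x) + a^2*crossR, which is why
    # L and R mix in H*sum(xpowersR*polyL) and H*sum(xpowersL*polyR) above
    crossL = sum(xp * cf for xp, cf in zip(xpowers[2:], poly[:2]))
    crossR = sum(xp * cf for xp, cf in zip(xpowers[:2], poly[2:]))
    lhs = sum(xp * cf for xp, cf in zip(xp2, poly2)) % m
    assert lhs == (crossL + a * Px + a * a * crossR) % m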
@@ -1,3 +0,0 @@
~calldatacopy(0, 0, 128)
~call(3000, 1, 0, 0, 128, 0, 32)
return(~mload(0) == 0xfe2ec957647679d210034b65e9c7db2452910b0c)
Binary file not shown.
Binary file not shown.
@@ -1,42 +0,0 @@
import serpent
import rlp
from ethereum import utils
from ethereum import tester
from ethereum import transactions

sighash = serpent.compile('sighash.se.py')

tests = [
    [b"\x01"],
    [b"\x80", "a"],
    [b"\x81", "b"],
    [b""],
    [b"", b"\x01", b""],
    [b"", b"\x81", b""],
    [b"dog", b"c" * 54, b"\x01"],
    [b"\x01", b"c" * 55, b"pig"],
    [b"moose", b"c" * 56, b"\x00"],
    [b'\x01', b'55555555555555555555555555555555', b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', b'', b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1b\x88\xa7\x85r\x1b3\x17\xcaP\x96\xca\xd3S\xfcgM\xec\xe0\xf5!\xc8\xb4m\xd9\xb7E\xf3\x81d\x87\x93VD\xe0Ej\xcd\xec\x80\x11\x86(qZ\x9b\x80\xbf\xce\xe5*\r\x9d.o\xcd\x11s\xc5\xbc\x8c\xcb\xb9\xa9 ']
]

s = tester.state()
c = s.evm(sighash, sender=tester.k0, endowment=0)

for test in tests:
    z = s.send(tester.k0, c, 0, rlp.encode(test))
    assert z == utils.sha3(rlp.encode(test[:-1]))
    print("Passed test, gas consumed: ", s.state.receipts[-1].gas_used - s.state.receipts[-2].gas_used - s.last_tx.intrinsic_gas_used)

# Create transaction
t = transactions.Transaction(0, 30 * 10**9, 2999999, '', 0, sighash)
t.startgas = t.intrinsic_gas_used + 50000 + 200 * len(sighash)
t.v = 27
t.r = 45
t.s = 79
print("Sighash")
print('Send %d wei to %s' % (t.startgas * t.gasprice,
                             '0x'+utils.encode_hex(t.sender)))

print('Contract address: 0x'+utils.encode_hex(utils.mk_contract_address(t.sender, 0)))
print('Code: 0x'+utils.encode_hex(rlp.encode(t)))
@@ -1,75 +0,0 @@
# Computes griefing factors of various parameter sets for Casper the
# Friendly Finality Gadget

# Case 1: <1/3 non-commit (optimal if epsilon participate)
def gf1(x1, x2, x3, x4, x5):
    return x2 / x1

# Case 2: censor <1/3 committers (optimal if 1/3 get censored)
def gf2(x1, x2, x3, x4, x5):
    return 1.5 * (x1 + x2 / 3) / x2

# Generalized case 2
#k = 0.25
#def gf2(x1, x2, x3, x4, x5):
#    return (x1 * k + x2 * k**2) / (x2 * k * (1-k))

# Case 3: <1/3 non-prepare (optimal if epsilon participate)
def gf3(x1, x2, x3, x4, x5):
    return x4 / x3

# Case 4: censor <1/3 preparers (optimal if 1/3 get censored)
def gf4(x1, x2, x3, x4, x5):
    return 1.5 * (x3 + x4 / 3) / x4

# Case 5: finality-preventing 1/3 non-commits
def gf5(x1, x2, x3, x4, x5):
    return 2 * (x5 + x2 / 3) / (x5 + x1 + x2 / 3)

# Case 6: censor commits
def gf6(x1, x2, x3, x4, x5):
    # Case 6a: 51% participate
    return max(1 + x2 / (x5 + x1 + x2 / 2),
               # Case 6b: 67% participate
               (x5 + x1 + x2 / 3) / (x5 + x2 / 3) / 2)

# Case 7: finality- and commit-preventing 1/3 non-prepares
def gf7(x1, x2, x3, x4, x5):
    return 2 * (x5 + x4 / 3) / (x5 + x3 + x4 / 3)

gfs = (gf1, gf2, gf3, gf4, gf5, gf6, gf7)

# Get the maximum griefing factor of a set of parameters
def getmax(*args):
    return max([f(*args) for f in gfs])

# Get the maximum <50% griefing factor, and enforce a bound
# of MAX_CENSOR_GF on the griefing factor of >50% coalitions
def getmax2(*args):
    MAX_CENSOR_GF = 2
    if gf2(*args) > MAX_CENSOR_GF or gf4(*args) > MAX_CENSOR_GF or \
            gf6(*args) > MAX_CENSOR_GF:
        return 999999999999999999
    return max(gf1(*args), gf3(*args), gf5(*args), gf7(*args))

# Range to test for each parameter
my_range = [i/12. for i in range(1, 61)]

best_vals = (1, 0, 0, 0, 0)
best_score = 999999999999999999

# print([f(5, 6, 5, 6, 0) for f in gfs])

for x1 in my_range:
    for x2 in my_range:
        for x3 in my_range:
            for x4 in my_range:
                o = getmax2(x1, x2, x3, x4, 1)
                if o < best_score:
                    best_score = o
                    best_vals = (x1, x2, x3, x4, 1)
                if o <= 1:
                    print((x1, x2, x3, x4, 1), [f(x1, x2, x3, x4, 1) for f in gfs])
print('result', best_vals, best_score)
print([f(*best_vals) for f in gfs])
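
# A spot check of two of the cases above at one sample parameter point
# (the x values are illustrative, not special):
def _griefing_spot_check():
    x1, x2, x3, x4, x5 = 0.5, 1.0, 0.5, 1.0, 1.0
    assert gf1(x1, x2, x3, x4, x5) == 2.0            # victims lose x2 per x1 burned
    assert abs(gf5(x1, x2, x3, x4, x5) - 16.0/11) < 1e-12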
@@ -1,35 +0,0 @@
import math

# Length of an epoch in seconds
epoch_len = 1400
# In-protocol penalization parameter
increment = 0.00002

# Parameters
NFP = 0
NCP = 3
NCCP = 3
NPP = 3
NPCP = 3

def sim_offline(p):
    online, offline = 1-p, p
    for i in range(1, 999999):
        # Lost by offline validators
        offline_loss = NFP + NPP + NPCP * (offline / (online + offline))
        # Lost by online validators
        online_loss = NFP + NPCP * (offline / (online + offline))
        online *= 1 - increment * math.log(i) * online_loss
        offline *= 1 - increment * math.log(i) * offline_loss
        if i % 100 == 0 or online >= 2 * offline:
            print("%d epochs (%.2f days): online %.4f offline %.4f" %
                  (i, epoch_len * i / 86400, online, offline))
        # If the remaining validators can commit, break
        if online >= 2 * offline:
            return (1-p, online, epoch_len * i / 86400)

sim_offline(0.4)

#results = [sim_offline(i * 0.01) for i in range(34, 100)]
#for col in results:
#    print("%.4f, %.4f, %.4f" % col)
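
# A closed-form-ish estimate of the loop's stopping time, as a sketch (an
# approximation, not the simulation): the offline/online ratio shrinks by
# roughly (1 - increment*log(i)*NPP) per epoch, since the NPCP terms hit both
# sides and cancel, so its log falls by about increment*NPP*(i*ln(i) - i).
def estimated_crossing_epoch(p, increment=0.00002, NPP=3):
    # Smallest i with log(p/(1-p)) - increment*NPP*(i*log(i) - i) <= log(1/2)
    target = math.log(p / (1 - p)) - math.log(0.5)
    i = 2
    while increment * NPP * (i * math.log(i) - i) < target:
        i += 1
    return i

# For p = 0.4 this lands in the same ballpark as the simulation's printout
# (on the order of two weeks at 1400-second epochs)
# print(estimated_crossing_epoch(0.4))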
@@ -1,77 +0,0 @@
macro calldatachar($x):
    div(calldataload($x), 2**248)

macro calldatabytes_as_int($x, $b):
    div(calldataload($x), 256**(32-$b))

def any():
    # Positions of the values that we are changing
    positions = array(256)
    # Index of the next position we are adding to
    positionIndex = 0
    # Output data (main part)
    data = string(~calldatasize() + 1024)
    # Index of where we are adding data
    dataPos = 0
    # Can only parse lists; check the length of the list and set the index
    # in calldata to the right start position
    c = calldatachar(0)
    if c < 192:
        ~invalid()
    if c < 248:
        if ~calldatasize() != 1 + (c - 192):
            ~invalid()
        i = 1
    else:
        # i is still 0 here (serpent variables default to zero), so this
        # reads the length-of-length bytes starting at position 1
        L = calldatabytes_as_int(i + 1, c - 247)
        if ~calldatasize() != 1 + (c - 247) + L:
            ~invalid()
        i = 1 + (c - 247)
    # Main loop
    while i < ~calldatasize():
        # Get type (single byte, short, long)
        c = calldatachar(i)
        positions[positionIndex] = dataPos
        positionIndex += 1
        # Single byte < 0x80
        if c < 128:
            mstore(data + dataPos, 1)
            calldatacopy(data + dataPos + 32, i, 1)
            i += 1
            dataPos += 33
        # Short (up to 55 bytes)
        elif c < 184:
            mstore(data + dataPos, c - 128)
            calldatacopy(data + dataPos + 32, i + 1, c - 128)
            # Output could have been in single-byte format
            if c == 129:
                if calldatachar(i + 1) < 128:
                    ~invalid()
            i += c - 128 + 1
            dataPos += (c - 128) + 32
        # Long (56 or more bytes)
        elif c < 192:
            L = calldatabytes_as_int(i + 1, c - 183)
            # Forbid leading zero byte
            if calldatachar(i + 1) == 0:
                ~invalid()
            # Forbid too short values
            if L < 56:
                ~invalid()
            mstore(data + dataPos, L)
            calldatacopy(data + dataPos + 32, i + 1 + c - 183, L)
            i += (c - 183) + 1 + L
            dataPos += L + 32
        else:
            # Not handling nested arrays
            ~invalid()
    if positionIndex > 32:
        ~invalid()
    positions[positionIndex] = dataPos
    output = string(2048)
    i = 0
    while i <= positionIndex:
        output[i] = positions[i] + positionIndex * 32 + 32
        i += 1
    mcopy(output + positionIndex * 32 + 32, data, dataPos)
    ~return(output, positionIndex * 32 + dataPos + 32)
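
# A Python reference sketch of the same traversal (assumptions: top-level
# items only, byte strings only, and the same canonicality checks the code
# above enforces; byte offsets are returned instead of copied-out data).
def split_rlp_list(data: bytes):
    c0 = data[0]
    if c0 < 192:
        raise ValueError("not a list")
    if c0 < 248:
        i = 1
        assert len(data) == 1 + (c0 - 192)
    else:
        ll = c0 - 247
        i = 1 + ll
        assert len(data) == i + int.from_bytes(data[1:1+ll], 'big')
    items = []
    while i < len(data):
        c = data[i]
        if c < 128:                      # single byte < 0x80
            items.append((i, i + 1))
            i += 1
        elif c < 184:                    # short item (up to 55 bytes)
            if c == 129 and data[i+1] < 128:
                raise ValueError("should be single-byte format")
            items.append((i + 1, i + 1 + c - 128))
            i += 1 + c - 128
        elif c < 192:                    # long item (56 or more bytes)
            ll = c - 183
            length = int.from_bytes(data[i+1:i+1+ll], 'big')
            if data[i+1] == 0 or length < 56:
                raise ValueError("non-canonical length")
            items.append((i + 1 + ll, i + 1 + ll + length))
            i += 1 + ll + length
        else:                            # nested arrays not handled, as above
            raise ValueError("nested arrays not handled")
    return items

# b'\xc3\x01\x02\x03' is the RLP list [0x01, 0x02, 0x03]
assert split_rlp_list(b'\xc3\x01\x02\x03') == [(1, 2), (2, 3), (3, 4)]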
@@ -1,64 +0,0 @@
# Fetches the char from calldata at position $x
macro calldatachar($x):
    div(calldataload($x), 2**248)

# Fetches the next $b bytes from calldata starting at position $x
# Assumes that there is nothing important in memory at bytes 0..63
macro calldatabytes_as_int($x, $b):
    ~mstore(32-$b, calldataload($x))
    ~mload(0)

# Position in calldata
with pos = 0:
    # First char in calldata
    with c0 = calldatachar(0):
        # The start of the array must be in 192...255 because it represents
        # a list length
        # Length ++ body case
        if c0 < 248:
            pos = 1
        # Length of length ++ length ++ body case
        else:
            pos = (c0 - 246)
        # Start position of the list (save it)
        with startpos = pos:
            # Start position of the previous element
            with lastpos = 0:
                # Keep looping until we hit the end of the input
                while pos < ~calldatasize():
                    # Next char in calldata
                    with c = calldatachar(pos):
                        lastpos = pos
                        # Single byte 0x00...0x7f body case
                        if c < 128:
                            pos += 1
                        # Length ++ body case
                        elif c < 184:
                            pos += c - 127
                        # Length of length ++ length ++ body case
                        elif c < 192:
                            pos += calldatabytes_as_int(pos + 1, c - 183) + (c - 182)

                # Length of new RLP list
                with newlen = lastpos - startpos:
                    # Length ++ body case
                    if newlen < 56:
                        # Store length in the first byte
                        ~mstore8(0, 192 + newlen)
                        # Copy calldata right after length
                        ~calldatacopy(1, startpos, newlen)
                        # Return the hash
                        return(~sha3(0, 1 + newlen))
                    else:
                        # The log256 of the length (ie. length of length)
                        # Can't go higher than 16777216 bytes due to gas limits
                        with _log = if(newlen < 256, 1, if(newlen < 65536, 2, 3)):
                            # Store the length
                            ~mstore(0, newlen)
                            # Store the length of the length right before the length
                            with 31minuslog = 31 - _log:
                                ~mstore8(31minuslog, 247 + _log)
                                # Store the rest of the data
                                ~calldatacopy(32, startpos, newlen)
                                # Return the hash
                                return(~sha3(31minuslog, 1 + _log + newlen))
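
# A Python sketch of what this contract computes: hash the RLP list minus its
# last item, re-encoding the list prefix for the shorter payload. hashlib's
# sha3_256 is a stand-in here; Ethereum's sha3 is keccak-256, which pads
# differently, so these digests will not match on-chain values.
import hashlib
import rlp

def sighash(data: bytes) -> bytes:
    c0 = data[0]
    assert c0 >= 192
    startpos = 1 if c0 < 248 else 1 + (c0 - 247)
    pos, lastpos = startpos, startpos
    while pos < len(data):
        lastpos = pos
        c = data[pos]
        if c < 128:
            pos += 1
        elif c < 184:
            pos += c - 127
        elif c < 192:
            ll = c - 183
            pos += 1 + ll + int.from_bytes(data[pos+1:pos+1+ll], 'big')
        else:
            raise ValueError("nested lists unsupported")
    # Re-encode the list prefix for the payload minus the last item
    newlen = lastpos - startpos
    if newlen < 56:
        prefix = bytes([192 + newlen])
    else:
        lenbytes = newlen.to_bytes((newlen.bit_length() + 7) // 8, 'big')
        prefix = bytes([247 + len(lenbytes)]) + lenbytes
    return hashlib.sha3_256(prefix + data[startpos:lastpos]).digest()

assert sighash(rlp.encode([b'\x01', b'dog', b'sig'])) == \
    hashlib.sha3_256(rlp.encode([b'\x01', b'dog'])).digest()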
@@ -1,559 +0,0 @@
# Information about validators
validators: public({
    # Amount of wei the validator holds
    deposit: wei_value,
    # The dynasty the validator is joining
    dynasty_start: num,
    # The dynasty the validator joined for the first time
    original_dynasty_start: num,
    # The dynasty the validator is leaving
    dynasty_end: num,
    # The timestamp at which the validator can withdraw
    withdrawal_epoch: num,
    # The address which the validator's signatures must verify to (to be later replaced with validation code)
    addr: address,
    # Address to withdraw to
    withdrawal_addr: address,
    # Previous epoch in which this validator committed
    prev_commit_epoch: num
}[num])

# The current dynasty (validator set changes between dynasties)
dynasty: public(num)

# Amount of wei added to the total deposits in the next dynasty
next_dynasty_wei_delta: wei_value

# Amount of wei added to the total deposits in the dynasty after that
second_next_dynasty_wei_delta: wei_value

# Total deposits during this dynasty
total_deposits: public(wei_value[num])

# Mapping of dynasty to start epoch of that dynasty
dynasty_start_epoch: public(num[num])

# Mapping of epoch to what dynasty it is
dynasty_in_epoch: public(num[num])

# Information for use in processing cryptoeconomic commitments
consensus_messages: public({
    # How many prepares are there for this hash (hash of message hash + view source) from the current dynasty
    prepares: wei_value[bytes32],
    # Bitmap of which validator IDs have already prepared
    prepare_bitmap: num256[num][bytes32],
    # From the previous dynasty
    prev_dyn_prepares: wei_value[bytes32],
    # Is a prepare referencing the given ancestry hash justified?
    ancestry_hash_justified: bool[bytes32],
    # Is a commit on the given hash justified?
    hash_justified: bool[bytes32],
    # How many commits are there for this hash
    commits: wei_value[bytes32],
    # And from the previous dynasty
    prev_dyn_commits: wei_value[bytes32],
    # Was the block committed?
    committed: bool,
    # Value used to calculate the per-epoch fee that validators should be charged
    deposit_scale_factor: decimal
}[num])  # index: epoch

# A bitmap, where the ith bit of dynasty_mask[arg1][arg2] shows
# whether or not validator arg1 is active during dynasty arg2*256+i
dynasty_mask: num256[num][num]

# ancestry[x][y] = k > 0: x is a kth generation ancestor of y
ancestry: public(num[bytes32][bytes32])

# Number of validators
nextValidatorIndex: public(num)

# Time between blocks
block_time: timedelta

# Length of an epoch in blocks
epoch_length: num

# Withdrawal delay
withdrawal_delay: timedelta

# Delay after which a message can be slashed due to absence of justification
insufficiency_slash_delay: timedelta

# Current epoch
current_epoch: public(num)

# Can withdraw destroyed deposits
owner: address

# Total deposits destroyed
total_destroyed: wei_value

# Sighash calculator library address
sighasher: address

# Purity checker library address
purity_checker: address

# Reward for preparing or committing, as fraction of deposit size
reward_factor: public(decimal)

# Desired total ether given out assuming 1M ETH deposited
reward_at_1m_eth: decimal

# Have I already been initialized?
initialized: bool

# Log topic for prepare
prepare_log_topic: bytes32

# Log topic for commit
commit_log_topic: bytes32

def initiate():
    assert not self.initialized
    self.initialized = True
    # Set Casper parameters
    self.block_time = 14
    self.epoch_length = 100
    # Only ~11.5 days, for testing purposes
    self.withdrawal_delay = 1000000
    # Only ~1 day, for testing purposes
    self.insufficiency_slash_delay = 86400
    # Temporary backdoor for testing purposes (to allow recovering destroyed deposits)
    self.owner = 0x1Db3439a222C519ab44bb1144fC28167b4Fa6EE6
    # Add an initial validator
    self.validators[0] = {
        deposit: as_wei_value(3, ether),
        dynasty_start: 0,
        dynasty_end: 1000000000000000000000000000000,
        original_dynasty_start: 0,
        withdrawal_epoch: 1000000000000000000000000000000,
        addr: 0x1Db3439a222C519ab44bb1144fC28167b4Fa6EE6,
        withdrawal_addr: 0x1Db3439a222C519ab44bb1144fC28167b4Fa6EE6,
        prev_commit_epoch: 0,
    }
    self.nextValidatorIndex = 1
    # Initialize the epoch counter
    self.current_epoch = block.number / self.epoch_length
    # Set the sighash calculator address
    self.sighasher = 0x476c2cA9a7f3B16FeCa86512276271FAf63B6a24
    # Set the purity checker address
    self.purity_checker = 0xD7a3BD6C9eA32efF147d067f907AE6b22d436F91
    # Set an initial root of the epoch hash chain
    self.consensus_messages[0].ancestry_hash_justified[0x0000000000000000000000000000000000000000000000000000000000000000] = True
    # self.consensus_messages[0].committed = True
    # Set initial total deposit counter
    self.total_deposits[0] = as_wei_value(3, ether)
    # Set deposit scale factor
    self.consensus_messages[0].deposit_scale_factor = 1000000000000000000.0
    # Total ETH given out assuming 1m ETH deposits
    self.reward_at_1m_eth = 12.5
    # Log topics for prepare and commit
    self.prepare_log_topic = sha3("prepare()")
    self.commit_log_topic = sha3("commit()")

# Called at the start of any epoch
def initialize_epoch(epoch: num):
    # Check that the epoch actually has started
    computed_current_epoch = block.number / self.epoch_length
    assert epoch <= computed_current_epoch and epoch == self.current_epoch + 1
    # Set the epoch number
    self.current_epoch = epoch
    # Increment the dynasty
    if self.consensus_messages[epoch - 1].committed:
        self.dynasty += 1
        self.total_deposits[self.dynasty] = self.total_deposits[self.dynasty - 1] + self.next_dynasty_wei_delta
        self.next_dynasty_wei_delta = self.second_next_dynasty_wei_delta
        self.second_next_dynasty_wei_delta = 0
        self.dynasty_start_epoch[self.dynasty] = epoch
    self.dynasty_in_epoch[epoch] = self.dynasty
    # Compute square root factor
    ether_deposited_as_number = self.total_deposits[self.dynasty] / as_wei_value(1, ether)
    sqrt = ether_deposited_as_number / 2.0
    for i in range(20):
        sqrt = (sqrt + (ether_deposited_as_number / sqrt)) / 2
    # Reward factor is the reward given for preparing or committing as a
    # fraction of that validator's deposit size
    base_coeff = 1.0 / sqrt * (self.reward_at_1m_eth / 1000)
    # Rules:
    # * You are penalized 2x per epoch
    # * If you prepare, you get 1.5x, and if you commit you get another 1.5x
    # Hence, assuming 100% performance, your reward per epoch is x
    self.reward_factor = 1.5 * base_coeff
    self.consensus_messages[epoch].deposit_scale_factor = self.consensus_messages[epoch - 1].deposit_scale_factor * (1 - 2 * base_coeff)
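
# A quick standalone sanity check of the reward math above (plain Python, with
# the contract's constants hard-coded as assumptions): twenty Newton iterations
# from deposits/2 converge to sqrt(deposits), and base_coeff then scales the
# per-message reward by 1/sqrt(deposits).
def casper_base_coeff(ether_deposited, reward_at_1m_eth=12.5):
    sqrt = ether_deposited / 2.0
    for _ in range(20):
        sqrt = (sqrt + ether_deposited / sqrt) / 2
    return 1.0 / sqrt * (reward_at_1m_eth / 1000)

# At 1,000,000 ETH deposited: base_coeff = 12.5 / (1000 * 1000) = 1.25e-05
assert abs(casper_base_coeff(10**6) - 1.25e-5) < 1e-15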
# Send a deposit to join the validator set
def deposit(validation_addr: address, withdrawal_addr: address):
    assert self.current_epoch == block.number / self.epoch_length
    assert extract32(raw_call(self.purity_checker, concat('\xa1\x90>\xab', as_bytes32(validation_addr)), gas=500000, outsize=32), 0) != as_bytes32(0)
    self.validators[self.nextValidatorIndex] = {
        deposit: msg.value,
        dynasty_start: self.dynasty + 2,
        original_dynasty_start: self.dynasty + 2,
        dynasty_end: 1000000000000000000000000000000,
        withdrawal_epoch: 1000000000000000000000000000000,
        addr: validation_addr,
        withdrawal_addr: withdrawal_addr,
        prev_commit_epoch: 0,
    }
    self.nextValidatorIndex += 1
    self.second_next_dynasty_wei_delta += msg.value

# Log in or log out from the validator set. A logged-out validator can log
# back in later; if they do not log in for an entire withdrawal period,
# they can get their money out
def flick_status(logout_msg: bytes <= 1024):
    assert self.current_epoch == block.number / self.epoch_length
    # Get hash for signature, and implicitly assert that it is an RLP list
    # consisting solely of RLP elements
    sighash = extract32(raw_call(self.sighasher, logout_msg, gas=200000, outsize=32), 0)
    # Extract parameters
    values = RLPList(logout_msg, [num, num, bool, bytes])
    validator_index = values[0]
    epoch = values[1]
    login_flag = values[2]
    sig = values[3]
    assert self.current_epoch == epoch
    # Signature check
    assert extract32(raw_call(self.validators[validator_index].addr, concat(sighash, sig), gas=500000, outsize=32), 0) == as_bytes32(1)
    # Logging in
    if login_flag:
        # Check that we are logged out
        assert self.validators[validator_index].dynasty_end < self.dynasty
        # Check that we logged out for less than 3840 dynasties (min: ~2 months)
        assert self.validators[validator_index].dynasty_end >= self.dynasty - 3840
        # Apply the per-epoch deposit penalty
        prev_login_epoch = self.dynasty_start_epoch[self.validators[validator_index].dynasty_start]
        prev_logout_epoch = self.dynasty_start_epoch[self.validators[validator_index].dynasty_end + 1]
        self.validators[validator_index].deposit = \
            floor(self.validators[validator_index].deposit *
                  (self.consensus_messages[prev_logout_epoch].deposit_scale_factor /
                   self.consensus_messages[prev_login_epoch].deposit_scale_factor))
        # Log back in
        # Go through the dynasty mask to clear out the ineligible dynasties
        old_ds = self.validators[validator_index].dynasty_end
        new_ds = self.dynasty + 2
        for i in range(old_ds / 256, old_ds / 256 + 16):
            if old_ds > i * 256:
                s = old_ds % 256
            else:
                s = 0
            if new_ds < i * 256 + 256:
                e = new_ds % 256
            else:
                e = 256
            self.dynasty_mask[validator_index][i] = num256_sub(shift(as_num256(1), e), shift(as_num256(1), s))
            if e < 256:
                break
        self.validators[validator_index].dynasty_start = new_ds
        self.validators[validator_index].dynasty_end = 1000000000000000000000000000000
        self.second_next_dynasty_wei_delta += self.validators[validator_index].deposit
    # Logging out
    else:
        # Check that we haven't already withdrawn
        assert self.validators[validator_index].dynasty_end >= self.dynasty + 2
        # Set the end dynasty
        self.validators[validator_index].dynasty_end = self.dynasty + 2
        self.second_next_dynasty_wei_delta -= self.validators[validator_index].deposit
        # Set the withdrawal date
        self.validators[validator_index].withdrawal_epoch = self.current_epoch + self.withdrawal_delay / self.block_time / self.epoch_length
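
# A standalone sketch of why dividing two deposit_scale_factor snapshots
# applies the per-epoch leak for exactly the epochs in between (base_coeff is
# held constant here for simplicity; in the contract it moves with deposits).
def _scale_factor_demo():
    base_coeff = 1.25e-5
    scale = [1.0]
    for epoch in range(100):
        scale.append(scale[-1] * (1 - 2 * base_coeff))
    deposit = 3 * 10**18  # 3 ETH in wei
    login_epoch, logout_epoch = 10, 60
    penalized = deposit * (scale[logout_epoch] / scale[login_epoch])
    assert abs(penalized - deposit * (1 - 2 * base_coeff) ** 50) < 1e6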
# Removes a validator from the validator pool
def delete_validator(validator_index: num):
    self.validators[validator_index] = {
        deposit: 0,
        dynasty_start: 0,
        dynasty_end: 0,
        original_dynasty_start: 0,
        withdrawal_epoch: 0,
        addr: None,
        withdrawal_addr: None,
        prev_commit_epoch: 0,
    }

# Withdraw deposited ether
def withdraw(validator_index: num):
    # Check that we can withdraw
    assert self.current_epoch >= self.validators[validator_index].withdrawal_epoch
    # Apply the per-epoch deposit penalty
    prev_login_epoch = self.dynasty_start_epoch[self.validators[validator_index].dynasty_start]
    prev_logout_epoch = self.dynasty_start_epoch[self.validators[validator_index].dynasty_end + 1]
    self.validators[validator_index].deposit = \
        floor(self.validators[validator_index].deposit *
              (self.consensus_messages[prev_logout_epoch].deposit_scale_factor /
               self.consensus_messages[prev_login_epoch].deposit_scale_factor))
    # Withdraw
    send(self.validators[validator_index].withdrawal_addr, self.validators[validator_index].deposit)
    self.delete_validator(validator_index)

# Checks if a given validator could have prepared in a given epoch
def check_eligible_in_epoch(validator_index: num, epoch: num) -> num(const):
    # Time limit for submitting a prepare
    assert epoch > self.current_epoch - 3840
    # Original starting dynasty of the validator; fail if before
    do = self.validators[validator_index].original_dynasty_start
    # Ending dynasty of the current login period
    de = self.validators[validator_index].dynasty_end
    # Dynasty of the prepare
    dc = self.dynasty_in_epoch[epoch]
    # Dynasty before the prepare (for prev-dynasty checking)
    dp = dc - 1
    # Check against the mask to see whether the validator was logged out during that dynasty
    cur_in_mask = bitwise_and(self.dynasty_mask[validator_index][dc / 256], shift(as_num256(1), dc % 256))
    prev_in_mask = bitwise_and(self.dynasty_mask[validator_index][dp / 256], shift(as_num256(1), dp % 256))
    o = 0
    # Return result as a bitmask: bit 1 = in_current_dynasty, bit 0 = in_prev_dynasty
    if ((do <= dc and cur_in_mask == as_num256(0)) and dc < de):
        o += 2
    if ((do <= dp and prev_in_mask == as_num256(0)) and dp < de):
        o += 1
    return o
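
# A standalone sketch of the 256-bit mask arithmetic used above:
# num256_sub(shift(1, e), shift(1, s)) sets exactly bits s..e-1, and
# check_eligible_in_epoch then reads one dynasty's bit back out.
def _dynasty_mask_demo():
    WORD = 2**256

    def mask_range(s, e):
        return ((1 << e) - (1 << s)) % WORD   # mirrors num256_sub(shift(1,e), shift(1,s))

    def logged_out(mask_words, d):
        return (mask_words.get(d // 256, 0) >> (d % 256)) & 1 == 1

    # Logged out at dynasty 300, back in at dynasty 520: word 1 covers
    # dynasties 300..511 (bits 44..255), word 2 covers 512..519 (bits 0..7)
    words = {1: mask_range(300 % 256, 256), 2: mask_range(0, 520 % 256)}
    assert logged_out(words, 400)        # masked out mid-logout
    assert not logged_out(words, 521)    # eligible again after re-login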
# Process a prepare message
def prepare(prepare_msg: bytes <= 1024):
    # Get hash for signature, and implicitly assert that it is an RLP list
    # consisting solely of RLP elements
    sighash = extract32(raw_call(self.sighasher, prepare_msg, gas=200000, outsize=32), 0)
    # Extract parameters
    values = RLPList(prepare_msg, [num, num, bytes32, bytes32, num, bytes32, bytes])
    validator_index = values[0]
    epoch = values[1]
    hash = values[2]
    ancestry_hash = values[3]
    source_epoch = values[4]
    source_ancestry_hash = values[5]
    sig = values[6]
    new_ancestry_hash = sha3(concat(hash, ancestry_hash))
    # Hash for purposes of identifying this (epoch, hash, ancestry_hash, source_epoch, source_ancestry_hash) combination
    sourcing_hash = sha3(concat(as_bytes32(epoch), hash, ancestry_hash, as_bytes32(source_epoch), source_ancestry_hash))
    # Check the signature
    assert extract32(raw_call(self.validators[validator_index].addr, concat(sighash, sig), gas=500000, outsize=32), 0) == as_bytes32(1)
    # Check that we are in an epoch after we started validating
    assert self.current_epoch >= self.dynasty_start_epoch[self.validators[validator_index].dynasty_start]
    # Check that this prepare has not yet been made
    assert not bitwise_and(self.consensus_messages[epoch].prepare_bitmap[sourcing_hash][validator_index / 256],
                           shift(as_num256(1), validator_index % 256))
    # Check that we are at least (epoch length / 4) blocks into the epoch
    # assert block.number % self.epoch_length >= self.epoch_length / 4
    # Check that this validator was active in either the previous dynasty or the current one
    epochcheck = self.check_eligible_in_epoch(validator_index, epoch)
    in_current_dynasty = epochcheck >= 2
    in_prev_dynasty = (epochcheck % 2) == 1
    assert in_current_dynasty or in_prev_dynasty
    # Check that the prepare is on top of a justified prepare
    assert self.consensus_messages[source_epoch].ancestry_hash_justified[source_ancestry_hash]
    # Pay the reward if the prepare was submitted in time and the blockhash is correct
    this_validators_deposit = self.validators[validator_index].deposit
    if self.current_epoch == epoch:  # if blockhash(epoch * self.epoch_length) == hash:
        reward = floor(this_validators_deposit * self.reward_factor)
        self.validators[validator_index].deposit += reward
        self.total_deposits[self.dynasty] += reward
    # Can't prepare for this epoch again
    self.consensus_messages[epoch].prepare_bitmap[sourcing_hash][validator_index / 256] = \
        bitwise_or(self.consensus_messages[epoch].prepare_bitmap[sourcing_hash][validator_index / 256],
                   shift(as_num256(1), validator_index % 256))
    # self.validators[validator_index].max_prepared = epoch
    # Record that this prepare took place
    curdyn_prepares = self.consensus_messages[epoch].prepares[sourcing_hash]
    if in_current_dynasty:
        curdyn_prepares += this_validators_deposit
        self.consensus_messages[epoch].prepares[sourcing_hash] = curdyn_prepares
    prevdyn_prepares = self.consensus_messages[epoch].prev_dyn_prepares[sourcing_hash]
    if in_prev_dynasty:
        prevdyn_prepares += this_validators_deposit
        self.consensus_messages[epoch].prev_dyn_prepares[sourcing_hash] = prevdyn_prepares
    # If enough prepares with the same epoch_source and hash are made,
    # then the hash value is justified for commitment
    if (curdyn_prepares >= self.total_deposits[self.dynasty] * 2 / 3 and \
            prevdyn_prepares >= self.total_deposits[self.dynasty - 1] * 2 / 3) and \
            not self.consensus_messages[epoch].ancestry_hash_justified[new_ancestry_hash]:
        self.consensus_messages[epoch].ancestry_hash_justified[new_ancestry_hash] = True
        self.consensus_messages[epoch].hash_justified[hash] = True
    # Add a parent-child relation between ancestry hashes to the ancestry table
    if not self.ancestry[ancestry_hash][new_ancestry_hash]:
        self.ancestry[ancestry_hash][new_ancestry_hash] = 1
    raw_log([self.prepare_log_topic], prepare_msg)
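
# A toy illustration of the two-dynasty supermajority rule above: a hash is
# only justified once >= 2/3 of the deposits of *both* the current and the
# previous dynasty stand behind the same sourcing hash (numbers illustrative).
def _supermajority_demo():
    total_deposits = {4: 900, 5: 1000}            # dynasty -> total deposits
    curdyn, prevdyn = 700, 580
    def just(c, p):
        return (c >= total_deposits[5] * 2 / 3 and
                p >= total_deposits[4] * 2 / 3)
    assert not just(curdyn, prevdyn)              # 580 < 600: old dynasty short
    assert just(curdyn, prevdyn + 30)             # 610 >= 600: both thresholds met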
# Process a commit message
|
||||
def commit(commit_msg: bytes <= 1024):
|
||||
sighash = extract32(raw_call(self.sighasher, commit_msg, gas=200000, outsize=32), 0)
|
||||
# Extract parameters
|
||||
values = RLPList(commit_msg, [num, num, bytes32, num, bytes])
|
||||
validator_index = values[0]
|
||||
epoch = values[1]
|
||||
hash = values[2]
|
||||
prev_commit_epoch = values[3]
|
||||
sig = values[4]
|
||||
# Check the signature
|
||||
assert extract32(raw_call(self.validators[validator_index].addr, concat(sighash, sig), gas=500000, outsize=32), 0) == as_bytes32(1)
|
||||
# Check that we are in the right epoch
|
||||
assert self.current_epoch == block.number / self.epoch_length
|
||||
assert self.current_epoch == epoch
|
||||
# Check that we are at least (epoch length / 2) blocks into the epoch
|
||||
# assert block.number % self.epoch_length >= self.epoch_length / 2
|
||||
# Check that the commit is justified
|
||||
assert self.consensus_messages[epoch].hash_justified[hash]
|
||||
# Check that this validator was active in either the previous dynasty or the current one
|
||||
epochcheck = self.check_eligible_in_epoch(validator_index, epoch)
|
||||
in_current_dynasty = epochcheck >= 2
|
||||
in_prev_dynasty = (epochcheck % 2) == 1
|
||||
assert in_current_dynasty or in_prev_dynasty
|
||||
# Check that we have not yet committed for this epoch
|
||||
assert self.validators[validator_index].prev_commit_epoch == prev_commit_epoch
|
||||
assert prev_commit_epoch < epoch
|
||||
self.validators[validator_index].prev_commit_epoch = epoch
|
||||
this_validators_deposit = self.validators[validator_index].deposit
|
||||
# Pay the reward if the blockhash is correct
|
||||
if True: #if blockhash(epoch * self.epoch_length) == hash:
|
||||
reward = floor(this_validators_deposit * self.reward_factor)
|
||||
self.validators[validator_index].deposit += reward
|
||||
self.total_deposits[self.dynasty] += reward
|
||||
# Can't commit for this epoch again
|
||||
# self.validators[validator_index].max_committed = epoch
|
||||
# Record that this commit took place
|
||||
if in_current_dynasty:
|
||||
self.consensus_messages[epoch].commits[hash] += this_validators_deposit
|
||||
if in_prev_dynasty:
|
||||
self.consensus_messages[epoch].prev_dyn_commits[hash] += this_validators_deposit
|
||||
# Record if sufficient commits have been made for the block to be finalized
|
||||
if (self.consensus_messages[epoch].commits[hash] >= self.total_deposits[self.dynasty] * 2 / 3 and \
|
||||
self.consensus_messages[epoch].prev_dyn_commits[hash] >= self.total_deposits[self.dynasty - 1] * 2 / 3) and \
|
||||
not self.consensus_messages[epoch].committed:
|
||||
self.consensus_messages[epoch].committed = True
|
||||
raw_log([self.commit_log_topic], commit_msg)
|
||||
|
||||
# Cannot make two prepares in the same epoch
|
||||
def double_prepare_slash(prepare1: bytes <= 1000, prepare2: bytes <= 1000):
|
||||
# Get hash for signature, and implicitly assert that it is an RLP list
|
||||
# consisting solely of RLP elements
|
||||
sighash1 = extract32(raw_call(self.sighasher, prepare1, gas=200000, outsize=32), 0)
|
||||
sighash2 = extract32(raw_call(self.sighasher, prepare2, gas=200000, outsize=32), 0)
|
||||
# Extract parameters
|
||||
values1 = RLPList(prepare1, [num, num, bytes32, bytes32, num, bytes32, bytes])
|
||||
values2 = RLPList(prepare2, [num, num, bytes32, bytes32, num, bytes32, bytes])
|
||||
validator_index = values1[0]
|
||||
epoch1 = values1[1]
|
||||
sig1 = values1[6]
|
||||
assert validator_index == values2[0]
|
||||
epoch2 = values2[1]
|
||||
sig2 = values2[6]
|
||||
# Check the signatures
|
||||
assert extract32(raw_call(self.validators[validator_index].addr, concat(sighash1, sig1), gas=500000, outsize=32), 0) == as_bytes32(1)
|
||||
assert extract32(raw_call(self.validators[validator_index].addr, concat(sighash2, sig2), gas=500000, outsize=32), 0) == as_bytes32(1)
|
||||
# Check that they're from the same epoch
|
||||
assert epoch1 == epoch2
|
||||
# Check that they're not the same message
|
||||
assert sighash1 != sighash2
|
||||
# Delete the offending validator, and give a 4% "finder's fee"
|
||||
validator_deposit = self.validators[validator_index].deposit
|
||||
send(msg.sender, validator_deposit / 25)
|
||||
self.total_destroyed += validator_deposit * 24 / 25
|
||||
self.total_deposits[self.dynasty] -= (validator_deposit - validator_deposit / 25)
|
||||
self.delete_validator(validator_index)
|
||||
|
||||
def prepare_commit_inconsistency_slash(prepare_msg: bytes <= 1024, commit_msg: bytes <= 1024):
|
||||
# Get hash for signature, and implicitly assert that it is an RLP list
|
||||
# consisting solely of RLP elements
|
||||
sighash1 = extract32(raw_call(self.sighasher, prepare_msg, gas=200000, outsize=32), 0)
|
||||
sighash2 = extract32(raw_call(self.sighasher, commit_msg, gas=200000, outsize=32), 0)
|
||||
# Extract parameters
|
||||
values1 = RLPList(prepare_msg, [num, num, bytes32, bytes32, num, bytes32, bytes])
|
||||
values2 = RLPList(commit_msg, [num, num, bytes32, num, bytes])
|
||||
validator_index = values1[0]
|
||||
prepare_epoch = values1[1]
|
||||
prepare_source_epoch = values1[4]
|
||||
sig1 = values1[6]
|
||||
assert validator_index == values2[0]
|
||||
commit_epoch = values2[1]
|
||||
sig2 = values2[4]
|
||||
# Check the signatures
|
||||
assert extract32(raw_call(self.validators[validator_index].addr, concat(sighash1, sig1), gas=500000, outsize=32), 0) == as_bytes32(1)
|
||||
assert extract32(raw_call(self.validators[validator_index].addr, concat(sighash2, sig2), gas=500000, outsize=32), 0) == as_bytes32(1)
|
||||
# Check that the prepare refers to something older than the commit
|
||||
assert prepare_source_epoch < commit_epoch
|
||||
# Check that the prepare is newer than the commit
|
||||
assert commit_epoch < prepare_epoch
|
||||
# Delete the offending validator, and give a 4% "finder's fee"
|
||||
validator_deposit = self.validators[validator_index].deposit
|
||||
send(msg.sender, validator_deposit / 25)
|
||||
self.total_destroyed += validator_deposit * 24 / 25
|
||||
self.total_deposits[self.dynasty] -= validator_deposit
|
||||
self.delete_validator(validator_index)
|
||||
|
||||
def commit_non_justification_slash(commit_msg: bytes <= 1024):
    sighash = extract32(raw_call(self.sighasher, commit_msg, gas=200000, outsize=32), 0)
    # Extract parameters
    values = RLPList(commit_msg, [num, num, bytes32, num, bytes])
    validator_index = values[0]
    epoch = values[1]
    hash = values[2]
    sig = values[4]
    # Check the signature
    assert len(sig) == 96
    assert extract32(raw_call(self.validators[validator_index].addr, concat(sighash, sig), gas=500000, outsize=32), 0) == as_bytes32(1)
    # Check that the commit is old enough
    assert self.current_epoch == block.number / self.epoch_length
    assert (self.current_epoch - epoch) * self.epoch_length * self.block_time > self.insufficiency_slash_delay
    assert not self.consensus_messages[epoch].hash_justified[hash]
    # Delete the offending validator, and give a 4% "finder's fee"
    validator_deposit = self.validators[validator_index].deposit
    send(msg.sender, validator_deposit / 25)
    self.total_destroyed += validator_deposit * 24 / 25
    self.total_deposits[self.dynasty] -= validator_deposit
    self.delete_validator(validator_index)

# Fill in the table recording which hash is the direct parent of which other hash
def derive_parenthood(older: bytes32, hash: bytes32, newer: bytes32):
    assert sha3(concat(hash, older)) == newer
    self.ancestry[older][newer] = 1

# Fill in the table for which hash is what-degree ancestor of which other hash
def derive_ancestry(oldest: bytes32, middle: bytes32, recent: bytes32):
    assert self.ancestry[middle][recent]
    assert self.ancestry[oldest][middle]
    self.ancestry[oldest][recent] = self.ancestry[oldest][middle] + self.ancestry[middle][recent]

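# Annotation (not part of the contract): together these two functions let anyone
# populate self.ancestry so that ancestry[a][b] holds the epoch distance from a to b.
# A distance-k link is built from k derive_parenthood calls plus derive_ancestry calls
# that sum shorter links; prepare_non_justification_slash below then reads this table
# to check the claimed ancestry link between a prepare's hash and its source.
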
def prepare_non_justification_slash(prepare_msg: bytes <= 1024) -> num:
    # Get hash for signature, and implicitly assert that it is an RLP list
    # consisting solely of RLP elements
    sighash = extract32(raw_call(self.sighasher, prepare_msg, gas=200000, outsize=32), 0)
    # Extract parameters
    values = RLPList(prepare_msg, [num, num, bytes32, bytes32, num, bytes32, bytes])
    validator_index = values[0]
    epoch = values[1]
    hash = values[2]
    ancestry_hash = values[3]
    source_epoch = values[4]
    source_ancestry_hash = values[5]
    sig = values[6]
    # Check the signature
    assert extract32(raw_call(self.validators[validator_index].addr, concat(sighash, sig), gas=500000, outsize=32), 0) == as_bytes32(1)
    # Check that the view change is old enough
    assert self.current_epoch == block.number / self.epoch_length
    assert (self.current_epoch - epoch) * self.block_time * self.epoch_length > self.insufficiency_slash_delay
    # Check that the source ancestry hash has not had enough prepares, OR that there is not the
    # correct ancestry link between the current ancestry hash and source ancestry hash
    c1 = self.consensus_messages[source_epoch].ancestry_hash_justified[source_ancestry_hash]
    if epoch - 1 > source_epoch:
        c2 = self.ancestry[source_ancestry_hash][ancestry_hash] == epoch - 1 - source_epoch
    else:
        c2 = source_ancestry_hash == ancestry_hash
    assert not (c1 and c2)
    # Delete the offending validator, and give a 4% "finder's fee"
    validator_deposit = self.validators[validator_index].deposit
    send(msg.sender, validator_deposit / 25)
    self.total_destroyed += validator_deposit * 24 / 25
    self.total_deposits[self.dynasty] -= validator_deposit
    self.delete_validator(validator_index)

# Temporary backdoor for testing purposes (to allow recovering destroyed deposits)
def owner_withdraw():
    send(self.owner, self.total_destroyed)
    self.total_destroyed = 0

# Change backdoor address (set to zero to remove entirely)
def change_owner(new_owner: address):
    if self.owner == msg.sender:
        self.owner = new_owner
@@ -1,400 +0,0 @@
from ethereum import tester as t
from ethereum import utils, state_transition, transactions, abi
from viper import compiler
import serpent
from ethereum.slogging import LogRecorder, configure_logging, set_level
config_string = ':info,eth.vm.log:trace,eth.vm.op:trace,eth.vm.stack:trace,eth.vm.exit:trace,eth.pb.msg:trace,eth.pb.tx:debug'
#configure_logging(config_string=config_string)
import rlp
s = t.state()
t.languages['viper'] = compiler.Compiler()
t.gas_limit = 9999999

EPOCH_LENGTH = 100

def inject_tx(txhex):
    tx = rlp.decode(utils.decode_hex(txhex[2:]), transactions.Transaction)
    s.state.set_balance(tx.sender, tx.startgas * tx.gasprice)
    state_transition.apply_transaction(s.state, tx)
    contract_address = utils.mk_contract_address(tx.sender, 0)
    assert s.state.get_code(contract_address)
    return contract_address
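
# Annotation (not in the original): inject_tx implements keyless deployment. The hex
# blobs below are pre-signed deployment transactions from throwaway senders; funding
# tx.sender and replaying each one recreates the library at a deterministic address
# (a function only of sender and nonce) on any chain.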

code_template = """
~calldatacopy(0, 0, 128)
~call(3000, 1, 0, 0, 128, 0, 32)
return(~mload(0) == %s)
"""

def mk_validation_code(address):
    return serpent.compile(code_template % (utils.checksum_encode(address)))

# Install RLP decoder library
rlp_decoder_address = inject_tx( '0xf90237808506fc23ac00830330888080b902246102128061000e60003961022056600060007f010000000000000000000000000000000000000000000000000000000000000060003504600060c082121515585760f882121561004d5760bf820336141558576001905061006e565b600181013560f783036020035260005160f6830301361415585760f6820390505b5b368112156101c2577f010000000000000000000000000000000000000000000000000000000000000081350483602086026040015260018501945060808112156100d55760018461044001526001828561046001376001820191506021840193506101bc565b60b881121561014357608081038461044001526080810360018301856104600137608181141561012e5760807f010000000000000000000000000000000000000000000000000000000000000060018401350412151558575b607f81038201915060608103840193506101bb565b60c08112156101b857600182013560b782036020035260005160388112157f010000000000000000000000000000000000000000000000000000000000000060018501350402155857808561044001528060b6838501038661046001378060b6830301830192506020810185019450506101ba565bfe5b5b5b5061006f565b601f841315155857602060208502016020810391505b6000821215156101fc578082604001510182826104400301526020820391506101d8565b808401610420528381018161044003f350505050505b6000f31b2d4f')

# Install sig hasher

s.state.set_balance('0x6e7406512b244843c1171840dfcd3d7532d979fe', 7291200000000000)

sighasher_address = inject_tx( '0xf9016d808506fc23ac0083026a508080b9015a6101488061000e6000396101565660007f01000000000000000000000000000000000000000000000000000000000000006000350460f8811215610038576001915061003f565b60f6810391505b508060005b368312156100c8577f01000000000000000000000000000000000000000000000000000000000000008335048391506080811215610087576001840193506100c2565b60b881121561009d57607f8103840193506100c1565b60c08112156100c05760b68103600185013560b783036020035260005101840193505b5b5b50610044565b81810360388112156100f4578060c00160005380836001378060010160002060e052602060e0f3610143565b61010081121561010557600161011b565b6201000081121561011757600261011a565b60035b5b8160005280601f038160f701815382856020378282600101018120610140526020610140f350505b505050505b6000f31b2d4f')

# Install purity checker

purity_checker_address = inject_tx( '0xf90467808506fc23ac00830583c88080b904546104428061000e60003961045056600061033f537c0100000000000000000000000000000000000000000000000000000000600035047f80010000000000000000000000000000000000000030ffff1c0e00000000000060205263a1903eab8114156103f7573659905901600090523660048237600435608052506080513b806020015990590160009052818152602081019050905060a0526080513b600060a0516080513c6080513b8060200260200159905901600090528181526020810190509050610100526080513b806020026020015990590160009052818152602081019050905061016052600060005b602060a05103518212156103c957610100601f8360a051010351066020518160020a161561010a57fe5b80606013151561011e57607f811315610121565b60005b1561014f5780607f036101000a60018460a0510101510482602002610160510152605e8103830192506103b2565b60f18114801561015f5780610164565b60f282145b905080156101725780610177565b60f482145b9050156103aa5760028212151561019e5760606001830360200261010051015112156101a1565b60005b156101bc57607f6001830360200261010051015113156101bf565b60005b156101d157600282036102605261031e565b6004821215156101f057600360018303602002610100510151146101f3565b60005b1561020d57605a6002830360200261010051015114610210565b60005b1561022b57606060038303602002610100510151121561022e565b60005b1561024957607f60038303602002610100510151131561024c565b60005b1561025e57600482036102605261031d565b60028212151561027d57605a6001830360200261010051015114610280565b60005b1561029257600282036102605261031c565b6002821215156102b157609060018303602002610100510151146102b4565b60005b156102c657600282036102605261031b565b6002821215156102e65760806001830360200261010051015112156102e9565b60005b156103035760906001830360200261010051015112610306565b60005b1561031857600282036102605261031a565bfe5b5b5b5b5b604060405990590160009052600081526102605160200261016051015181602001528090502054156103555760016102a052610393565b60306102605160200261010051015114156103755760016102a052610392565b60606102605160200261010051015114156103915760016102a0525b5b5b6102a051151561039f57fe5b6001830192506103b1565b6001830192505b5b8082602002610100510152600182019150506100e0565b50506001604060405990590160009052600081526080518160200152809050205560016102e05260206102e0f35b63c23697a8811415610440573659905901600090523660048237600435608052506040604059905901600090526000815260805181602001528090502054610300526020610300f35b505b6000f31b2d4f')
ct = abi.ContractTranslator([{'name': 'check(address)', 'type': 'function', 'constant': True, 'inputs': [{'name': 'addr', 'type': 'address'}], 'outputs': [{'name': 'out', 'type': 'bool'}]}, {'name': 'submit(address)', 'type': 'function', 'constant': False, 'inputs': [{'name': 'addr', 'type': 'address'}], 'outputs': [{'name': 'out', 'type': 'bool'}]}])
# Check that the RLP decoding library and the sig hashing library are "pure"
assert utils.big_endian_to_int(s.send(t.k0, purity_checker_address, 0, ct.encode('submit', [rlp_decoder_address]))) == 1
assert utils.big_endian_to_int(s.send(t.k0, purity_checker_address, 0, ct.encode('submit', [sighasher_address]))) == 1

k1_valcode_addr = s.send(t.k0, "", 0, mk_validation_code(t.a0))
assert utils.big_endian_to_int(s.send(t.k0, purity_checker_address, 0, ct.encode('submit', [k1_valcode_addr]))) == 1

# Install Casper

casper_code = open('simple_casper.v.py').read().replace('0x1Db3439a222C519ab44bb1144fC28167b4Fa6EE6', utils.checksum_encode(k1_valcode_addr)) \
    .replace('0x476c2cA9a7f3B16FeCa86512276271FAf63B6a24', utils.checksum_encode(sighasher_address)) \
    .replace('0xD7a3BD6C9eA32efF147d067f907AE6b22d436F91', utils.checksum_encode(purity_checker_address))

print('Casper code length', len(compiler.compile(casper_code)))

casper = s.abi_contract(casper_code, language='viper', startgas=5555555)

print('Gas consumed to launch Casper', s.state.receipts[-1].gas_used - s.state.receipts[-2].gas_used)

# Helper functions for making a prepare, commit, login and logout message

def mk_prepare(validator_index, epoch, hash, ancestry_hash, source_epoch, source_ancestry_hash, key):
    sighash = utils.sha3(rlp.encode([validator_index, epoch, hash, ancestry_hash, source_epoch, source_ancestry_hash]))
    v, r, s = utils.ecdsa_raw_sign(sighash, key)
    sig = utils.encode_int32(v) + utils.encode_int32(r) + utils.encode_int32(s)
    return rlp.encode([validator_index, epoch, hash, ancestry_hash, source_epoch, source_ancestry_hash, sig])

def mk_commit(validator_index, epoch, hash, prev_commit_epoch, key):
    sighash = utils.sha3(rlp.encode([validator_index, epoch, hash, prev_commit_epoch]))
    v, r, s = utils.ecdsa_raw_sign(sighash, key)
    sig = utils.encode_int32(v) + utils.encode_int32(r) + utils.encode_int32(s)
    return rlp.encode([validator_index, epoch, hash, prev_commit_epoch, sig])

def mk_status_flicker(validator_index, epoch, login, key):
    sighash = utils.sha3(rlp.encode([validator_index, epoch, login]))
    v, r, s = utils.ecdsa_raw_sign(sighash, key)
    sig = utils.encode_int32(v) + utils.encode_int32(r) + utils.encode_int32(s)
    return rlp.encode([validator_index, epoch, login, sig])

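# Annotation (not in the original): the three signed-message formats can be inspected
# by round-tripping through the same rlp library, e.g. for a prepare:
#
#   fields = rlp.decode(mk_prepare(0, 1, b'\x35' * 32, b'\x00' * 32, 0, b'\x00' * 32, t.k0))
#   # fields = [validator_index, epoch, hash, ancestry_hash,
#   #           source_epoch, source_ancestry_hash, sig]
#   assert len(fields[6]) == 96  # sig is v || r || s, three 32-byte words
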
# Begin the test

print("Starting tests")
casper.initiate()
# Initialize the first epoch
s.state.block_number = EPOCH_LENGTH
print('foo', casper.initialize_epoch(1))
assert casper.get_nextValidatorIndex() == 1
start = s.snapshot()
print("Epoch initialized")
print("Reward factor: %.8f" % (casper.get_reward_factor() * 2 / 3))
# Send a prepare message
#configure_logging(config_string=config_string)
casper.prepare(mk_prepare(0, 1, '\x35' * 32, '\x00' * 32, 0, '\x00' * 32, t.k0))
print('Gas consumed for a prepare: %d (including %d intrinsic gas)' %
      (s.state.receipts[-1].gas_used - s.state.receipts[-2].gas_used, s.last_tx.intrinsic_gas_used))
epoch_1_anchash = utils.sha3(b'\x35' * 32 + b'\x00' * 32)
assert casper.get_consensus_messages__hash_justified(1, b'\x35' * 32)
assert casper.get_consensus_messages__ancestry_hash_justified(1, epoch_1_anchash)
print("Prepare message processed")
try:
    casper.prepare(mk_prepare(0, 1, '\x35' * 32, '\x00' * 32, 0, '\x00' * 32, t.k0))
    success = True
except:
    success = False
assert not success
print("Prepare message fails the second time")
# Send a commit message
casper.commit(mk_commit(0, 1, '\x35' * 32, 0, t.k0))
print('Gas consumed for a commit: %d (including %d intrinsic gas)' %
      (s.state.receipts[-1].gas_used - s.state.receipts[-2].gas_used, s.last_tx.intrinsic_gas_used))
# Check that we committed
assert casper.get_consensus_messages__committed(1)
print("Commit message processed")
# Initialize the second epoch
s.state.block_number += EPOCH_LENGTH
casper.initialize_epoch(2)
# Check that the dynasty increased as expected
assert casper.get_dynasty() == 1
assert casper.get_total_deposits(1) == casper.get_total_deposits(0) > 0
print("Second epoch initialized, dynasty increased as expected")
# Send a prepare message
casper.prepare(mk_prepare(0, 2, '\x45' * 32, epoch_1_anchash, 1, epoch_1_anchash, t.k0))
# Send a commit message
epoch_2_commit = mk_commit(0, 2, '\x45' * 32, 1, t.k0)
casper.commit(epoch_2_commit)
epoch_2_anchash = utils.sha3(b'\x45' * 32 + epoch_1_anchash)
assert casper.get_consensus_messages__ancestry_hash_justified(2, epoch_2_anchash)
# Check that we committed
assert casper.get_consensus_messages__committed(2)
# Initialize the third epoch
s.state.block_number += EPOCH_LENGTH
casper.initialize_epoch(3)
print("Second epoch prepared and committed, third epoch initialized")
# Test the NO_DBL_PREPARE slashing condition
p1 = mk_prepare(0, 3, '\x56' * 32, epoch_2_anchash, 2, epoch_2_anchash, t.k0)
p2 = mk_prepare(0, 3, '\x57' * 32, epoch_2_anchash, 2, epoch_2_anchash, t.k0)
snapshot = s.snapshot()
casper.double_prepare_slash(p1, p2)
s.revert(snapshot)
print("NO_DBL_PREPARE slashing condition works")
# Test the PREPARE_COMMIT_CONSISTENCY slashing condition
p3 = mk_prepare(0, 3, '\x58' * 32, epoch_2_anchash, 0, b'\x00' * 32, t.k0)
snapshot = s.snapshot()
casper.prepare_commit_inconsistency_slash(p3, epoch_2_commit)
s.revert(snapshot)
print("PREPARE_COMMIT_CONSISTENCY slashing condition works")
# Finish the third epoch
casper.prepare(p1)
casper.commit(mk_commit(0, 3, '\x56' * 32, 2, t.k0))
epoch_3_anchash = utils.sha3(b'\x56' * 32 + epoch_2_anchash)
assert casper.get_consensus_messages__ancestry_hash_justified(3, epoch_3_anchash)
assert casper.get_consensus_messages__committed(3)
# Initialize the fourth epoch. Not doing prepares or commits during this epoch.
s.state.block_number += EPOCH_LENGTH
casper.initialize_epoch(4)
assert casper.get_dynasty() == 3
epoch_4_anchash = utils.sha3(b'\x67' * 32 + epoch_3_anchash)
# Not publishing this prepare for the time being
p4 = mk_prepare(0, 4, '\x78' * 32, '\x12' * 32, 3, '\x24' * 32, t.k0)
# Initialize the fifth epoch
s.state.block_number += EPOCH_LENGTH
casper.initialize_epoch(5)
print("Epochs up to 5 initialized")
# Dynasty not incremented because no commits were made
assert casper.get_dynasty() == 3
epoch_5_anchash = utils.sha3(b'\x78' * 32 + epoch_4_anchash)
p5 = mk_prepare(0, 5, '\x78' * 32, epoch_4_anchash, 3, epoch_3_anchash, t.k0)
casper.prepare(p5)
# Test the COMMIT_REQ slashing condition
kommit = mk_commit(0, 5, b'\x80' * 32, 3, t.k0)
epoch_inc = 1 + int(86400 / 14 / EPOCH_LENGTH)
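# Annotation (not in the original): with 14-second blocks and 100-block epochs,
# 86400 / 14 / 100 ~= 61.7, so epoch_inc = 62 epochs — just over one day of chain
# time, which is presumably the contract's insufficiency_slash_delay.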
s.state.block_number += EPOCH_LENGTH * epoch_inc
print("Speeding up time to test remaining two slashing conditions")
for i in range(6, 6 + epoch_inc):
    casper.initialize_epoch(i)
print("Epochs up to %d initialized" % (6 + epoch_inc))
snapshot = s.snapshot()
casper.commit_non_justification_slash(kommit)
s.revert(snapshot)
try:
    casper.commit_non_justification_slash(0, epoch_2_commit)
    success = True
except:
    success = False
assert not success
print("COMMIT_REQ slashing condition works")
# Test the PREPARE_REQ slashing condition
casper.derive_parenthood(epoch_3_anchash, b'\x67' * 32, epoch_4_anchash)
assert casper.get_ancestry(epoch_3_anchash, epoch_4_anchash) == 1
assert casper.get_ancestry(epoch_4_anchash, epoch_5_anchash) == 1
casper.derive_ancestry(epoch_3_anchash, epoch_4_anchash, epoch_5_anchash)
assert casper.get_ancestry(epoch_3_anchash, epoch_5_anchash) == 2
snapshot = s.snapshot()
casper.prepare_non_justification_slash(p4)
s.revert(snapshot)
try:
    casper.prepare_non_justification_slash(p5)
    success = True
except:
    success = False
assert not success
print("PREPARE_REQ slashing condition works")

print("Restarting the chain for test 2")
|
||||
# Restart the chain
|
||||
s.revert(start)
|
||||
assert casper.get_dynasty() == 0
|
||||
assert casper.get_current_epoch() == 1
|
||||
assert casper.get_consensus_messages__ancestry_hash_justified(0, b'\x00' * 32)
|
||||
print("Epoch 1 initialized")
|
||||
for k in (t.k1, t.k2, t.k3, t.k4, t.k5, t.k6):
|
||||
valcode_addr = s.send(t.k0, '', 0, mk_validation_code(utils.privtoaddr(k)))
|
||||
assert utils.big_endian_to_int(s.send(t.k0, purity_checker_address, 0, ct.encode('submit', [valcode_addr]))) == 1
|
||||
casper.deposit(valcode_addr, utils.privtoaddr(k), value=3 * 10**18)
|
||||
print("Processed 6 deposits")
|
||||
casper.prepare(mk_prepare(0, 1, b'\x10' * 32, b'\x00' * 32, 0, b'\x00' * 32, t.k0))
|
||||
casper.commit(mk_commit(0, 1, b'\x10' * 32, 0, t.k0))
|
||||
epoch_1_anchash = utils.sha3(b'\x10' * 32 + b'\x00' * 32)
|
||||
assert casper.get_consensus_messages__committed(1)
|
||||
print("Prepared and committed")
|
||||
s.state.block_number += EPOCH_LENGTH
|
||||
casper.initialize_epoch(2)
|
||||
print("Epoch 2 initialized")
|
||||
assert casper.get_dynasty() == 1
|
||||
casper.prepare(mk_prepare(0, 2, b'\x20' * 32, epoch_1_anchash, 1, epoch_1_anchash, t.k0))
|
||||
casper.commit(mk_commit(0, 2, b'\x20' * 32, 1, t.k0))
|
||||
epoch_2_anchash = utils.sha3(b'\x20' * 32 + epoch_1_anchash)
|
||||
assert casper.get_consensus_messages__committed(2)
|
||||
print("Confirmed that one key is still sufficient to prepare and commit")
|
||||
s.state.block_number += EPOCH_LENGTH
|
||||
casper.initialize_epoch(3)
|
||||
print("Epoch 3 initialized")
|
||||
assert casper.get_dynasty() == 2
|
||||
assert 3 * 10**18 <= casper.get_total_deposits(0) < 4 * 10**18
|
||||
assert 3 * 10**18 <= casper.get_total_deposits(1) < 4 * 10**18
|
||||
assert 21 * 10**18 <= casper.get_total_deposits(2) < 22 * 10**18
|
||||
print("Confirmed new total_deposits")
|
||||
try:
|
||||
# Try to log out, but sign with the wrong key
|
||||
casper.flick_status(mk_status_flicker(0, 3, 0, t.k1))
|
||||
success = True
|
||||
except:
|
||||
success = False
|
||||
assert not success
|
||||
# Log out
|
||||
casper.flick_status(mk_status_flicker(4, 3, 0, t.k4))
|
||||
casper.flick_status(mk_status_flicker(5, 3, 0, t.k5))
|
||||
casper.flick_status(mk_status_flicker(6, 3, 0, t.k6))
|
||||
print("Logged out three validators")
|
||||
# Validators leave the fwd validator set in dynasty 4
assert casper.get_validators__dynasty_end(4) == 4
epoch_3_anchash = utils.sha3(b'\x30' * 32 + epoch_2_anchash)
# Prepare from one validator
casper.prepare(mk_prepare(0, 3, b'\x30' * 32, epoch_2_anchash, 2, epoch_2_anchash, t.k0))
# Not prepared yet
assert not casper.get_consensus_messages__hash_justified(3, b'\x30' * 32)
print("Prepare from one validator no longer sufficient")
# Prepare from 3 more validators
for i, k in ((1, t.k1), (2, t.k2), (3, t.k3)):
    casper.prepare(mk_prepare(i, 3, b'\x30' * 32, epoch_2_anchash, 2, epoch_2_anchash, k))
# Still not prepared
assert not casper.get_consensus_messages__hash_justified(3, b'\x30' * 32)
print("Prepare from four of seven validators still not sufficient")
# Prepare from a fifth validator
casper.prepare(mk_prepare(4, 3, b'\x30' * 32, epoch_2_anchash, 2, epoch_2_anchash, t.k4))
# NOW we're prepared!
assert casper.get_consensus_messages__hash_justified(3, b'\x30' * 32)
print("Prepare from five of seven validators sufficient!")
# Five commits
for i, k in enumerate([t.k0, t.k1, t.k2, t.k3, t.k4]):
    casper.commit(mk_commit(i, 3, b'\x30' * 32, 2 if i == 0 else 0, k))
# And we committed!
assert casper.get_consensus_messages__committed(3)
print("Commit from five of seven validators sufficient")
# Start epoch 4
s.state.block_number += EPOCH_LENGTH
casper.initialize_epoch(4)
assert casper.get_dynasty() == 3
print("Epoch 4 initialized")
# Prepare and commit
epoch_4_anchash = utils.sha3(b'\x40' * 32 + epoch_3_anchash)
for i, k in enumerate([t.k0, t.k1, t.k2, t.k3, t.k4]):
    casper.prepare(mk_prepare(i, 4, b'\x40' * 32, epoch_3_anchash, 3, epoch_3_anchash, k))
for i, k in enumerate([t.k0, t.k1, t.k2, t.k3, t.k4]):
    casper.commit(mk_commit(i, 4, b'\x40' * 32, 3, k))
assert casper.get_consensus_messages__committed(4)
print("Prepared and committed")
# Start epoch 5 / dynasty 4
s.state.block_number += EPOCH_LENGTH
casper.initialize_epoch(5)
print("Epoch 5 initialized")
assert casper.get_dynasty() == 4
assert 21 * 10**18 <= casper.get_total_deposits(3) <= 22 * 10**18
assert 12 * 10**18 <= casper.get_total_deposits(4) <= 13 * 10**18
epoch_5_anchash = utils.sha3(b'\x50' * 32 + epoch_4_anchash)
# Do three prepares
for i, k in enumerate([t.k0, t.k1, t.k2]):
    casper.prepare(mk_prepare(i, 5, b'\x50' * 32, epoch_4_anchash, 4, epoch_4_anchash, k))
# Three prepares are insufficient because there are still seven validators in the rear validator set
assert not casper.get_consensus_messages__hash_justified(5, b'\x50' * 32)
print("Three prepares insufficient, as rear validator set still has seven")
# Do two more prepares
for i, k in [(3, t.k3), (4, t.k4)]:
    casper.prepare(mk_prepare(i, 5, b'\x50' * 32, epoch_4_anchash, 4, epoch_4_anchash, k))
# Now we're good!
assert casper.get_consensus_messages__hash_justified(5, b'\x50' * 32)
print("Five prepares sufficient")
for i, k in enumerate([t.k0, t.k1, t.k2, t.k3, t.k4]):
    casper.commit(mk_commit(i, 5, b'\x50' * 32, 4, k))
# Committed!
assert casper.get_consensus_messages__committed(5)
# Start epoch 6 / dynasty 5
s.state.block_number += EPOCH_LENGTH
casper.initialize_epoch(6)
assert casper.get_dynasty() == 5
print("Epoch 6 initialized")
# Log back in
old_deposit_start = casper.get_dynasty_start_epoch(casper.get_validators__dynasty_start(4))
old_deposit_end = casper.get_dynasty_start_epoch(casper.get_validators__dynasty_end(4) + 1)
old_deposit = casper.get_validators__deposit(4)
# Explanation:
# * During dynasty 0, the validator deposited, so he joins the current set in dynasty 2
#   (epoch 3), and the previous set in dynasty 3 (epoch 4)
# * During dynasty 2, the validator logs off, so he leaves the current set in dynasty 4
#   (epoch 5) and the previous set in dynasty 5 (epoch 6)
assert [casper.check_eligible_in_epoch(4, i) for i in range(7)] == [0, 0, 0, 2, 3, 1, 0]
casper.flick_status(mk_status_flicker(4, 6, 1, t.k4))
# Explanation:
# * During dynasty 7, the validator will log on again. Hence, the dynasty mask
#   should include dynasties 4, 5, 6
assert [casper.check_eligible_in_epoch(4, i) for i in range(7)] == [0, 0, 0, 2, 3, 1, 0]
new_deposit = casper.get_validators__deposit(4)
print("One validator logging back in")
print("Penalty from %d epochs: %.4f" % (old_deposit_end - old_deposit_start, 1 - new_deposit / old_deposit))
assert casper.get_validators__dynasty_start(4) == 7
# Here three prepares and three commits should be sufficient!
epoch_6_anchash = utils.sha3(b'\x60' * 32 + epoch_5_anchash)
for i, k in enumerate([t.k0, t.k1, t.k2]):
    casper.prepare(mk_prepare(i, 6, b'\x60' * 32, epoch_5_anchash, 5, epoch_5_anchash, k))
for i, k in enumerate([t.k0, t.k1, t.k2]):
    casper.commit(mk_commit(i, 6, b'\x60' * 32, 5, k))
assert casper.get_consensus_messages__committed(6)
print("Three of four prepares and commits sufficient")
# Start epoch 7 / dynasty 6
s.state.block_number += EPOCH_LENGTH
casper.initialize_epoch(7)
assert casper.get_dynasty() == 6
print("Epoch 7 initialized")
# Here three prepares and three commits should be sufficient!
epoch_7_anchash = utils.sha3(b'\x70' * 32 + epoch_6_anchash)
for i, k in enumerate([t.k0, t.k1, t.k2]):
    #if i == 1:
    #    configure_logging(config_string=config_string)
    casper.prepare(mk_prepare(i, 7, b'\x70' * 32, epoch_6_anchash, 6, epoch_6_anchash, k))
    #if i == 1:
    #    import sys
    #    sys.exit()
print('Gas consumed for first prepare', s.state.receipts[-1].gas_used - s.state.receipts[-2].gas_used)
print('Gas consumed for second prepare', s.state.receipts[-2].gas_used - s.state.receipts[-3].gas_used)
print('Gas consumed for third prepare', s.state.receipts[-3].gas_used - s.state.receipts[-4].gas_used)
for i, k in enumerate([t.k0, t.k1, t.k2]):
    casper.commit(mk_commit(i, 7, b'\x70' * 32, 6, k))
print('Gas consumed for first commit', s.state.receipts[-1].gas_used - s.state.receipts[-2].gas_used)
print('Gas consumed for second commit', s.state.receipts[-2].gas_used - s.state.receipts[-3].gas_used)
print('Gas consumed for third commit', s.state.receipts[-3].gas_used - s.state.receipts[-4].gas_used)
assert casper.get_consensus_messages__committed(7)
print("Three of four prepares and commits sufficient")
# Start epoch 8 / dynasty 7
s.state.block_number += EPOCH_LENGTH
casper.initialize_epoch(8)
assert casper.get_dynasty() == 7
print("Epoch 8 initialized")
assert 12 * 10**18 <= casper.get_total_deposits(6) <= 13 * 10**18
assert 15 * 10**18 <= casper.get_total_deposits(7) <= 16 * 10**18
epoch_8_anchash = utils.sha3(b'\x80' * 32 + epoch_7_anchash)
# Do three prepares
for i, k in enumerate([t.k0, t.k1, t.k2]):
    casper.prepare(mk_prepare(i, 8, b'\x80' * 32, epoch_7_anchash, 7, epoch_7_anchash, k))
# Three prepares are insufficient because there are five validators in the forward validator set
assert not casper.get_consensus_messages__hash_justified(8, b'\x80' * 32)
print("Three prepares no longer sufficient, as the forward validator set has five validators")
# Do one more prepare
for i, k in [(3, t.k3)]:
    casper.prepare(mk_prepare(i, 8, b'\x80' * 32, epoch_7_anchash, 7, epoch_7_anchash, k))
# Now we're good!
assert casper.get_consensus_messages__hash_justified(8, b'\x80' * 32)
print("Four of five prepares sufficient")
for i, k in enumerate([t.k0, t.k1, t.k2, t.k3, t.k4]):
    casper.commit(mk_commit(i, 8, b'\x80' * 32, 7 if i < 3 else 5, k))
assert casper.get_consensus_messages__committed(8)
print("Committed")
# Validator rejoins current validator set in epoch 8
assert [casper.check_eligible_in_epoch(4, i) for i in range(9)] == [0, 0, 0, 2, 3, 1, 0, 0, 2]

print("All tests passed")
@@ -1,378 +0,0 @@
# Implements Minimal Slashing Conditions and dynamic validator sets, descriptions here:
# Slashing Conditions: https://docs.google.com/document/d/1ecFPYhe7YsKNQUAx48S8hoyK9Y4Rbe9be_lCe_vj2ek
# Dynamic Validator Sets: https://medium.com/@VitalikButerin/safety-under-dynamic-validator-sets-ef0c3bbdf9f6#.igylifcm9

import random

POOL_SIZE = 10
VALIDATOR_IDS = range(0, POOL_SIZE*2)
INITIAL_VALIDATORS = range(0, POOL_SIZE)
BLOCK_TIME = 100
EPOCH_LENGTH = 5
AVG_LATENCY = 255

def poisson_latency(latency):
    return lambda: 1 + int(random.gammavariate(1, 1) * latency)

class Network():
    def __init__(self, latency):
        self.nodes = []
        self.latency = latency
        self.time = 0
        self.msg_arrivals = {}

    def broadcast(self, msg):
        for i, n in enumerate(self.nodes):
            delay = self.latency()
            if self.time + delay not in self.msg_arrivals:
                self.msg_arrivals[self.time + delay] = []
            self.msg_arrivals[self.time + delay].append((i, msg))

    def tick(self):
        if self.time in self.msg_arrivals:
            for node_index, msg in self.msg_arrivals[self.time]:
                self.nodes[node_index].on_receive(msg)
            del self.msg_arrivals[self.time]
        for n in self.nodes:
            n.tick(self.time)
        self.time += 1

class Block():
    def __init__(self, parent=None, finalized_dynasties=None):
        self.hash = random.randrange(10**30)
        # If we are the genesis block, set initial values
        if not parent:
            self.number = 0
            self.prevhash = 0
            self.prev_dynasty = self.current_dynasty = Dynasty(INITIAL_VALIDATORS)
            self.next_dynasty = self.generate_next_dynasty(self.current_dynasty.number)
            return
        # Set our block number and our prevhash
        self.number = parent.number + 1
        self.prevhash = parent.hash
        # Generate a random next dynasty
        self.next_dynasty = self.generate_next_dynasty(parent.current_dynasty.number)
        # If the current_dynasty was finalized, we advance to the next dynasty
        if parent.current_dynasty in finalized_dynasties:
            self.prev_dynasty = parent.current_dynasty
            self.current_dynasty = parent.next_dynasty
            return
        # `current_dynasty` has not yet been finalized, so we don't rotate validators
        self.prev_dynasty = parent.prev_dynasty
        self.current_dynasty = parent.current_dynasty

    @property
    def epoch(self):
        return self.number // EPOCH_LENGTH

    def generate_next_dynasty(self, prev_dynasty_number):
        random.seed(self.hash)
        next_dynasty = Dynasty(random.sample(VALIDATOR_IDS, POOL_SIZE), prev_dynasty_number+1)
        random.seed()
        return next_dynasty

class Prepare():
    def __init__(self, view, _hash, view_source, sender):
        self.view = view
        self.hash = random.randrange(10**30)
        self.blockhash = _hash
        self.view_source = view_source
        self.sender = sender

class Commit():
    def __init__(self, view, _hash, sender):
        self.view = view
        self.hash = random.randrange(10**30)
        self.blockhash = _hash
        self.sender = sender

class Dynasty():
    def __init__(self, validators, number=0):
        self.validators = validators
        self.number = number

    def __hash__(self):
        return hash(str(self.number) + str(self.validators))

    def __eq__(self, other):
        return (str(self.number) + str(self.validators)) == (str(other.number) + str(other.validators))

GENESIS = Block()

# Fork choice rule:
# 1. HEAD = genesis
# 2. Find the descendant with the highest number of commits
# 3. Repeat 2 until 0 commits
# 4. Longest chain rule
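# Illustrative sketch of the rule above (annotation, not part of the original file):
# starting from the latest checkpoint, greedily follow the descendant checkpoint with
# the most commits, then let the longest-chain rule take over once no descendant has
# any commits. Using this module's own helpers:
#
#   def fork_choice_sketch(node):
#       head = GENESIS.hash
#       while True:
#           children = [h for h in node.tails if node.is_ancestor(head, h) and h != head]
#           scored = [(node.score_checkpoint(node.received[h]), h) for h in children]
#           if not scored or max(scored)[0] < 1:
#               break  # no committed descendant: longest-chain tie-breaker takes over
#           head = max(scored)[1]
#       return head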

class Node():
    def __init__(self, network, id):
        # Checkpoint hashes along the chain of highest-commit descendants, in oldest-to-newest order
        self.checkpoints = [GENESIS.hash]
        # Received blocks
        self.received = {GENESIS.hash: GENESIS}
        # Messages that will be processed once a given message is received
        self.dependencies = {}
        # Checkpoint to view source to prepare count
        self.prepare_count = {}
        # Checkpoints that can be committed
        self.committable = {}
        # Commits for any given checkpoint
        # Genesis is an immutable start of the chain
        self.commits = {GENESIS.hash: INITIAL_VALIDATORS}
        # Set of finalized dynasties
        self.finalized_dynasties = set()
        self.finalized_dynasties.add(Dynasty(INITIAL_VALIDATORS))
        # My current epoch
        self.current_epoch = 0
        # My highest committed epoch and hash
        self.highest_committed_epoch = -1
        self.highest_committed_hash = GENESIS.hash
        # Network I am connected to
        self.network = network
        network.nodes.append(self)
        # Longest tail from each checkpoint
        self.tails = {GENESIS.hash: GENESIS}
        # Tail that each block belongs to
        self.tail_membership = {GENESIS.hash: GENESIS.hash}
        # This node's ID
        self.id = id

    @property
    def head(self):
        latest_checkpoint = self.checkpoints[-1]
        latest_block = self.tails[latest_checkpoint]
        return latest_block

    # Get the checkpoint immediately before a given checkpoint
    def get_checkpoint_parent(self, block):
        if block.number == 0:
            return None
        return self.received[self.tail_membership[block.prevhash]]

    # If we received an object but did not receive some dependencies
    # needed to process it, save it to be processed later
    def add_dependency(self, _hash, obj):
        if _hash not in self.dependencies:
            self.dependencies[_hash] = []
        self.dependencies[_hash].append(obj)

    # Is a given checkpoint an ancestor of another given checkpoint?
    def is_ancestor(self, anc, desc):
        if not isinstance(anc, Block):
            anc = self.received[anc]
        if not isinstance(desc, Block):
            desc = self.received[desc]
        assert anc.number % EPOCH_LENGTH == 0
        assert desc.number % EPOCH_LENGTH == 0
        while True:
            if desc is None:
                return False
            if desc.hash == anc.hash:
                return True
            desc = self.get_checkpoint_parent(desc)

    def get_last_committed_checkpoint(self):
        z = len(self.checkpoints) - 1
        while self.score_checkpoint(self.received[self.checkpoints[z]]) < 1:
            z -= 1
        return self.checkpoints[z]

    # Called on receiving a block
    def accept_block(self, block):
        # If we didn't receive the block's parent yet, wait
        if block.prevhash not in self.received:
            self.add_dependency(block.prevhash, block)
            return False
        # We received the block
        self.received[block.hash] = block
        # print(self.id, 'got a block', block.number, block.hash)
        # If it's an epoch block (in general)
        if block.number % EPOCH_LENGTH == 0:
            # Start a tail object for it
            self.tail_membership[block.hash] = block.hash
            self.tails[block.hash] = block
        # Otherwise...
        else:
            # See if it's part of the longest tail; if so, set the tail accordingly
            assert block.prevhash in self.received
            assert block.prevhash in self.tail_membership
            self.tail_membership[block.hash] = self.tail_membership[block.prevhash]
            if block.number > self.tails[self.tail_membership[block.hash]].number:
                self.tails[self.tail_membership[block.hash]] = block
        self.check_checkpoints(self.received[self.tail_membership[block.hash]])
        self.maybe_prepare_last_checkpoint()
        return True

    def maybe_prepare_last_checkpoint(self):
        target_block = self.received[self.checkpoints[-1]]
        # If the block is an epoch block of a higher epoch than what we've seen so far
        if target_block.epoch > self.current_epoch:
            print('now in epoch %d' % target_block.epoch)
            # Increment our epoch
            self.current_epoch = target_block.epoch
            # If our highest committed hash is in the main chain (in most cases
            # it should be), then send a prepare
            last_committed_checkpoint = self.get_last_committed_checkpoint()
            if self.is_ancestor(self.highest_committed_hash, last_committed_checkpoint):
                print('Preparing %d for epoch %d with view source %d' %
                      (target_block.hash, target_block.epoch, self.received[last_committed_checkpoint].epoch))
                self.network.broadcast(Prepare(target_block.epoch, target_block.hash, self.received[last_committed_checkpoint].epoch, self.id))
                assert self.received[target_block.hash]

    # Pick a checkpoint by number of commits first, epoch number
    # (ie. longest chain rule) second
    def score_checkpoint(self, block):
        # Choose the dynasty (current or previous) with the minimum number of commits
        current_dynasty_number_of_commits = len(list(set(block.current_dynasty.validators) & set(self.commits.get(block.hash, []))))
        prev_dynasty_number_of_commits = len(list(set(block.prev_dynasty.validators) & set(self.commits.get(block.hash, []))))
        number_of_commits = min(current_dynasty_number_of_commits, prev_dynasty_number_of_commits)
        return number_of_commits + 0.000000001 * self.tails[block.hash].number
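        # Annotation (not in the original): the 1e-9 * chain-length term is a pure
        # tie-breaker, so a checkpoint with even one more commit always outranks a
        # longer but less-committed chain — "commits first, length second".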

    # See if a given epoch block requires us to reorganize our checkpoint list
    def check_checkpoints(self, block):
        # Is this hash already in our main chain? Then do nothing
        if block.hash in self.checkpoints:
            # prev_checkpoint = self.received[self.checkpoints[self.checkpoints.index(block.hash) - 1]]
            # if score_checkpoint(block) < score_checkpoint(prev_checkpoint):
            return
        # Figure out how many of our checkpoints we need to revert
        z = len(self.checkpoints) - 1
        new_score = self.score_checkpoint(block)
        while new_score > self.score_checkpoint(self.received[self.checkpoints[z]]):
            z -= 1
        # If none, do nothing
        if z == len(self.checkpoints) - 1 and block.number <= self.received[self.checkpoints[z-1]].number:
            return
        # Delete the checkpoints that need to be superseded
        self.checkpoints = self.checkpoints[:z + 1]
        # Re-run the fork choice rule
        while 1:
            # Find the descendant with the highest score (commits first, epoch second)
            max_score = 0
            max_descendant = None
            for _hash in self.tails:
                if self.is_ancestor(self.checkpoints[-1], _hash) and _hash != self.checkpoints[-1]:
                    new_score = self.score_checkpoint(self.received[_hash])
                    if new_score > max_score:
                        max_score = new_score
                        max_descendant = _hash
            # Append to the chain that checkpoint, and all checkpoints between the
            # last checkpoint and the new one
            if max_descendant:
                new_chain = [max_descendant]
                while new_chain[0] != self.checkpoints[-1]:
                    new_chain.insert(0, self.get_checkpoint_parent(self.received[new_chain[0]]).hash)
                self.checkpoints.extend(new_chain[1:])
            # If there were no suitable descendants found, break
            else:
                break
        print('New checkpoints: %r' % [self.received[b].epoch for b in self.checkpoints])

    # Called on receiving a prepare message
    def accept_prepare(self, prepare):
        if self.id == 0:
            print('got a prepare', prepare.view, prepare.view_source, prepare.blockhash, prepare.blockhash in self.received)
        # If the block has not yet been received, wait
        if prepare.blockhash not in self.received:
            self.add_dependency(prepare.blockhash, prepare)
            return False
        # If the sender is not in the prepare's dynasty, ignore the prepare
        if prepare.sender not in self.received[prepare.blockhash].current_dynasty.validators and \
                prepare.sender not in self.received[prepare.blockhash].prev_dynasty.validators:
            return False
        # Add to the prepare count
        if prepare.blockhash not in self.prepare_count:
            self.prepare_count[prepare.blockhash] = {}
        self.prepare_count[prepare.blockhash][prepare.view_source] = self.prepare_count[prepare.blockhash].get(prepare.view_source, 0) + 1
        # If there are enough prepares and the previous dynasty is finalized...
        if self.prepare_count[prepare.blockhash][prepare.view_source] > (POOL_SIZE * 2) // 3 and \
                self.received[prepare.blockhash].prev_dynasty in self.finalized_dynasties and \
                prepare.blockhash not in self.committable:
            # Mark it as committable
            self.committable[prepare.blockhash] = True
            # Start counting commits
            self.commits[prepare.blockhash] = []
            # If there are dependencies (ie. commits that arrived before there
            # were enough prepares), since there are now enough prepares we
            # can process them
            if "commit:"+str(prepare.blockhash) in self.dependencies:
                for c in self.dependencies["commit:"+str(prepare.blockhash)]:
                    self.accept_commit(c)
                del self.dependencies["commit:"+str(prepare.blockhash)]
            # Broadcast a commit
            if self.current_epoch == prepare.view:
                self.network.broadcast(Commit(prepare.view, prepare.blockhash, self.id))
                print('Committing %d for epoch %d' % (prepare.blockhash, prepare.view))
                self.highest_committed_epoch = prepare.view
                self.highest_committed_hash = prepare.blockhash
                self.current_epoch = prepare.view + 0.5
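                # Annotation (not in the original): setting current_epoch to view + 0.5
                # appears to act as a sentinel so this node cannot commit twice in the
                # same epoch, while still advancing once the next epoch block arrives.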
        return True

    # Called on receiving a commit message
    def accept_commit(self, commit):
        if self.id == 0:
            print('got a commit', commit.view, commit.blockhash, commit.blockhash in self.received, commit.blockhash in self.committable)
        # If the block has not yet been received, wait
        if commit.blockhash not in self.received:
            self.add_dependency(commit.blockhash, commit)
            return False
        # If the sender is not in the commit's dynasty, ignore the commit
        if commit.sender not in self.received[commit.blockhash].current_dynasty.validators and \
                commit.sender not in self.received[commit.blockhash].prev_dynasty.validators:
            return False
        # If there have not yet been enough prepares, wait
        if commit.blockhash not in self.committable:
            self.add_dependency("commit:"+str(commit.blockhash), commit)
            return False
        # Add the commit by recording the sender
        self.commits[commit.blockhash].append(commit.sender)
        # Check if the block is finalized
        current_dynasty_commits = list(set(self.received[commit.blockhash].current_dynasty.validators) & set(self.commits[commit.blockhash]))
        prev_dynasty_commits = list(set(self.received[commit.blockhash].prev_dynasty.validators) & set(self.commits[commit.blockhash]))
        if len(current_dynasty_commits) > (POOL_SIZE * 2) // 3 and len(prev_dynasty_commits) > (POOL_SIZE * 2) // 3:
            # Because the block has been finalized, record its dynasty as finalized
            finalized_dynasty = self.received[commit.blockhash].current_dynasty
            self.finalized_dynasties.add(finalized_dynasty)
            print('Finalizing dynasty number %d for block number %d' %
                  (finalized_dynasty.number, self.received[commit.blockhash].number))
        # Update the checkpoints if needed
        self.check_checkpoints(self.received[commit.blockhash])
        return True

    # Called on receiving any object
    def on_receive(self, obj):
        if obj.hash in self.received:
            return False
        if isinstance(obj, Block):
            o = self.accept_block(obj)
        elif isinstance(obj, Prepare):
            o = self.accept_prepare(obj)
        elif isinstance(obj, Commit):
            o = self.accept_commit(obj)
        # If the object was successfully processed
        # (ie. not flagged as having unsatisfied dependencies)
        if o:
            self.received[obj.hash] = obj
            if obj.hash in self.dependencies:
                for d in self.dependencies[obj.hash]:
                    self.on_receive(d)
                del self.dependencies[obj.hash]

    # Called every round
    def tick(self, _time):
        if self.id == (_time // BLOCK_TIME) % POOL_SIZE and _time % BLOCK_TIME == 0:
            new_block = Block(self.head, self.finalized_dynasties)
            self.network.broadcast(new_block)
            self.on_receive(new_block)

network = Network(poisson_latency(AVG_LATENCY))
nodes = [Node(network, i) for i in VALIDATOR_IDS]
for t in range(25000):
    network.tick()
    if t % 1000 == 999:
        print('Heads:', [n.head.number for n in nodes])
        print('Checkpoints:', nodes[0].checkpoints)
        print('Commits:', [nodes[0].commits.get(c, 0) for c in nodes[0].checkpoints])
        print('Blocks Dynasties:', [(nodes[0].received[c].current_dynasty.number) for c in nodes[0].checkpoints])
        print('All Node Dynasties:', [(node.tails[node.checkpoints[-1]].current_dynasty.number) for node in nodes])
@@ -1,12 +0,0 @@
with inp = ~calldataload(0):
    foo = inp
    exp = 0
    while foo >= 256:
        foo = ~div(foo, 256)
        exp += 1
    with x = ~div(inp, 16 ** exp):
        while 1:
            y = ~div(x + ~div(inp, x) + 1, 2)
            if x == y:
                return x
            x = y
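
# Annotation (not part of the original file): the snippet above is an integer square
# root in LLL — it derives a starting guess from the byte length of the input (inp is
# roughly 256**exp, so inp // 16**exp is near its square root), then iterates Newton's
# method. A Python equivalent, assuming the iteration converges to a fixed point:
#
#   def isqrt(inp):
#       foo, exp = inp, 0
#       while foo >= 256:          # exp = byte length of inp
#           foo //= 256
#           exp += 1
#       x = inp // (16 ** exp)     # initial guess ~ sqrt(inp)
#       while True:
#           y = (x + inp // x + 1) // 2
#           if x == y:
#               return x
#           x = y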
@@ -1,52 +0,0 @@
# NO_SURROUND validation checker. For any attestation, call `new_attestation(s, t)`.
# If a NO_SURROUND violation occurs, a CollisionFound exception is raised. Requires
# 2*N*log(N) bits per validator in addition to the attestations themselves.

class CollisionFound(Exception):
    pass

class Checker():
    def __init__(self, MAX=16):
        self.array1 = [i for i in range(MAX)]
        self.array2 = [MAX for i in range(MAX)]
        self.attestations = []

    def new_attestation(self, s, t):
        assert s < t
        if s > 0 and t < self.array1[s - 1]:
            for os, ot in self.attestations:
                if os < s < t < ot:
                    raise CollisionFound("Collision found: ({} {}) surrounds provided ({} {})".format(os, ot, s, t))
            raise Exception("panic, should never be here")
        if t > self.array2[s + 1]:
            for os, ot in self.attestations:
                if s < os < ot < t:
                    raise CollisionFound("Collision found: ({} {}) is surrounded by provided ({} {})".format(os, ot, s, t))
            raise Exception("panic, should never be here")
        _s = s
        while self.array1[_s] < t and _s < t:
            self.array1[_s] = t
            _s += 1
        _s = s
        while self.array2[_s] > t and _s >= 0:
            self.array2[_s] = t
            _s -= 1
        for os, ot in self.attestations:
            assert not ((os < s < t < ot) or (s < os < ot < t))
        self.attestations.append((s, t))

def test():
    c = Checker(16)
    import random
    for i in range(30):
        x, y = random.randrange(16), random.randrange(16)
        if x == y:
            continue
        try:
            c.new_attestation(min(x, y), max(x, y))
        except CollisionFound as e:
            print(e)
    print("Test successful")

if __name__ == '__main__':
    test()
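
# Annotation (not part of the original file): array1[i] tracks the largest attested
# target among attestations with source <= i, and array2[i] the smallest attested
# target among attestations with source >= i. A new attestation (s, t) is surrounded
# iff t < array1[s - 1] and surrounds an older one iff t > array2[s + 1], so each
# direction is an O(1) check; the linear scans only reconstruct the offending pair
# for the error message.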
@@ -1,37 +0,0 @@
import random, sys


def normal_distribution(mean, standev):
    def f():
        return int(random.normalvariate(mean, standev))

    return f


def exponential_distribution(mean):
    def f():
        total = 0
        while 1:
            total += 1
            if not random.randrange(32):
                break
        return int(total * 0.03125 * mean)

    return f


def convolve(*args):
    def f():
        total = 0
        for arg in args:
            total += arg()
        return total

    return f


def transform(dist, xformer):
    def f():
        return xformer(dist())

    return f
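

# Annotation (not part of the original file): these helpers compose into ad-hoc
# latency models for the simulators in this repo, e.g. a normal base latency with
# an exponential tail, clamped to be non-negative:
#
#   latency = transform(convolve(normal_distribution(50, 10),
#                                exponential_distribution(20)),
#                       lambda x: max(x, 0))
#   sample = latency()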
@@ -1,278 +0,0 @@
import os
from binascii import hexlify
from Crypto.Hash import keccak
import random

def to_hex(s):
    return hexlify(s).decode('utf-8')

memo = {}

def sha3(x):
    if x not in memo:
        memo[x] = keccak.new(digest_bits=256, data=x).digest()
    return memo[x]

def hash_to_int(h):
    o = 0
    for c in h:
        o = (o << 8) + c
    return o

NOTARIES = 100
SLOT_SIZE = 6
EPOCH_LENGTH = 25

# Not a full RANDAO; stub for now
class Block():
    def __init__(self, parent, slot, proposer):
        self.contents = os.urandom(32)
        self.parent_hash = parent.hash if parent else (b'\x00' * 32)
        self.hash = sha3(self.parent_hash + self.contents)
        self.height = parent.height + 1 if parent else 0
        assert slot % NOTARIES == proposer
        self.proposer = proposer
        self.slot = slot

    def min_timestamp(self):
        return SLOT_SIZE * self.slot

class Sig():
    def __init__(self, proposer, targets, slot, ts):
        self.proposer = proposer
        self.targets = targets
        self.slot = slot
        self.ts = ts
        self.hash = os.urandom(32)

genesis = Block(None, 0, 0)

class Node():

    def __init__(self, _id, network, sleepy=False, careless=False, ts=0):
        self.blocks = {
            genesis.hash: genesis,
        }
        self.sigs = {}
        self.main_chain = [genesis.hash]
        self.timequeue = []
        self.parentqueue = {}
        self.children = {}
        self.scores = {}
        self.scores_at_height = {}
        self.justified = {}
        self.finalized = {}
        self.ts = ts
        self.id = _id
        self.network = network
        self.used_parents = {}
        self.processed = {}
        self.sleepy = sleepy
        self.careless = careless
        self.first_round = True
        self.last_made_block = 0
        self.last_made_sig = 0

    def broadcast(self, x):
        if self.sleepy and self.ts:
            return
        self.network.broadcast(self, x)
        self.on_receive(x)

    def log(self, words, lvl=3, all=False):
        #if "Tick:" != words[:5] or self.id == 0:
        if (self.id == 0 or all) and lvl >= 2:
            print(self.id, words)

    def on_receive(self, obj, reprocess=False):
        if obj.hash in self.processed and not reprocess:
            return
        self.processed[obj.hash] = obj
        if isinstance(obj, Block):
            return self.on_receive_beacon_block(obj)
        elif isinstance(obj, Sig):
            return self.on_receive_sig(obj)

    def add_to_timequeue(self, obj):
        i = 0
        while i < len(self.timequeue) and self.timequeue[i].min_timestamp() < obj.min_timestamp():
            i += 1
        self.timequeue.insert(i, obj)

    def add_to_multiset(self, _set, k, v):
        if k not in _set:
            _set[k] = []
        _set[k].append(v)

    def change_head(self, chain, new_head):
        chain.extend([None] * (new_head.height + 1 - len(chain)))
        i, c = new_head.height, new_head.hash
        while c != chain[i]:
            chain[i] = c
            c = self.blocks[c].parent_hash
            i -= 1
        for i in range(len(chain)):
            assert self.blocks[chain[i]].height == i

    def recalculate_head(self):
        while 1:
            descendant_queue = [self.main_chain[-1]]
            new_head = None
            max_count = 0
            while len(descendant_queue):
                first = descendant_queue.pop(0)
                if first in self.children:
                    for c in self.children[first]:
                        descendant_queue.append(c)
                if self.scores.get(first, 0) > max_count and first != self.main_chain[-1]:
                    new_head = first
                    max_count = self.scores.get(first, 0)
            if new_head:
                self.change_head(self.main_chain, self.blocks[new_head])
            else:
                return

    def process_children(self, h):
        if h in self.parentqueue:
            for b in self.parentqueue[h]:
                self.on_receive(b, reprocess=True)
            del self.parentqueue[h]

    def get_common_ancestor(self, a, b):
        a, b = self.blocks[a], self.blocks[b]
        while b.height > a.height:
            b = self.blocks[b.parent_hash]
        while a.height > b.height:
            a = self.blocks[a.parent_hash]
        while a.hash != b.hash:
            a = self.blocks[a.parent_hash]
            b = self.blocks[b.parent_hash]
        return a

    def is_descendant(self, a, b):
        a, b = self.blocks[a], self.blocks[b]
        while b.height > a.height:
            b = self.blocks[b.parent_hash]
        return a.hash == b.hash

    def have_ancestry(self, h):
        while h != genesis.hash:
            if h not in self.processed:
                return False
            h = self.processed[h].parent_hash
        return True

    def on_receive_beacon_block(self, block):
        # Parent not yet received
        if block.parent_hash not in self.blocks:
            self.add_to_multiset(self.parentqueue, block.parent_hash, block)
            return
        # Too early
        if block.min_timestamp() > self.ts:
            self.add_to_timequeue(block)
            return
        # Add the block
        self.log("Processing beacon block %s" % to_hex(block.hash[:4]))
        self.blocks[block.hash] = block
        # Is the block building on the head? Then add it to the head!
        if block.parent_hash == self.main_chain[-1] or self.careless:
            self.main_chain.append(block.hash)
        # Add child record
        self.add_to_multiset(self.children, block.parent_hash, block.hash)
        # Final steps
        self.process_children(block.hash)
        self.network.broadcast(self, block)

    def on_receive_sig(self, sig):
        if sig.targets[0] not in self.blocks:
            self.add_to_multiset(self.parentqueue, sig.targets[0], sig)
            return
        # Get common ancestor
        anc = self.get_common_ancestor(self.main_chain[-1], sig.targets[0])
        max_score = max([0] + [self.scores.get(self.main_chain[i], 0) for i in range(anc.height + 1, len(self.main_chain))])
        # Process scoring
        max_newchain_score = 0
        for i, c in list(enumerate(sig.targets))[::-1]:
            slot = sig.slot - 1 - i
            slot_key = slot.to_bytes(4, 'big')
            assert self.blocks[c].slot <= slot

            # If a parent and child block have non-consecutive slots, then the parent
            # block is also considered to be the canonical block at all of the intermediate
            # slot numbers. We store the scores for the block at each height separately
            self.scores_at_height[slot_key + c] = self.scores_at_height.get(slot_key + c, 0) + 1

            # For fork choice rule purposes, the score of a block is the highest score
            # that it has at any height
            self.scores[c] = max(self.scores.get(c, 0), self.scores_at_height[slot_key + c])

            # If 2/3 of notaries vote for a block, it is justified
            if self.scores_at_height[slot_key + c] == NOTARIES * 2 // 3:
                self.justified[c] = True
                c2 = c
                self.log("Justified: %d %s" % (slot, hexlify(c).decode('utf-8')[:8]))

                # If EPOCH_LENGTH+1 blocks are justified in a row, the oldest is
                # considered finalized

                finalize = True
                for slot2 in range(slot - 1, max(slot - EPOCH_LENGTH * 2, 0) - 1, -1):
                    if slot2 < self.blocks[c2].slot:
                        c2 = self.blocks[c2].parent_hash
                    if self.scores_at_height.get(slot2.to_bytes(4, 'big') + c2, 0) < (NOTARIES * 2 // 3):
                        finalize = False
                        # self.log("Not quite finalized: stopped at %d needed %d" % (slot2, max(slot - EPOCH_LENGTH, 0)))
                        break
                if slot2 < slot - EPOCH_LENGTH - 1 and finalize and c2 not in self.finalized:
                    self.log("Finalized: %d %s" % (self.blocks[c2].slot, hexlify(c).decode('utf-8')[:8]))
                    self.finalized[c2] = True

            # Find the maximum score of a block on the chain that this sig is weighing on
            if self.blocks[c].slot > anc.slot:
                max_newchain_score = max(max_newchain_score, self.scores[c])

        # If it's higher, switch over the canonical chain
        if max_newchain_score > max_score:
            self.main_chain = self.main_chain[:anc.height+1]
            self.recalculate_head()

        self.sigs[sig.hash] = sig

        # Rebroadcast
        self.network.broadcast(self, sig)

    # Get the portion of the main chain that is within the last EPOCH_LENGTH
    # slots, once again duplicating the parent in cases where the parent and
    # child's slots are not consecutive
    def get_sig_targets(self, start_slot):
        o = []
        i = len(self.main_chain) - 1
        for slot in range(start_slot - 1, max(start_slot - EPOCH_LENGTH, 0) - 1, -1):
            if slot < self.blocks[self.main_chain[i]].slot:
                i -= 1
            o.append(self.main_chain[i])
        for i, x in enumerate(o):
            assert self.blocks[x].slot <= start_slot - 1 - i
        assert len(o) == min(EPOCH_LENGTH, start_slot)
        return o

def tick(self):
|
||||
self.ts += 0.1
|
||||
self.log("Tick: %.1f" % self.ts, lvl=1)
|
||||
# Make a block?
|
||||
slot = int(self.ts // SLOT_SIZE)
|
||||
if slot > self.last_made_block and (slot % NOTARIES) == self.id:
|
||||
self.broadcast(Block(self.blocks[self.main_chain[-1]], slot, self.id))
|
||||
self.last_made_block = slot
|
||||
# Make a sig?
|
||||
if slot > self.last_made_sig and (slot % EPOCH_LENGTH) == self.id % EPOCH_LENGTH:
|
||||
sig_from = len(self.main_chain) - 1
|
||||
while sig_from > 0 and self.blocks[self.main_chain[sig_from]].slot >= slot - EPOCH_LENGTH:
|
||||
sig_from -= 1
|
||||
sig = Sig(self.id, self.get_sig_targets(slot), slot, self.ts)
|
||||
# self.log('Sig:', self.id, sig.slot, ' '.join([hexlify(t).decode('utf-8')[:4] for t in sig.targets]))
|
||||
self.broadcast(sig)
|
||||
self.last_made_sig = slot
|
||||
# Process time queue
|
||||
while len(self.timequeue) and self.timequeue[0].min_timestamp() <= self.ts:
|
||||
self.on_receive(self.timequeue.pop(0), reprocess=True)
|
||||
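The scoring above keys each counter by a 4-byte big-endian slot number concatenated with the block hash, so one block can carry an independent score at every slot where it is the canonical block, and its fork-choice score is the max over those. A minimal standalone sketch of that bookkeeping (the hash and vote counts here are made up; NOTARIES = 50 matches the constant in this file):

# Standalone sketch of the scores_at_height bookkeeping used above.
# The block hash is a toy value; 20 votes per slot is an arbitrary choice.
NOTARIES = 50
scores_at_height = {}
scores = {}
block_hash = b'\xaa' * 32          # hypothetical block hash
for slot in (7, 8):                # block is canonical at two consecutive slots
    key = slot.to_bytes(4, 'big') + block_hash
    for _ in range(20):            # 20 distinct sig targets hit this (slot, block) pair
        scores_at_height[key] = scores_at_height.get(key, 0) + 1
        scores[block_hash] = max(scores.get(block_hash, 0), scores_at_height[key])
print(scores[block_hash])          # 20: the max over heights, not the sum
# Justification would need scores_at_height[key] to reach NOTARIES * 2 // 3 == 33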
@@ -1,73 +0,0 @@
from networksim import NetworkSimulator
from ghost_node import Node, NOTARIES, Block, Sig, genesis, SLOT_SIZE
from distributions import normal_distribution

net = NetworkSimulator(latency=45)
notaries = [Node(i, net, ts=max(normal_distribution(5, 5)(), 0) * 0.1, sleepy=False) for i in range(NOTARIES)]
net.agents = notaries
net.generate_peers()
for i in range(12000):
    net.tick()
for n in notaries:
    print("Local timestamp: %.1f, timequeue len %d" % (n.ts, len(n.timequeue)))
    print("Main chain head: %d" % n.blocks[n.main_chain[-1]].height)
    print("Total main chain blocks received: %d" % (len([b for b in n.blocks.values() if isinstance(b, Block)]) - 1))

import matplotlib.pyplot as plt
import networkx as nx
import random

G = nx.Graph()

#positions = {genesis.hash: 0, beacon_genesis.hash: 0}
#queue = [

# The plotting below uses n, i.e. the last notary from the loop above
for b in n.blocks.values():
    if b.height > 0:
        if isinstance(b, Block):
            G.add_edge(b.hash, b.parent_hash, color='b')
for s in n.sigs.values():
    G.add_edge(s.hash, s.targets[0], color='0.75')

cache = {genesis.hash: 0}

def mkoffset(b):
    if b.hash not in cache:
        cache[b.hash] = cache[b.parent_hash] + random.randrange(35)
    return cache[b.hash]

pos = {b'\x00'*32: (0, 0)}
for b in sorted(n.blocks.values(), key=lambda b: b.height):
    x, y = pos[b.parent_hash]
    pos[b.hash] = (x + (random.randrange(5) if b.hash in n.main_chain else -random.randrange(5)), y+10)
for s in n.sigs.values():
    parent = n.blocks[s.targets[0]]
    x, y = pos[parent.hash]
    pos[s.hash] = (x - 2 + random.randrange(5),
                   y + 5)

finalized = {k: v for k, v in pos.items() if k in n.finalized}
justified = {k: v for k, v in pos.items() if k in n.justified and k not in n.finalized}
unjustified = {k: v for k, v in pos.items() if k not in n.justified and k in n.blocks}
sigs = {k: v for k, v in pos.items() if k not in n.blocks}

edges = G.edges()
colors = [G[u][v]['color'] for u, v in edges]

nx.draw_networkx_nodes(G, pos, nodelist=sigs.keys(), node_size=5, node_shape='o', node_color='0.75')
nx.draw_networkx_nodes(G, pos, nodelist=unjustified.keys(), node_size=10, node_shape='o', node_color='0.75')
nx.draw_networkx_nodes(G, pos, nodelist=justified.keys(), node_size=16, node_shape='o', node_color='y')
nx.draw_networkx_nodes(G, pos, nodelist=finalized.keys(), node_size=25, node_shape='o', node_color='g')
# nx.draw_networkx_labels(G, pos, {h: n.scores.get(h, 0) for h in n.blocks.keys()}, font_size=5)

blockedges = [(u, v) for (u, v) in edges if G[u][v]['color'] == 'b']
otheredges = [(u, v) for (u, v) in edges if G[u][v]['color'] == '0.75']
nx.draw_networkx_edges(G, pos, edgelist=otheredges, width=1, edge_color='0.75')
nx.draw_networkx_edges(G, pos, edgelist=blockedges, width=2, edge_color='b')

print('Scores:', [n.scores.get(c, 0) for c in n.main_chain])

plt.axis('off')
# plt.savefig("degree.png", bbox_inches="tight")
plt.show()
@@ -1,367 +0,0 @@
import os
from binascii import hexlify
from Crypto.Hash import keccak
import random

def to_hex(s):
    return hexlify(s).decode('utf-8')

memo = {}

def sha3(x):
    if x not in memo:
        memo[x] = keccak.new(digest_bits=256, data=x).digest()
    return memo[x]

def hash_to_int(h):
    o = 0
    for c in h:
        o = (o << 8) + c
    return o

def get_most_common_entry(lst):
    counts = {}
    for l in lst:
        counts[l] = counts.get(l, 0) + 1
    maxcount, maxkey = max(zip(counts.values(), counts.keys()))
    return maxkey, maxcount

NOTARIES = 50
SLOT_SIZE = 3
EPOCH_LENGTH = 25

# Not a full RANDAO; stub for now
class Block():
    def __init__(self, parent, slot, proposer):
        self.contents = os.urandom(32)
        if parent:
            # Skip-list of ancestors: entry i points at the most recent ancestor
            # that crossed a multiple-of-2**i slot boundary, enabling fast
            # get_ancestor_at_slot lookups
            self.ancestor_hashes = [None] * 16
            self.ancestor_slots = [0] * 16
            for i in range(16):
                if (parent.slot // 2**i) > (parent.ancestor_slots[i] // 2**i):
                    self.ancestor_hashes[i] = parent.hash
                    self.ancestor_slots[i] = parent.slot
                else:
                    self.ancestor_hashes[i] = parent.ancestor_hashes[i]
                    self.ancestor_slots[i] = parent.ancestor_slots[i]
        else:
            self.ancestor_hashes = [b'\x00' * 32 for i in range(16)]
            self.ancestor_slots = [-1 for i in range(16)]
        self.hash = sha3(self.parent_hash + self.contents)
        self.height = parent.height + 1 if parent else 0
        assert slot % NOTARIES == proposer
        self.proposer = proposer
        self.slot = slot

    @property
    def parent_hash(self):
        return self.ancestor_hashes[0]

    def min_timestamp(self):
        return SLOT_SIZE * self.slot

class Sig():
    def __init__(self, proposer, targets, slot, ts):
        self.proposer = proposer
        self.targets = targets
        self.slot = slot
        self.ts = ts
        self.hash = os.urandom(32)

genesis = Block(None, 0, 0)

class Node():

    def __init__(self, _id, network, sleepy=False, careless=False, ts=0):
        self.blocks = {
            genesis.hash: genesis,
        }
        self.sigs = {}
        self.main_chain = [genesis.hash]
        self.lmd_head = genesis.hash
        self.timequeue = []
        self.parentqueue = {}
        self.children = {}
        self.scores = {}
        self.scores_at_height = {}
        self.justified = {}
        self.finalized = {}
        self.ts = ts
        self.id = _id
        self.network = network
        self.used_parents = {}
        self.processed = {}
        self.sleepy = sleepy
        self.careless = careless
        self.first_round = True
        self.last_made_block = 0
        self.last_made_sig = 0
        self.most_recent_votes = {}
        self.observed_ts_deltas = [0] * NOTARIES

    def broadcast(self, x):
        if self.sleepy and self.ts:
            return
        self.network.broadcast(self, x)
        self.on_receive(x)

    def log(self, words, lvl=3, all=False):
        #if "Tick:" != words[:5] or self.id == 0:
        if (self.id == 0 or all) and lvl >= 2:
            print(self.id, words)

    def on_receive(self, obj, reprocess=False):
        if obj.hash in self.processed and not reprocess:
            return
        self.processed[obj.hash] = obj
        if isinstance(obj, Block):
            return self.on_receive_beacon_block(obj)
        elif isinstance(obj, Sig):
            return self.on_receive_sig(obj)

    def add_to_timequeue(self, obj):
        i = 0
        while i < len(self.timequeue) and self.timequeue[i].min_timestamp() < obj.min_timestamp():
            i += 1
        self.timequeue.insert(i, obj)

    def add_to_multiset(self, _set, k, v):
        if k not in _set:
            _set[k] = []
        _set[k].append(v)

    def get_ancestor_at_slot(self, block, slot):
        if not isinstance(block, Block):
            block = self.blocks[block]
        if block.slot <= slot:
            return block
        d = 15
        while (block.slot - slot) < 2**d:
            d -= 1
        anc = self.blocks[block.ancestor_hashes[d]]
        return self.get_ancestor_at_slot(anc, slot)

    def change_head(self, chain, new_head):
        chain.extend([None] * (new_head.height + 1 - len(chain)))
        i, c = new_head.height, new_head.hash
        while c != chain[i]:
            chain[i] = c
            c = self.blocks[c].parent_hash
            i -= 1
        for i in range(len(chain)):
            assert self.blocks[chain[i]].height == i

    def recalculate_head(self):
        while 1:
            descendant_queue = [self.main_chain[-1]]
            new_head = None
            max_count = 0
            while len(descendant_queue):
                first = descendant_queue.pop(0)
                if first in self.children:
                    for c in self.children[first]:
                        descendant_queue.append(c)
                if self.scores.get(first, 0) > max_count and first != self.main_chain[-1]:
                    new_head = first
                    max_count = self.scores.get(first, 0)
            if new_head:
                self.change_head(self.main_chain, self.blocks[new_head])
            else:
                return

    def process_children(self, h):
        if h in self.parentqueue:
            for b in self.parentqueue[h]:
                self.on_receive(b, reprocess=True)
            del self.parentqueue[h]

    def get_common_ancestor(self, a, b):
        if not isinstance(a, Block):
            a = self.blocks[a]
        if not isinstance(b, Block):
            b = self.blocks[b]
        while b.height > a.height:
            b = self.blocks[b.parent_hash]
        while a.height > b.height:
            a = self.blocks[a.parent_hash]
        while a.hash != b.hash:
            a = self.blocks[a.parent_hash]
            b = self.blocks[b.parent_hash]
        return a

    def is_descendant(self, a, b):
        a, b = self.blocks[a], self.blocks[b]
        while b.height > a.height:
            b = self.blocks[b.parent_hash]
        return a.hash == b.hash

    def have_ancestry(self, h):
        while h != genesis.hash:
            if h not in self.processed:
                return False
            h = self.processed[h].parent_hash
        return True

    def on_receive_beacon_block(self, block):
        # Parent not yet received
        if block.parent_hash not in self.blocks:
            self.add_to_multiset(self.parentqueue, block.parent_hash, block)
            return
        # Too early
        if block.min_timestamp() > self.ts:
            self.add_to_timequeue(block)
            return
        # Add the block
        self.log("Processing beacon block %s" % to_hex(block.hash[:4]))
        self.blocks[block.hash] = block
        # Is the block building on the head? Then add it to the head!
        if block.parent_hash == self.main_chain[-1] or self.careless:
            self.main_chain.append(block.hash)
        # Add child record
        self.add_to_multiset(self.children, block.parent_hash, block.hash)
        # head = self.compute_lmd_head()
        # if head.hash == block.hash:
        self.observed_ts_deltas[block.proposer] = block.min_timestamp() - self.ts
        # Final steps
        self.process_children(block.hash)
        self.network.broadcast(self, block)

    def compute_lmd_head(self):
        voters = list(range(NOTARIES))
        binary_search_anchor = 0
        skip = 1
        last_nonnull_maxkey = genesis.hash
        print("Computing LMD head")
        print('top_slots of votes', [self.blocks[v[0]].slot for v in self.most_recent_votes.values()])
        while 1:
            slot_at = binary_search_anchor + skip
            votes_at_slot = []
            voters_for_hash = {}
            for voter in voters:
                if voter in self.most_recent_votes and self.most_recent_votes[voter][1] >= slot_at:
                    votes_at_slot.append(self.get_ancestor_at_slot(self.most_recent_votes[voter][0], slot_at).hash)
            maxkey, maxcount = get_most_common_entry(votes_at_slot) if votes_at_slot else (None, 0)
            if maxkey:
                last_nonnull_maxkey = maxkey
            assert votes_at_slot.count(maxkey) == maxcount
            print(slot_at, maxkey, maxcount)
            if maxcount > len(voters) // 2:
                binary_search_anchor += skip
                skip *= 2
            else:
                if skip == 1:
                    remaining_voters = [
                        v for v in voters if \
                        v in self.most_recent_votes and \
                        self.most_recent_votes[v][1] >= slot_at and \
                        self.get_ancestor_at_slot(self.most_recent_votes[v][0], slot_at).hash == maxkey
                    ]
                    assert maxcount == len(remaining_voters)
                    voters = remaining_voters
                    binary_search_anchor += skip
                    print("%d remaining_voters" % len(remaining_voters))
                    print('top_slots', [self.blocks[self.most_recent_votes[v][0]].slot for v in voters])
                else:
                    skip //= 2
            if len(voters) == 0:
                o = self.blocks[last_nonnull_maxkey]
                while o.hash in self.children:
                    o = self.blocks[self.children[o.hash][0]]
                return o

    def on_receive_sig(self, sig):
        if sig.targets[0] not in self.blocks:
            self.add_to_multiset(self.parentqueue, sig.targets[0], sig)
            return
        # Get common ancestor
        anc = self.get_common_ancestor(self.main_chain[-1], sig.targets[0])
        max_score = max([0] + [self.scores.get(self.main_chain[i], 0) for i in range(anc.height + 1, len(self.main_chain))])
        # Process scoring
        max_newchain_score = 0
        for i, c in list(enumerate(sig.targets))[::-1]:
            slot = sig.slot - 1 - i
            slot_key = slot.to_bytes(4, 'big')
            assert self.blocks[c].slot <= slot

            # If a parent and child block have non-consecutive slots, then the parent
            # block is also considered to be the canonical block at all of the intermediate
            # slot numbers. We store the scores for the block at each height separately
            self.scores_at_height[slot_key + c] = self.scores_at_height.get(slot_key + c, 0) + 1

            # For fork choice rule purposes, the score of a block is the highest score
            # that it has at any height
            self.scores[c] = max(self.scores.get(c, 0), self.scores_at_height[slot_key + c])

            # If 2/3 of notaries vote for a block, it is justified
            if self.scores_at_height[slot_key + c] == NOTARIES * 2 // 3:
                self.justified[c] = True
                c2 = c
                self.log("Justified: %d %s" % (slot, hexlify(c).decode('utf-8')[:8]))
                # If EPOCH_LENGTH+1 blocks are justified in a row, the oldest is
                # considered finalized
                finalize = True
                for slot2 in range(slot - 1, max(slot - EPOCH_LENGTH * 2, 0) - 1, -1):
                    if slot2 < self.blocks[c2].slot:
                        c2 = self.blocks[c2].parent_hash
                    if self.scores_at_height.get(slot2.to_bytes(4, 'big') + c2, 0) < (NOTARIES * 2 // 3):
                        finalize = False
                        # self.log("Not quite finalized: stopped at %d needed %d" % (slot2, max(slot - EPOCH_LENGTH, 0)))
                        break
                    if slot2 < slot - EPOCH_LENGTH - 1 and finalize and c2 not in self.finalized:
                        self.log("Finalized: %d %s" % (self.blocks[c2].slot, hexlify(c).decode('utf-8')[:8]))
                        self.finalized[c2] = True
            # Find the maximum score of a block on the chain that this sig is weighing on
            if self.blocks[c].slot > anc.slot:
                max_newchain_score = max(max_newchain_score, self.scores[c])
        # If it's higher, switch over the canonical chain
        if max_newchain_score > max_score:
            self.main_chain = self.main_chain[:anc.height+1]
            self.recalculate_head()

        # Adjust most recent votes array
        existing_vote_slot = self.most_recent_votes.get(sig.proposer, (None, -1))[1]
        if sig.slot > existing_vote_slot:
            self.most_recent_votes[sig.proposer] = (sig.targets[0], sig.slot)

        self.sigs[sig.hash] = sig

        # Rebroadcast
        self.network.broadcast(self, sig)

    # Get an EPOCH_LENGTH-chain of targets ending at the given head block and
    # slot, once again duplicating the parent in cases where the parent and
    # child's slots are not consecutive
    def get_sig_targets(self, head, slot):
        return [self.get_ancestor_at_slot(head, s).hash for s in range(slot - 1, max(slot - EPOCH_LENGTH, 0) - 1, -1)]

    def get_adjusted_timestamp(self):
        pull_threshold = 0.50
        add_zeroes = int(NOTARIES * (pull_threshold * 2 - 1))
        index = int(NOTARIES * pull_threshold)
        return self.ts + sorted(self.observed_ts_deltas + [0] * add_zeroes)[index]

    def tick(self):
        self.ts += 0.1
        self.log("Tick: %.1f" % self.ts, lvl=1)
        # Make a block?
        ts = self.get_adjusted_timestamp()
        slot = int(ts // SLOT_SIZE)
        if slot > self.last_made_block and (slot % NOTARIES) == self.id:
            head = self.compute_lmd_head()
            b = Block(head, slot, self.id)
            self.broadcast(b)
            self.last_made_block = slot
        # Make a sig?
        if slot > self.last_made_sig and (slot % EPOCH_LENGTH) == self.id % EPOCH_LENGTH:
            head = self.compute_lmd_head()
            sig = Sig(self.id, self.get_sig_targets(head, slot), slot, ts)
            # self.log('Sig:', self.id, sig.slot, ' '.join([hexlify(t).decode('utf-8')[:4] for t in sig.targets]))
            self.broadcast(sig)
            self.last_made_sig = slot
        # Process time queue
        while len(self.timequeue) and self.timequeue[0].min_timestamp() <= ts:
            self.on_receive(self.timequeue.pop(0), reprocess=True)
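compute_lmd_head above advances by doubling and halving skip, narrowing in on the deepest slot at which a majority of the remaining voters still agree on one ancestor. A toy illustration of the majority-at-a-slot test it repeats, detached from the Node class (the vote table is made up, and blocks are abbreviated to chain labels so that "ancestor at slot s" is just the label):

# Toy illustration of the majority test driving compute_lmd_head above.
# most_recent_votes maps voter -> (head, vote_slot); made-up values.
most_recent_votes = {0: ('A', 9), 1: ('A', 9), 2: ('B', 9)}

def majority_at_slot(votes, slot_at):
    tally = {}
    for head, vote_slot in votes.values():
        if vote_slot >= slot_at:          # only voters whose vote covers this slot
            tally[head] = tally.get(head, 0) + 1
    if not tally:
        return None, 0
    best = max(tally, key=tally.get)
    return best, tally[best]

print(majority_at_slot(most_recent_votes, 5))  # ('A', 2): chain A has a majority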
@@ -1,74 +0,0 @@
from networksim import NetworkSimulator
from lmd_node import Node, NOTARIES, Block, Sig, genesis, SLOT_SIZE
from distributions import normal_distribution

net = NetworkSimulator(latency=20)
notaries = [Node(i, net, ts=max(normal_distribution(1000, 1000)(), 0) * 0.1, sleepy=False) for i in range(NOTARIES)]
net.agents = notaries
net.generate_peers()
for i in range(12000):
    net.tick()
for n in notaries:
    print("Local timestamp: %.1f, timequeue len %d" % (n.ts, len(n.timequeue)))
    print("Main chain head: %d" % n.compute_lmd_head().height)
    print("Total main chain blocks received: %d" % (len([b for b in n.blocks.values() if isinstance(b, Block)]) - 1))

import matplotlib.pyplot as plt
import networkx as nx
import random

G = nx.Graph()

#positions = {genesis.hash: 0, beacon_genesis.hash: 0}
#queue = [

# The plotting below uses n, i.e. the last notary from the loop above
for b in n.blocks.values():
    if b.height > 0:
        if isinstance(b, Block):
            G.add_edge(b.hash, b.parent_hash, color='b')
for s in n.sigs.values():
    G.add_edge(s.hash, s.targets[0], color='0.75')

cache = {genesis.hash: 0}

def mkoffset(b):
    if b.hash not in cache:
        cache[b.hash] = cache[b.parent_hash] + random.randrange(35)
    return cache[b.hash]

pos = {b'\x00'*32: (0, 0)}
for b in sorted(n.blocks.values(), key=lambda b: b.height):
    x, y = pos[b.parent_hash]
    pos[b.hash] = (x + (random.randrange(5) if b.hash in n.main_chain else -random.randrange(5)), y+10)
for s in n.sigs.values():
    parent = n.blocks[s.targets[0]]
    x, y = pos[parent.hash]
    pos[s.hash] = (x - 2 + random.randrange(5),
                   y + 5)

finalized = {k: v for k, v in pos.items() if k in n.finalized}
justified = {k: v for k, v in pos.items() if k in n.justified and k not in n.finalized}
unjustified = {k: v for k, v in pos.items() if k not in n.justified and k in n.blocks}
sigs = {k: v for k, v in pos.items() if k not in n.blocks}

edges = G.edges()
colors = [G[u][v]['color'] for u, v in edges]

nx.draw_networkx_nodes(G, pos, nodelist=sigs.keys(), node_size=5, node_shape='o', node_color='0.75')
nx.draw_networkx_nodes(G, pos, nodelist=unjustified.keys(), node_size=10, node_shape='o', node_color='0.75')
nx.draw_networkx_nodes(G, pos, nodelist=justified.keys(), node_size=16, node_shape='o', node_color='y')
nx.draw_networkx_nodes(G, pos, nodelist=finalized.keys(), node_size=25, node_shape='o', node_color='g')
# nx.draw_networkx_labels(G, pos, {h: n.scores.get(h, 0) for h in n.blocks.keys()}, font_size=5)

blockedges = [(u, v) for (u, v) in edges if G[u][v]['color'] == 'b']
otheredges = [(u, v) for (u, v) in edges if G[u][v]['color'] == '0.75']
nx.draw_networkx_edges(G, pos, edgelist=otheredges, width=1, edge_color='0.75')
nx.draw_networkx_edges(G, pos, edgelist=blockedges, width=2, edge_color='b')

print('Scores:', [n.scores.get(c, 0) for c in n.main_chain])
print('Timestamps:', [n.get_adjusted_timestamp() for n in notaries])

plt.axis('off')
# plt.savefig("degree.png", bbox_inches="tight")
plt.show()
@@ -1,75 +0,0 @@
from distributions import transform, normal_distribution
import random


class NetworkSimulator():

    def __init__(self, latency=50):
        self.agents = []
        self.latency_distribution_sample = transform(normal_distribution(latency, (latency * 2) // 5), lambda x: max(x, 0))
        self.time = 0
        self.objqueue = {}
        self.peers = {}
        self.reliability = 0.9

    def generate_peers(self, num_peers=5):
        self.peers = {}
        for a in self.agents:
            p = []
            while len(p) <= num_peers // 2:
                p.append(random.choice(self.agents))
                if p[-1] == a:
                    p.pop()
            self.peers[a.id] = self.peers.get(a.id, []) + p
            for peer in p:
                self.peers[peer.id] = self.peers.get(peer.id, []) + [a]

    def tick(self):
        if self.time in self.objqueue:
            for recipient, obj in self.objqueue[self.time]:
                if random.random() < self.reliability:
                    recipient.on_receive(obj)
            del self.objqueue[self.time]
        for a in self.agents:
            a.tick()
        self.time += 1

    def run(self, steps):
        for i in range(steps):
            self.tick()

    def broadcast(self, sender, obj):
        for p in self.peers[sender.id]:
            recv_time = self.time + self.latency_distribution_sample()
            if recv_time not in self.objqueue:
                self.objqueue[recv_time] = []
            self.objqueue[recv_time].append((p, obj))

    def direct_send(self, to_id, obj):
        for a in self.agents:
            if a.id == to_id:
                recv_time = self.time + self.latency_distribution_sample()
                if recv_time not in self.objqueue:
                    self.objqueue[recv_time] = []
                self.objqueue[recv_time].append((a, obj))

    def knock_offline_random(self, n):
        ko = {}
        while len(ko) < n:
            c = random.choice(self.agents)
            ko[c.id] = c
        for c in ko.values():
            self.peers[c.id] = []
        for a in self.agents:
            self.peers[a.id] = [x for x in self.peers[a.id] if x.id not in ko]

    def partition(self):
        a = {}
        while len(a) < len(self.agents) / 2:
            c = random.choice(self.agents)
            a[c.id] = c
        for c in self.agents:
            if c.id in a:
                self.peers[c.id] = [x for x in self.peers[c.id] if x.id in a]
            else:
                self.peers[c.id] = [x for x in self.peers[c.id] if x.id not in a]
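A minimal way to exercise NetworkSimulator on its own, with a stub agent standing in for the notary Node classes. This is a sketch: EchoAgent and Msg are invented here, and it assumes the repo's distributions module (which the simulator imports) returns integer latency samples, as the objqueue keying by tick requires.

# Hypothetical smoke test for NetworkSimulator; EchoAgent/Msg are not part of the repo.
import os
from networksim import NetworkSimulator

class EchoAgent():
    # Stub agent: just counts whatever the simulator delivers to it
    def __init__(self, _id):
        self.id = _id
        self.received = 0

    def on_receive(self, obj):
        self.received += 1

    def tick(self):
        pass

class Msg():
    def __init__(self):
        self.hash = os.urandom(32)

net = NetworkSimulator(latency=10)
net.agents = [EchoAgent(i) for i in range(10)]
net.generate_peers()
net.broadcast(net.agents[0], Msg())   # one-hop only: stub agents don't re-gossip
net.run(100)
# With reliability 0.9, most (not necessarily all) of agent 0's peers receive it
print([a.received for a in net.agents])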
@@ -1,251 +0,0 @@
import os
from binascii import hexlify
from Crypto.Hash import keccak
import random

def to_hex(s):
    return hexlify(s).decode('utf-8')

memo = {}

def sha3(x):
    if x not in memo:
        memo[x] = keccak.new(digest_bits=256, data=x).digest()
    return memo[x]

def hash_to_int(h):
    o = 0
    for c in h:
        o = (o << 8) + c
    return o

NOTARIES = 20
BASE_TS_DIFF = 1
SKIP_TS_DIFF = 6
SAMPLE = 8
# Required notary sig count as a function of the proposer's skip index
MIN_SAMPLE = lambda x: [6, 4, 3, 2, 2][x] if x < 5 else 1
POWDIFF = 50 * NOTARIES
SHARDS = 12

# Not a full RANDAO; stub for now
class Block():
    def __init__(self, parent, proposer, ts, sigs):
        self.contents = os.urandom(32)
        self.parent_hash = parent.hash if parent else (b'\x11' * 32)
        self.hash = sha3(self.parent_hash + self.contents)
        self.ts = ts
        self.sigs = sigs
        self.number = parent.number + 1 if parent else 0

        if parent:
            i = parent.child_proposers.index(proposer)
            assert self.ts >= parent.ts + BASE_TS_DIFF + i * SKIP_TS_DIFF
            assert len(sigs) >= parent.notary_req
            for sig in sigs:
                assert sig.target_hash == self.parent_hash

        # Calculate child proposers
        v = hash_to_int(sha3(self.contents))
        self.child_proposers = []
        while v > 0:
            self.child_proposers.append(v % NOTARIES)
            v //= NOTARIES

        # Calculate notaries
        if not parent:
            index = 0
        elif proposer in parent.child_proposers:
            index = parent.child_proposers.index(proposer)
        else:
            index = len(parent.child_proposers)
        self.notary_req = MIN_SAMPLE(index)
        v = hash_to_int(sha3(self.contents + b':n'))
        self.notaries = []
        for i in range(SAMPLE):
            self.notaries.append(v % NOTARIES)
            v //= NOTARIES

        # Calculate shard proposers
        v = hash_to_int(sha3(self.contents + b':s'))
        self.shard_proposers = []
        for i in range(SHARDS):
            self.shard_proposers.append(v % NOTARIES)
            v //= NOTARIES


class Sig():
    def __init__(self, proposer, target):
        self.proposer = proposer
        self.target_hash = target.hash
        self.hash = os.urandom(32)
        assert self.proposer in target.notaries

genesis = Block(None, 1, 0, [])

class BlockMakingRequest():
    def __init__(self, parent, ts):
        self.parent = parent
        self.ts = ts
        self.hash = os.urandom(32)

class Node():

    def __init__(self, _id, network, sleepy=False, careless=False, ts=0):
        self.blocks = {
            genesis.hash: genesis,
        }
        self.sigs = {}
        self.main_chain = [genesis.hash]
        self.timequeue = []
        self.parentqueue = {}
        self.children = {}
        self.ts = ts
        self.id = _id
        self.network = network
        self.used_parents = {}
        self.processed = {}
        self.sleepy = sleepy
        self.careless = careless
        self.first_round = True

    def broadcast(self, x):
        if self.sleepy and self.ts:
            return
        self.network.broadcast(self, x)
        self.on_receive(x)

    def log(self, words, lvl=3, all=False):
        #if "Tick:" != words[:5] or self.id == 0:
        if (self.id == 0 or all) and lvl >= 2:
            print(self.id, words)

    def on_receive(self, obj, reprocess=False):
        if obj.hash in self.processed and not reprocess:
            return
        self.processed[obj.hash] = obj
        if isinstance(obj, Block):
            return self.on_receive_beacon_block(obj)
        elif isinstance(obj, Sig):
            return self.on_receive_sig(obj)
        elif isinstance(obj, BlockMakingRequest):
            if self.main_chain[-1] == obj.parent:
                mc_ref = self.blocks[obj.parent]
                for i in range(2):
                    if mc_ref.number == 0:
                        break
                    #mc_ref = self.blocks[mc_ref].parent_hash
                x = Block(self.blocks[obj.parent], self.id, self.ts,
                          self.sigs[obj.parent] if obj.parent in self.sigs else [])
                self.log("Broadcasting block %s" % to_hex(x.hash[:4]))
                self.broadcast(x)

    def add_to_timequeue(self, obj):
        i = 0
        while i < len(self.timequeue) and self.timequeue[i].ts < obj.ts:
            i += 1
        self.timequeue.insert(i, obj)

    def add_to_multiset(self, _set, k, v):
        if k not in _set:
            _set[k] = []
        _set[k].append(v)

    def change_head(self, chain, new_head):
        chain.extend([None] * (new_head.number + 1 - len(chain)))
        i, c = new_head.number, new_head.hash
        while c != chain[i]:
            chain[i] = c
            c = self.blocks[c].parent_hash
            i -= 1
        for i in range(len(chain)):
            assert self.blocks[chain[i]].number == i

    def recalculate_head(self, chain, condition):
        while not condition(self.blocks[chain[-1]]):
            chain.pop()
        descendant_queue = [chain[-1]]
        new_head = chain[-1]
        while len(descendant_queue):
            first = descendant_queue.pop(0)
            if first in self.children:
                for c in self.children[first]:
                    if condition(self.blocks[c]):
                        descendant_queue.append(c)
            if self.blocks[first].number > self.blocks[new_head].number:
                new_head = first
        self.change_head(chain, self.blocks[new_head])
        for i in range(len(chain)):
            assert condition(self.blocks[chain[i]])

    def process_children(self, h):
        if h in self.parentqueue:
            for b in self.parentqueue[h]:
                self.on_receive(b, reprocess=True)
            del self.parentqueue[h]

    def is_descendant(self, a, b):
        a, b = self.blocks[a], self.blocks[b]
        while b.number > a.number:
            b = self.blocks[b.parent_hash]
        return a.hash == b.hash

    def have_ancestry(self, h):
        while h != genesis.hash:
            if h not in self.processed:
                return False
            h = self.processed[h].parent_hash
        return True

    def is_notarized(self, b):
        return len(self.sigs.get(b.hash, [])) >= b.notary_req

    def on_receive_beacon_block(self, block):
        # Parent not yet received
        if block.parent_hash not in self.blocks:
            self.add_to_multiset(self.parentqueue, block.parent_hash, block)
            return
        # Too early
        if block.ts > self.ts:
            self.add_to_timequeue(block)
            return
        # Add the block
        self.log("Processing beacon block %s" % to_hex(block.hash[:4]))
        self.blocks[block.hash] = block
        # Am I a notary, and is the block building on the head? Then broadcast a signature.
        if block.parent_hash == self.main_chain[-1] or self.careless:
            if self.id in block.notaries:
                self.broadcast(Sig(self.id, block))
        # Add child record
        self.add_to_multiset(self.children, block.parent_hash, block.hash)
        # Final steps
        self.process_children(block.hash)
        self.network.broadcast(self, block)

    def on_receive_sig(self, sig):
        if sig.target_hash not in self.blocks:
            self.add_to_multiset(self.parentqueue, sig.target_hash, sig)
            return
        # Add to head? Make a block?
        self.add_to_multiset(self.sigs, sig.target_hash, sig)
        if len(self.sigs[sig.target_hash]) == self.blocks[sig.target_hash].notary_req:
            block = self.blocks[sig.target_hash]
            if block.number > self.blocks[self.main_chain[-1]].number:
                self.change_head(self.main_chain, block)
            if self.id in block.child_proposers:
                my_index = block.child_proposers.index(self.id)
                target_ts = block.ts + BASE_TS_DIFF + my_index * SKIP_TS_DIFF
                self.log("Making block request for %.1f" % target_ts)
                self.add_to_timequeue(BlockMakingRequest(block.hash, target_ts))
        # Rebroadcast
        self.network.broadcast(self, sig)

    def tick(self):
        if self.first_round:
            if self.id in genesis.notaries:
                self.broadcast(Sig(self.id, genesis))
            self.first_round = False
        self.ts += 0.1
        self.log("Tick: %.1f" % self.ts, lvl=1)
        # Process time queue
        while len(self.timequeue) and self.timequeue[0].ts <= self.ts:
            self.on_receive(self.timequeue.pop(0), reprocess=True)
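The MIN_SAMPLE schedule above means a block from the first eligible proposer needs 6 of the 8 sampled notary signatures, while later (skip) proposers need progressively fewer, down to 1. A quick standalone print of that schedule, using the same lambda:

# Required signatures as a function of the proposer's skip index, per MIN_SAMPLE above
MIN_SAMPLE = lambda x: [6, 4, 3, 2, 2][x] if x < 5 else 1
for index in range(7):
    print(index, MIN_SAMPLE(index))
# 0 -> 6, 1 -> 4, 2 -> 3, 3 -> 2, 4 -> 2, 5+ -> 1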
@@ -1,161 +0,0 @@
import os
from binascii import hexlify
from Crypto.Hash import keccak
import random

def to_hex(s):
    return hexlify(s).decode('utf-8')

memo = {}

def sha3(x):
    if x not in memo:
        memo[x] = keccak.new(digest_bits=256, data=x).digest()
    return memo[x]

def hash_to_int(h):
    o = 0
    for c in h:
        o = (o << 8) + c
    return o

NOTARIES = 40
BASE_TS_DIFF = 1
SKIP_TS_DIFF = 6
SAMPLE = 8
MIN_SAMPLE = 7
POWDIFF = 50 * NOTARIES
SHARDS = 12

def checkpow(work, nonce):
    # Discrete log PoW, lolz
    # Quadratic nonresidues only
    return pow(work, nonce, 65537) * POWDIFF < 65537 * 2 and pow(nonce, 32768, 65537) == 65536

class Block():
    def __init__(self, parent, pownonce, ts):
        self.parent_hash = parent.hash if parent else (b'\x00' * 32)
        assert isinstance(self.parent_hash, bytes)
        self.hash = sha3(self.parent_hash + str(pownonce).encode('utf-8'))
        self.ts = ts
        if parent:
            assert checkpow(parent.pownonce, pownonce)
            assert self.ts >= parent.ts
        self.pownonce = pownonce
        self.number = 0 if parent is None else parent.number + 1


genesis = Block(None, 59049, 0)

class Node():

    def __init__(self, _id, network, sleepy=False, careless=False, ts=0):
        self.blocks = {
            genesis.hash: genesis
        }
        self.main_chain = [genesis.hash]
        self.timequeue = []
        self.parentqueue = {}
        self.children = {}
        self.ts = ts
        self.id = _id
        self.network = network
        self.used_parents = {}
        self.processed = {}
        self.sleepy = sleepy
        self.careless = careless

    def broadcast(self, x):
        if self.sleepy and self.ts:
            return
        self.network.broadcast(self, x)
        self.on_receive(x)

    def log(self, words, lvl=3, all=False):
        #if "Tick:" != words[:5] or self.id == 0:
        if (self.id == 0 or all) and lvl >= 2:
            print(self.id, words)

    def on_receive(self, obj, reprocess=False):
        if obj.hash in self.processed and not reprocess:
            return
        self.processed[obj.hash] = obj
        if isinstance(obj, Block):
            return self.on_receive_main_block(obj)

    def add_to_timequeue(self, obj):
        i = 0
        while i < len(self.timequeue) and self.timequeue[i].ts < obj.ts:
            i += 1
        self.timequeue.insert(i, obj)

    def add_to_multiset(self, _set, k, v):
        if k not in _set:
            _set[k] = []
        _set[k].append(v)

    def change_head(self, chain, new_head):
        chain.extend([None] * (new_head.number + 1 - len(chain)))
        i, c = new_head.number, new_head.hash
        while c != chain[i]:
            chain[i] = c
            c = self.blocks[c].parent_hash
            i -= 1
        for i in range(len(chain)):
            assert self.blocks[chain[i]].number == i
            assert self.blocks[chain[i]].ts <= self.ts

    def process_children(self, h):
        if h in self.parentqueue:
            for b in self.parentqueue[h]:
                self.on_receive(b, reprocess=True)
            del self.parentqueue[h]

    def have_ancestry(self, h):
        while h != genesis.hash:
            if h not in self.processed:
                return False
            h = self.processed[h].parent_hash
        return True

    def is_notarized(self, b):
        return b.hash in self.children

    def on_receive_main_block(self, block):
        # Parent not yet received
        if block.parent_hash not in self.blocks:
            self.add_to_multiset(self.parentqueue, block.parent_hash, block)
            return None
        if block.ts > self.ts:
            self.add_to_timequeue(block)
            return None
        self.log("Processing main chain block %s" % to_hex(block.hash[:4]))
        self.blocks[block.hash] = block
        # Reorg the main chain if new head
        if block.number > self.blocks[self.main_chain[-1]].number:
            reorging = (block.parent_hash != self.main_chain[-1])
            self.change_head(self.main_chain, block)
        # Add child record
        self.add_to_multiset(self.children, block.parent_hash, block.hash)
        # Final steps
        self.process_children(block.hash)
        self.network.broadcast(self, block)

    def is_descendant(self, a, b):
        a, b = self.blocks[a], self.blocks[b]
        while b.number > a.number:
            b = self.blocks[b.parent_hash]
        return a.hash == b.hash

    def tick(self):
        self.ts += 0.1
        self.log("Tick: %.1f" % self.ts, lvl=1)
        # Process time queue
        while len(self.timequeue) and self.timequeue[0].ts <= self.ts:
            self.on_receive(self.timequeue.pop(0), reprocess=True)
        # Attempt to mine a main chain block
        pownonce = random.randrange(65537)
        mchead = self.blocks[self.main_chain[-1]]
        if checkpow(mchead.pownonce, pownonce):
            assert self.ts >= mchead.ts
            self.broadcast(Block(mchead, pownonce, self.ts))
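checkpow succeeds when pow(work, nonce, 65537) lands in the bottom ~2/POWDIFF slice of the field and the nonce is a quadratic nonresidue mod 65537 (the second test, since 32768 = (65537-1)/2, passes for about half of all nonces). So each tick a node wins with probability on the order of 1/POWDIFF. A Monte Carlo sanity check under those assumptions, reusing the same constants:

# Empirical success rate of checkpow for a fixed work value (sanity check).
# Rough expectation: (2/POWDIFF) * (1/2) = 1/POWDIFF, i.e. about 0.0005 here.
import random

NOTARIES = 40
POWDIFF = 50 * NOTARIES

def checkpow(work, nonce):
    return pow(work, nonce, 65537) * POWDIFF < 65537 * 2 and pow(nonce, 32768, 65537) == 65536

trials = 200000
hits = sum(checkpow(59049, random.randrange(65537)) for _ in range(trials))
print(hits / trials)  # should hover near 1 / POWDIFF = 0.0005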
@@ -1,55 +0,0 @@
from networksim import NetworkSimulator
from ghost_node import Node, NOTARIES, Block, genesis
from distributions import normal_distribution

net = NetworkSimulator(latency=22)
notaries = [Node(i, net, ts=max(normal_distribution(300, 300)(), 0) * 0.1, sleepy=i%4==0) for i in range(NOTARIES)]
net.agents = notaries
net.generate_peers()
for i in range(100000):
    net.tick()
for n in notaries:
    print("Local timestamp: %.1f, timequeue len %d" % (n.ts, len(n.timequeue)))
    print("Main chain head: %d" % n.blocks[n.main_chain[-1]].number)
    print("Total main chain blocks received: %d" % (len([b for b in n.blocks.values() if isinstance(b, Block)]) - 1))
    print("Notarized main chain blocks received: %d" % (len([b for b in n.blocks.values() if isinstance(b, Block) and n.is_notarized(b)]) - 1))

import matplotlib.pyplot as plt
import networkx as nx
import random

G = nx.Graph()

#positions = {genesis.hash: 0, beacon_genesis.hash: 0}
#queue = [

# The plotting below uses n, i.e. the last notary from the loop above
for b in n.blocks.values():
    for en in notaries:
        if isinstance(b, Block) and b.hash in en.processed and b.hash not in en.blocks:
            assert (not en.have_ancestry(b.hash)) or b.ts > en.ts
    if b.number > 0:
        if isinstance(b, Block):
            if n.is_notarized(b):
                G.add_edge(b.hash, b.parent_hash, color='b')
            else:
                G.add_edge(b.hash, b.parent_hash, color='#dddddd')


cache = {genesis.hash: 0}

def mkoffset(b):
    if b.hash not in cache:
        cache[b.hash] = cache[b.parent_hash] + random.randrange(35)
    return cache[b.hash]

pos = {b.hash: (b.ts + mkoffset(b), b.ts) for b in n.blocks.values()}
edges = G.edges()
colors = [G[u][v]['color'] for u, v in edges]
nx.draw_networkx_nodes(G, pos, node_size=10, node_shape='o', node_color='0.75')

nx.draw_networkx_edges(G, pos,
                       width=2, edge_color=colors)

plt.axis('off')
# plt.savefig("degree.png", bbox_inches="tight")
plt.show()
@@ -1,49 +0,0 @@
from ethereum.utils import sha3
import sys

STEPLENGTH = 100

dp = {}

def step(inp):
    return sha3('function_'+inp.encode('hex')+'()')[:4]

def run_round(inp):
    orig_inp = inp
    for i in range(STEPLENGTH):
        inp = step(inp)
        if inp in dp.keys():
            print 'Found!', i + 1, repr(inp)
            return(True, i + 1, inp)
        dp[inp] = orig_inp
    return(False, None, inp)

y = '\xff' * 4
orig_y = y
rounds = 0
while 1:
    print 'Running round', rounds
    rounds += 1
    x, t, y2 = run_round(y)
    if x:
        prev1, prev2 = y, dp[y2]
        assert prev1 != prev2
        # print '-----'
        for i in range(STEPLENGTH - t):
            # print repr(prev2)
            prev2 = step(prev2)
        # print '-----'
        for i in range(t):
            # print repr(prev1), repr(prev2)
            next1 = step(prev1)
            next2 = step(prev2)
            if next1 == next2:
                print 'Found!'
                print 'function_'+prev1.encode('hex')+'()'
                print 'function_'+prev2.encode('hex')+'()'
                sys.exit()
            prev1, prev2 = next1, next2
        # print repr(prev1), repr(prev2)
        raise Exception("Something weird happened")
    else:
        y = y2
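The Python 2 script above is a rho-style walk over the 4-byte keccak selector space: it chains selector -> function name -> selector until a round revisits a known value, then replays both predecessor chains to pin down two distinct 'function_<hex>()' names sharing a selector. The same birthday search can be sketched in Python 3 with pycryptodome's keccak (the module the other files in this dump use); this plain table-based version is an assumption-laden re-sketch, not the original algorithm:

# Python 3 re-sketch: find two distinct names with the same 4-byte selector.
# Expected ~2**16 iterations by the birthday bound on a 32-bit selector space.
from Crypto.Hash import keccak

def selector(name):
    return keccak.new(digest_bits=256, data=name.encode()).digest()[:4]

seen = {}
i = 0
while True:
    name = 'function_%08x()' % i
    sel = selector(name)
    if sel in seen:
        print(seen[sel], name)  # two distinct names, same 4-byte selector
        break
    seen[sel] = name
    i += 1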
@@ -1 +0,0 @@
60606040526000357c0100000000000000000000000000000000000000000000000000000000900480632e6e504a1461005a5780633ccfd60b14610069578063eedcf50a14610078578063fdf97cb2146100b157610058565b005b61006760048050506100ea565b005b6100766004805050610277565b005b6100856004805050610424565b604051808273ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6100be600480505061043c565b604051808273ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16600073bb9bc244d798123fde783fcc1c72d3bb8c18941373ffffffffffffffffffffffffffffffffffffffff166318160ddd604051817c01000000000000000000000000000000000000000000000000000000000281526004018090506020604051808303816000876161da5a03f115610002575050506040518051906020015073bb9bc244d798123fde783fcc1c72d3bb8c18941373ffffffffffffffffffffffffffffffffffffffff166370a0823130604051827c0100000000000000000000000000000000000000000000000000000000028152600401808273ffffffffffffffffffffffffffffffffffffffff1681526020019150506020604051808303816000876161da5a03f11561000257505050604051805190602001503073ffffffffffffffffffffffffffffffffffffffff16310103604051809050600060405180830381858888f19350505050505b565b600073bb9bc244d798123fde783fcc1c72d3bb8c18941373ffffffffffffffffffffffffffffffffffffffff166370a0823133604051827c0100000000000000000000000000000000000000000000000000000000028152600401808273ffffffffffffffffffffffffffffffffffffffff1681526020019150506020604051808303816000876161da5a03f1156100025750505060405180519060200150905073bb9bc244d798123fde783fcc1c72d3bb8c18941373ffffffffffffffffffffffffffffffffffffffff166323b872dd333084604051847c0100000000000000000000000000000000000000000000000000000000028152600401808473ffffffffffffffffffffffffffffffffffffffff1681526020018373ffffffffffffffffffffffffffffffffffffffff16815260200182815260200193505050506020604051808303816000876161da5a03f1156100025750505060405180519060200150158061041657503373ffffffffffffffffffffffffffffffffffffffff16600082604051809050600060405180830381858888f19350505050155b1561042057610002565b5b50565b73bb9bc244d798123fde783fcc1c72d3bb8c18941381565b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff168156
File diff suppressed because one or more lines are too long
@@ -1,196 +0,0 @@
from ethereum.state import State
import json
from ethereum import abi
from ethereum.utils import normalize_address
from ethereum.state_transition import apply_transaction, apply_const_message
from ethereum.vm import Message, CallData
from ethereum.config import Env
from ethereum.parse_genesis_declaration import mk_basic_state
from ethereum.transactions import Transaction

account_dict = json.load(open('dao_dump.json'))
withdrawer_code = '0x' + open('bytecode.txt').read().strip()
true, false = True, False
withdrawer_ct = abi.ContractTranslator([{"constant":false,"inputs":[],"name":"trusteeWithdraw","outputs":[],"type":"function"},{"constant":false,"inputs":[],"name":"withdraw","outputs":[],"type":"function"},{"constant":true,"inputs":[],"name":"mainDAO","outputs":[{"name":"","type":"address"}],"type":"function"},{"constant":true,"inputs":[],"name":"trustee","outputs":[{"name":"","type":"address"}],"type":"function"}])
dao_ct = abi.ContractTranslator([{"constant":true,"inputs":[],"name":"name","outputs":[{"name":"","type":"string"}],"type":"function"},{"constant":false,"inputs":[{"name":"_spender","type":"address"},{"name":"_amount","type":"uint256"}],"name":"approve","outputs":[{"name":"success","type":"bool"}],"type":"function"},{"constant":true,"inputs":[],"name":"totalSupply","outputs":[{"name":"","type":"uint256"}],"type":"function"},{"constant":false,"inputs":[{"name":"_from","type":"address"},{"name":"_to","type":"address"},{"name":"_amount","type":"uint256"}],"name":"transferFrom","outputs":[{"name":"success","type":"bool"}],"type":"function"},{"constant":true,"inputs":[],"name":"decimals","outputs":[{"name":"","type":"uint8"}],"type":"function"},{"constant":true,"inputs":[],"name":"standard","outputs":[{"name":"","type":"string"}],"type":"function"},{"constant":true,"inputs":[{"name":"_owner","type":"address"}],"name":"balanceOf","outputs":[{"name":"balance","type":"uint256"}],"type":"function"},{"constant":true,"inputs":[],"name":"symbol","outputs":[{"name":"","type":"string"}],"type":"function"},{"constant":false,"inputs":[{"name":"_to","type":"address"},{"name":"_amount","type":"uint256"}],"name":"transfer","outputs":[{"name":"success","type":"bool"}],"type":"function"},{"constant":true,"inputs":[{"name":"_owner","type":"address"},{"name":"_spender","type":"address"}],"name":"allowance","outputs":[{"name":"remaining","type":"uint256"}],"type":"function"},{"anonymous":false,"inputs":[{"indexed":true,"name":"_from","type":"address"},{"indexed":true,"name":"_to","type":"address"},{"indexed":false,"name":"_amount","type":"uint256"}],"name":"Transfer","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"_owner","type":"address"},{"indexed":true,"name":"_spender","type":"address"},{"indexed":false,"name":"_amount","type":"uint256"}],"name":"Approval","type":"event"}])

# Initialize state

dao = "0xbb9bc244d798123fde783fcc1c72d3bb8c189413"
withdrawer = "0xbf4ed7b27f1d666546e30d74d50d173d20bca754"
my_account = "0x1db3439a222c519ab44bb1144fc28167b4fa6ee6"
my_other_account = "0xd8da6bf26964af9d7eed9e03e53415d37aa96045"
curator = "0xda4a4626d3e16e094de3225a751aab7128e96526"

state = mk_basic_state({
    dao: account_dict,
    withdrawer: {
        "code": withdrawer_code,
        "balance": "12072877497524777000000000",
        "storage": {
            "0x": "0xda4a4626d3e16e094de3225a751aab7128e96526"
        }
    },
}, {
    "number": 1920001,
    "gas_limit": 4712388,
    "gas_used": 0,
    "timestamp": 1467446877,
    "difficulty": 2**25,
    "hash": '00' * 32,
    "uncles_hash": '00' * 32
}, Env())

def get_dao_balance(state, address):
    msg_data = CallData([ord(x) for x in dao_ct.encode('balanceOf', [address])])
    msg = Message(normalize_address(address), normalize_address(dao), 0, 1000000, msg_data, code_address=normalize_address(dao))
    output = ''.join(map(chr, apply_const_message(state, msg)))
    return dao_ct.decode('balanceOf', output)[0]

import sys
state.log_listeners.append(lambda x: sys.stdout.write(str(dao_ct.listen(x))+'\n'))
state.log_listeners.append(lambda x: sys.stdout.write(str(withdrawer_ct.listen(x))+'\n'))

print 'State created'

# Check pre-balance

pre_balance = state.get_balance(my_account)
pre_dao_tokens = get_dao_balance(state, my_account)
pre_withdrawer_balance = state.get_balance(withdrawer)

print 'Pre ETH (wei) balance: %d' % pre_balance
print 'Pre DAO (base unit) balance: %d' % pre_dao_tokens

# Attempt to claim the ETH without approving (should fail)

tx0 = Transaction(state.get_nonce(my_account), 0, 1000000, withdrawer, 0, withdrawer_ct.encode('withdraw', [])).sign('\x33' * 32)
tx0._sender = normalize_address(my_account)
apply_transaction(state, tx0)

med_balance = state.get_balance(my_account)
med_dao_tokens = get_dao_balance(state, my_account)
med_withdrawer_balance = state.get_balance(withdrawer)

assert med_balance == pre_balance
assert med_dao_tokens == pre_dao_tokens
assert med_withdrawer_balance == pre_withdrawer_balance > 0

print 'ETH claim without approving failed, as expected'

# Approve the withdrawal

tx1 = Transaction(state.get_nonce(my_account), 0, 1000000, dao, 0, dao_ct.encode('approve', [withdrawer, 100000 * 10**18])).sign('\x33' * 32)
tx1._sender = normalize_address(my_account)
apply_transaction(state, tx1)

# Check allowance

allowance = dao_ct.decode('allowance', ''.join(map(chr, apply_const_message(state, Message(normalize_address(my_account), normalize_address(dao), 0, 1000000, CallData([ord(x) for x in dao_ct.encode('allowance', [my_account, withdrawer])]), code_address=dao)))))[0]
assert allowance == 100000 * 10**18, allowance
print 'Allowance verified'

# Claim the ETH

tx2 = Transaction(state.get_nonce(my_account), 0, 1000000, withdrawer, 0, withdrawer_ct.encode('withdraw', [])).sign('\x33' * 32)
tx2._sender = normalize_address(my_account)
apply_transaction(state, tx2)

# Compare post_balance

post_balance = state.get_balance(my_account)
post_dao_tokens = get_dao_balance(state, my_account)

print 'Post ETH (wei) balance: %d' % post_balance
print 'Post DAO (base unit) balance: %d' % post_dao_tokens

assert post_dao_tokens == 0
assert post_balance - pre_balance == pre_dao_tokens

print 'Withdrawing once works'

# Try to claim again; should have no effect

tx3 = Transaction(state.get_nonce(my_account), 0, 1000000, withdrawer, 0, withdrawer_ct.encode('withdraw', [])).sign('\x33' * 32)
tx3._sender = normalize_address(my_account)
apply_transaction(state, tx3)

post_balance2 = state.get_balance(my_account)
post_dao_tokens2 = get_dao_balance(state, my_account)

assert post_balance2 == post_balance
assert post_dao_tokens2 == post_dao_tokens

# Curator withdraw

pre_curator_balance = state.get_balance(curator)
pre_withdrawer_balance = state.get_balance(withdrawer)

# from ethereum.slogging import LogRecorder, configure_logging, set_level
# config_string = ':info,eth.vm.log:trace,eth.vm.op:trace,eth.vm.stack:trace,eth.vm.exit:trace,eth.pb.msg:trace,eth.pb.tx:debug'
# configure_logging(config_string=config_string)

tx4 = Transaction(0, 0, 1000000, withdrawer, 0, withdrawer_ct.encode('trusteeWithdraw', [])).sign('\x33' * 32)
apply_transaction(state, tx4)

post_curator_balance = state.get_balance(curator)
post_withdrawer_balance = state.get_balance(withdrawer)
print 'Curator withdrawn', post_curator_balance - pre_curator_balance

assert 500000 * 10**18 < post_curator_balance - pre_curator_balance < 600000 * 10**18
assert pre_curator_balance + pre_withdrawer_balance == post_curator_balance + post_withdrawer_balance

tx5 = Transaction(1, 0, 1000000, withdrawer, 0, withdrawer_ct.encode('trusteeWithdraw', [])).sign('\x33' * 32)
apply_transaction(state, tx5)

post_curator_balance2 = state.get_balance(curator)
post_withdrawer_balance2 = state.get_balance(withdrawer)
assert post_curator_balance2 == post_curator_balance
assert post_withdrawer_balance2 == post_withdrawer_balance

print 'Second withdrawal has no effect as expected'

# Withdraw again, and try curator withdrawing again

tx6 = Transaction(state.get_nonce(my_other_account), 0, 1000000, dao, 0, dao_ct.encode('approve', [withdrawer, 100000 * 10**18])).sign('\x33' * 32)
tx6._sender = normalize_address(my_other_account)
apply_transaction(state, tx6)

tx7 = Transaction(state.get_nonce(my_other_account), 0, 1000000, withdrawer, 0, withdrawer_ct.encode('withdraw', [])).sign('\x33' * 32)
tx7._sender = normalize_address(my_other_account)
apply_transaction(state, tx7)

post_withdrawer_balance3 = state.get_balance(withdrawer)
print 'Another %d wei withdrawn' % (post_withdrawer_balance2 - post_withdrawer_balance3)
assert post_withdrawer_balance3 < post_withdrawer_balance2

tx8 = Transaction(2, 0, 1000000, withdrawer, 0, withdrawer_ct.encode('trusteeWithdraw', [])).sign('\x33' * 32)
apply_transaction(state, tx8)

post_curator_balance3 = state.get_balance(curator)
assert post_curator_balance3 == post_curator_balance

print 'Third withdrawal has no effect as expected'

# Withdraw from an account with no DAO

no_dao_account = '\x35' * 20

pre_balance = state.get_balance(no_dao_account)
pre_dao_tokens = get_dao_balance(state, no_dao_account)

tx9 = Transaction(state.get_nonce(no_dao_account), 0, 1000000, dao, 0, dao_ct.encode('approve', [withdrawer, 100000 * 10**18])).sign('\x33' * 32)
tx9._sender = no_dao_account
apply_transaction(state, tx9)

tx10 = Transaction(state.get_nonce(no_dao_account), 0, 1000000, withdrawer, 0, withdrawer_ct.encode('withdraw', [])).sign('\x33' * 32)
tx10._sender = no_dao_account
apply_transaction(state, tx10)

post_balance = state.get_balance(no_dao_account)
post_dao_tokens = get_dao_balance(state, no_dao_account)

assert pre_balance == post_balance == 0
assert pre_dao_tokens == post_dao_tokens

print 'Withdrawal from a non-DAO-holding account has no effect'
@@ -1,83 +0,0 @@
import random, math


def mk_initial_balances(accts, coins):
    o = []
    for i in range(accts):
        o.extend([i] * random.randrange((coins - len(o)) * 2 // (accts - i)))
    o.extend([accts-1] * (coins - len(o)))
    return o


def fragments(coins):
    o = 0
    for i in range(1, len(coins)):
        if coins[i] != coins[i-1]:
            o += 1
    return o


def xfer(coins, frm, to, value):
    coins = coins[::]
    pos = 0
    while pos < len(coins) and value > 0:
        if coins[pos] == frm:
            coins[pos] = to
            value -= 1
        pos += 1
    return coins


def unscramble(coins, c1, c2):
    coins = coins[::]
    k1 = coins.count(c1)
    pos = 0
    while pos < len(coins):
        if coins[pos] in (c1, c2):
            coins[pos] = c1 if k1 > 0 else c2
            if coins[pos] == c1:
                k1 -= 1
        pos += 1
    return coins


def multi_unscramble(coins, addrs):
    coins = coins[::]
    ks = [coins.count(c) for c in addrs]
    pos = 0
    at = 0
    while pos < len(coins):
        if coins[pos] in addrs:
            coins[pos] = addrs[at]
            ks[at] -= 1
            if ks[at] == 0:
                at += 1
        pos += 1
    return coins


def unscramble_swap_strategy(coins, rounds):
    for i in range(rounds):
        c1, c2 = sorted([random.randrange(max(coins)+1) for _ in range(2)])
        coins = unscramble(coins, c1, c2)
    return coins


def run_with_unscrambling(coins, rounds):
    M = max(coins) + 1
    for i in range(rounds):
        c1, c2 = [random.randrange(M) for _ in range(2)]
        value = int(coins.count(c1) ** random.random())
        coins = xfer(coins, c1, c2, value)
        coins = unscramble(coins, min(c1, c2), max(c1, c2))
    return coins


def run_with_unscramble_online(coins, rounds):
    M = max(coins) + 1
    for i in range(rounds):
        c1, c2 = [random.randrange(M) for _ in range(2)]
        value = int(coins.count(c1) ** random.random())
        coins = xfer(coins, c1, c2, value)
        if random.random() < 1:
            cx = sorted([random.randrange(M) for _ in range(5)])
            coins = multi_unscramble(coins, cx)
    return coins


c = mk_initial_balances(200, 10000)
# random.shuffle(c)
# c = unscramble_swap_strategy(c, 20000)
c = run_with_unscramble_online(c, 10000)
print(fragments(c))
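# Editor's sketch (not in the original file): a quick check of `unscramble`
# on a toy arrangement -- merging owners 0 and 1 into contiguous runs drops
# the boundary count from 7 to 4 while preserving each owner's coin count.
demo = [0, 1, 0, 1, 2, 0, 1, 2]
assert fragments(demo) == 7
assert fragments(unscramble(demo, 0, 1)) == 4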
@@ -1,49 +0,0 @@
import random


def mk_shuffle(n):
    L = list(range(n))
    random.shuffle(L)
    return L


def mk_fragmented_shuffle(n, shuffs):
    L = list(range(n))
    for i in range(shuffs):
        x1 = random.randrange(n)
        x2 = random.randrange(n)
        value = int(min(n - x1, n - x2, abs(x2 - x1)) ** random.random())
        L[x1:x1+value], L[x2:x2+value] = L[x2:x2+value], L[x1:x1+value]
    return L


def fragments(vals):
    tot = 1
    for i in range(1, len(vals)):
        if vals[i] != vals[i-1] + 1:
            tot += 1
    return tot


def apply_perm(vals, perm):
    o = [0 for x in vals]
    for i in range(len(perm)):
        o[i] = vals[perm[i]]
    return o


def attempt_fix(vals):
    perm = list(range(len(vals)))
    indices = {}
    for i, x in enumerate(vals):
        indices[x] = i
    for i in range(len(vals)):
        if perm[i] == i and vals[i] != i:
            poz = indices[i]
            if perm[poz] == poz:
                perm[i], perm[poz] = perm[poz], perm[i]
    assert apply_perm(perm, perm) == list(range(len(vals)))
    return perm


def fix(vals):
    goal = list(range(len(vals)))
    path = []
    while vals != goal:
        vals = apply_perm(vals, attempt_fix(vals))
        path.append(vals)
    return path
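# Editor's sketch (not in the original file): `attempt_fix` returns an
# involution (it asserts that the permutation applied to itself gives the
# identity), and `fix` chains such involutions until the values are sorted.
steps = fix(mk_shuffle(8))
assert steps == [] or steps[-1] == list(range(8))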
@@ -1,65 +0,0 @@
import heapq, random


def distance_score(state):
    state = [int(x) for x in state.split(',')]
    tot = 0
    for i, s in enumerate(state):
        xorval = (i+1) ^ s
        indexmask = 1
        while indexmask < len(state):
            if xorval & indexmask:
                tot += 1
            indexmask <<= 1
    return tot


def generate_legal_moves_for_bit(state, bit):
    o = set()
    xormask = 2**bit
    state = [int(x) for x in state.split(',')]
    for i in range(2**(len(state)-1)):
        new_state = state[::]
        indexmask = 1
        for j in range(len(state)):
            if j > (j ^ xormask):
                if i & indexmask:
                    new_state[j], new_state[j ^ xormask] = new_state[j ^ xormask], new_state[j]
                indexmask <<= 1
        o.add(','.join([str(x) for x in new_state]))
    return o


def generate_legal_moves(state):
    o = set()
    b = 0
    while 2**b < len(state):
        o = o.union(generate_legal_moves_for_bit(state, b))
        b += 1
    return o


def mk_shuffle(n):
    L = list(range(1, n+1))
    random.shuffle(L)
    return ','.join([str(x) for x in L])


def find_path(start):
    parents = {}
    scores = {start: 0}
    queue = [(distance_score(start), start)]
    goal = ','.join([str(x) for x in sorted([int(x) for x in start.split(',')])])
    totvs = 0
    while len(queue):
        qval = heapq.heappop(queue)[1]
        newvals = [x for x in generate_legal_moves(qval) if x not in scores]
        for v in newvals:
            if scores.get(v, 99999) > scores[qval] + 1:
                scores[v] = scores[qval] + 1
                parents[v] = qval
                totvs += 1
                if v == goal:
                    path = [v]
                    while path[-1] != start:
                        parent = parents[path[-1]]
                        path.append(parent)
                    return path
        for v in newvals:
            heapq.heappush(queue, (distance_score(v), v))
    raise Exception("huh")
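# Editor's sketch (not in the original file): route a random 4-element
# shuffle back to sorted order; `find_path` builds the path from the goal
# back to the start, so path[0] is the sorted state.
start = mk_shuffle(4)
while start == '1,2,3,4':
    start = mk_shuffle(4)
path = find_path(start)
assert path[0] == '1,2,3,4' and path[-1] == start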
@@ -1,146 +0,0 @@
import random, heapq


# Assuming `online` is the set of users that is online, find a path to
# send `amount` coins from `frm` to `to` through `coins` where each
# step along the path is between users that have adjacent fragments.
# A transfer done in this way does not contribute to fragmentation.
def find_path(coins, frm, to, amount, online):
    # Determine who is whose neighbor
    neighbor_map = {}
    for i in range(amount, len(coins) - amount + 1):
        if coins[i-1] != coins[i]:
            if coins[i-1] in online and coins[i] in online:
                if coins[i-amount:i] == [coins[i-1]] * amount:
                    neighbor_map[coins[i-1]] = list(set(neighbor_map.get(coins[i-1], []) + [coins[i]]))
                if coins[i:i+amount] == [coins[i]] * amount:
                    neighbor_map[coins[i]] = list(set(neighbor_map.get(coins[i], []) + [coins[i-1]]))
    # Search for the path
    parents = {frm: None}
    q = [(0, frm)]
    while q:
        dist, sender = heapq.heappop(q)
        neighbors = neighbor_map.get(sender, [])
        for n in neighbors:
            if n not in parents:
                heapq.heappush(q, (dist+1, n))
                parents[n] = sender
                if n == to:
                    o = [n]
                    while o[0] != frm:
                        o.insert(0, parents[o[0]])
                    return o
    return False


# How many fragments are in this set of coins?
def fragments(vals):
    tot = 1
    for i in range(1, len(vals)):
        if vals[i] != vals[i-1]:
            tot += 1
    return tot


# Send `amt` coins from `frm` to `to`. Increases fragmentation by
# maximum 1
def send_coins(coins, frm, to, amt):
    coins_to_send = amt
    for i in range(len(coins)):
        if coins[i] == frm:
            coins[i] = to
            coins_to_send -= 1
            if coins_to_send == 0:
                return True
    return False


# Get the concrete range to transfer if we are transferring `amt`
# coins from `frm` to `to` (must be neighboring fragments)
def get_coin_shunt(coins, frm, to, amt):
    i = 1
    L = len(coins)
    while i < L:
        while i < L and coins[i] not in (frm, to):
            i += 1
        # Guard (added): ran off the end without finding another frm/to
        # boundary, so there is no shunt
        if i >= L:
            break
        if not ((coins[i-1] == frm and coins[i] == to) or (coins[i-1] == to and coins[i] == frm)):
            i += 1
            continue
        if coins[i-amt:i] == [frm] * amt and coins[i] == to:
            coins[i-amt:i] = [to] * amt
            return (i-amt, i, to)
        if coins[i:i+amt] == [frm] * amt and coins[i-1] == to:
            coins[i:i+amt] = [to] * amt
            return (i, i+amt, to)
        i += 1
    return False


# Find the largest slice controlled by `acct`
def maxslice(coins, acct):
    maxsz = 0
    sz = 0
    for i in range(len(coins)):
        if coins[i] == acct:
            sz += 1
            maxsz = max(sz, maxsz)
        else:
            sz = 0
    return maxsz


# Count the number of coins and the number of fragments
# held by each user
def count_coins_and_fragments(coins):
    user_count = max(coins) + 1
    coin_count = [0] * user_count
    frag_count = [0] * user_count
    for i in range(len(coins)):
        coin_count[coins[i]] += 1
        if i > 0 and coins[i] != coins[i-1]:
            frag_count[coins[i]] += 1
    return coin_count, frag_count


userz = 25
coinz = 50000
part_online = 0.1
initial_fragments_per_user = 100
ordering = list(range(userz)) * initial_fragments_per_user
random.shuffle(ordering)
c = [ordering[i * len(ordering) // coinz] for i in range(coinz)]
balances = count_coins_and_fragments(c)[0]
for i in range(250000):
    if i % 100 == 0:
        print(i, fragments(c))
    # if i % 2000 == 0:
    #     coin_count, frag_count = count_coins_and_fragments(c)
    #     print(sorted(zip(coin_count, frag_count)))

    # Randomly select sender, recipient and amount
    frm = random.randrange(userz)
    to = random.randrange(userz)
    if frm == to:
        continue
    pre_balance = balances[frm]
    amount = random.randrange(1, 1 + int(pre_balance ** random.random())) if pre_balance >= 2 else pre_balance
    full_amount = amount
    # print("Paying %d coins from %d to %d" % (amount, frm, to))
    # Randomly select the users that are online
    online = [i for i in range(userz) if random.random() < part_online or i in (frm, to)]
    while amount > 0:
        maxpay = maxslice(c, frm)
        pay_this_round = min(amount, maxpay)
        path = find_path(c, frm, to, pay_this_round, online)
        if path:
            # print("Found path for %d coins (%d hops)" % (pay_this_round, len(path)-1))
            assert path[0] == frm
            assert path[-1] == to
            shunts = []
            for i in range(1, len(path)):
                shunts.append(get_coin_shunt(c, path[i-1], path[i], pay_this_round))
                assert shunts[-1]
            for shunt in shunts:
                start, end, to = shunt
                c[start:end] = [to] * (end-start)
            amount -= pay_this_round
        else:
            # print('No path, paying remaining amount %d via fragmentation' % amount)
            # print('%d fragments' % fragments(c))
            assert send_coins(c, frm, to, amount)
            break
    balances[frm] -= full_amount
    balances[to] += full_amount
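# Editor's sketch (not in the original file): with three owners holding
# contiguous runs of three coins each, a 2-coin payment from 0 to 2 routes
# through the adjacent owner 1 without creating any new fragments.
toy = [0, 0, 0, 1, 1, 1, 2, 2, 2]
assert find_path(toy, 0, 2, 2, {0, 1, 2}) == [0, 1, 2]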
@@ -1,150 +0,0 @@
import math, random

hashpower = [float(x) for x in open('hashpower.csv').readlines()]

# Target block time
TARGET = 12
# Should be 86400, but can reduce for a quicker sim
SECONDS_IN_DAY = 86400
# Look at the 1/x day exponential moving average
EMA_FACTOR = 0.01
# Damping factor for simple difficulty adjustment
SIMPLE_ADJUST_DAMPING_FACTOR = 20
# Maximum per-block diff adjustment (as fraction of current diff)
SIMPLE_ADJUST_MAX = 0.5
# Damping factor for quadratic difficulty adjustment
QUADRATIC_ADJUST_DAMPING_FACTOR = 3
# Maximum per-block diff adjustment (as fraction of current diff)
QUADRATIC_ADJUST_MAX = 0.5
# Threshold for bounded adjustor
BOUNDED_ADJUST_THRESHOLD = 1.3
# Bounded adjustment factor
BOUNDED_ADJUST_FACTOR = 0.01
# How many blocks back to look
BLKS_BACK = 10
# Naive difficulty adjustment factor
NAIVE_ADJUST_FACTOR = 1/1024.


# Produces a value according to the exponential distribution; used
# to determine the time until the next block given an average block
# time of t
def expdiff(t):
    return -math.log(random.random()) * t


# abs_sqr(3) = 9, abs_sqr(-7) = -49, etc
def abs_sqr(x):
    return -(x**2) if x < 0 else x**2


# Given an array of the most recent timestamps, and the most recent
# difficulties, compute the next difficulty
def simple_adjust(timestamps, diffs):
    if len(timestamps) < BLKS_BACK + 2:
        return diffs[-1]
    # Total interval between previous block and block a bit further back
    delta = timestamps[-2] - timestamps[-2-BLKS_BACK] + 0.0
    # Expected interval
    expected = TARGET * BLKS_BACK
    # Compute adjustment factor
    fac = 1 - (delta / expected - 1) / SIMPLE_ADJUST_DAMPING_FACTOR
    fac = max(min(fac, 1 + SIMPLE_ADJUST_MAX), 1 - SIMPLE_ADJUST_MAX)
    return diffs[-1] * fac


# Alternative adjustment algorithm
def quadratic_adjust(timestamps, diffs):
    if len(timestamps) < BLKS_BACK + 2:
        return diffs[-1]
    # Total interval between previous block and block a bit further back
    delta = timestamps[-2] - timestamps[-2-BLKS_BACK] + 0.0
    # Expected interval
    expected = TARGET * BLKS_BACK
    # Compute adjustment factor
    fac = 1 - abs_sqr(delta / expected - 1) / QUADRATIC_ADJUST_DAMPING_FACTOR
    fac = max(min(fac, 1 + QUADRATIC_ADJUST_MAX), 1 - QUADRATIC_ADJUST_MAX)
    return diffs[-1] * fac


# Alternative adjustment algorithm
def bounded_adjust(timestamps, diffs):
    if len(timestamps) < BLKS_BACK + 2:
        return diffs[-1]
    # Total interval between previous block and block a bit further back
    delta = timestamps[-2] - timestamps[-2-BLKS_BACK] + 0.0
    # Expected interval
    expected = TARGET * BLKS_BACK
    if delta / expected > BOUNDED_ADJUST_THRESHOLD:
        fac = (1 - BOUNDED_ADJUST_FACTOR)
    elif delta / expected < 1 / BOUNDED_ADJUST_THRESHOLD:
        fac = (1 + BOUNDED_ADJUST_FACTOR) ** (delta / expected)
    else:
        fac = 1
    return diffs[-1] * fac


# Old Ethereum algorithm
def old_adjust(timestamps, diffs):
    if len(timestamps) < 2:
        return diffs[-1]
    delta = timestamps[-1] - timestamps[-2]
    expected = TARGET * 0.693
    if delta > expected:
        fac = 1 - NAIVE_ADJUST_FACTOR
    else:
        fac = 1 + NAIVE_ADJUST_FACTOR
    return diffs[-1] * fac


def test(source, adjust):
    # Variables to keep track of for stats purposes
    ema = maxema = minema = TARGET
    lthalf, gtdouble, lttq, gtft = 0, 0, 0, 0
    count = 0
    # Block times
    times = [0]
    # Block difficulty values
    diffs = [source[0]]
    # Next time to print status update
    nextprint = 10**6
    # Main loop
    while times[-1] < len(source) * SECONDS_IN_DAY:
        # Print status update every 10**6 seconds
        if times[-1] > nextprint:
            print '%d out of %d processed, ema %f' % \
                (times[-1], len(source) * SECONDS_IN_DAY, ema)
            nextprint += 10**6
        # Grab hashpower from data source
        hashpower = source[int(times[-1] // SECONDS_IN_DAY)]
        # Calculate new difficulty
        diffs.append(adjust(times, diffs))
        # Calculate next block time
        times.append(times[-1] + expdiff(diffs[-1] / hashpower))
        # Calculate min and max ema
        ema = ema * (1 - EMA_FACTOR) + (times[-1] - times[-2]) * EMA_FACTOR
        minema = min(minema, ema)
        maxema = max(maxema, ema)
        count += 1
        # Keep track of number of blocks we are below 75/50% or above
        # 133/200% of target
        if ema < TARGET * 0.75:
            lttq += 1
            if ema < TARGET * 0.5:
                lthalf += 1
        elif ema > TARGET * 1.33333:
            gtft += 1
            if ema > TARGET * 2:
                gtdouble += 1
        # Pop items to save memory
        if len(times) > 2000:
            times.pop(0)
            diffs.pop(0)
    print 'min', minema, 'max', maxema, 'avg', times[-1] / count, \
        'ema < half', lthalf * 1.0 / count, \
        'ema > double', gtdouble * 1.0 / count, \
        'ema < 3/4', lttq * 1.0 / count, \
        'ema > 4/3', gtft * 1.0 / count

# Example usage
# blkdiff.test(blkdiff.hashpower, blkdiff.simple_adjust)
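# Editor's note (not in the original file): the adjusters can also be run
# against a synthetic source; with constant hashpower the EMA should settle
# near TARGET, e.g.:
# test([1000.0] * 3, simple_adjust)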
File diff suppressed because it is too large
@@ -1,68 +0,0 @@
# Total ETH supply
total_supply = 10**8

# Returns to online validators, based on perceived fraction online
def R(p):
    return p**1.53

# Returns to offline validators, based on perceived fraction online
def P(p):
    return 0

# What interest rate are `deposits` ETH worth of validators willing to accept?
def demand_curve(deposits):
    return deposits / total_supply / 10

# Given a total deposit size, and a fraction online, compute the interest
# paid to each online and offline validator
def get_rewards(deposits, p_online):
    # Total txfees
    fees = 50000
    # Portion of fees that get "reclaimed" by the protocol
    reclaimed = 0.5
    # The un-reclaimed fees, which are necessarily distributed among
    # the online validators
    uncontrolled_reward = fees * (1 - reclaimed) / deposits / p_online
    # The reclaimed fees, minus a portion that gets held back based on
    # the portion of ETH holders staking
    max_controlled_rewards = fees * reclaimed * \
        (deposits / total_supply) ** 0.8
    # In the best possible case (100% online), everyone gets R(1) interest.
    # Rescale interest based on this.
    controlled_rewards_multiplier = max_controlled_rewards / deposits / R(1)
    # Return computed interest
    return uncontrolled_reward + controlled_rewards_multiplier * R(p_online), \
        controlled_rewards_multiplier * P(p_online)

# Total deposits in the validator set
deposits = 1000000

# Find the pre-attack equilibrium
for i in range(100):
    interest, _ = get_rewards(deposits, 1)
    if interest < demand_curve(deposits):
        deposits -= 10000
    else:
        deposits += 10000

attacker = deposits * 0.501
print('Baseline total deposits:', deposits)
print('Baseline interest:', get_rewards(deposits, 1)[0])
print('Baseline attacker revenue:', get_rewards(deposits, 1)[0] * attacker)
print('Baseline victim revenue:', get_rewards(deposits, 1)[0] * (deposits - attacker))

# Start the attack. Find the post-attack equilibrium.
for i in range(1000):
    attacker_share = attacker / deposits
    atk_interest, vic_interest = get_rewards(deposits, attacker_share)
    if vic_interest < demand_curve(deposits):
        deposits -= min(10000, deposits - attacker)
    else:
        deposits += 10000

print('New total deposits:', deposits)
print('Attacker share:', attacker_share)
print('Victim interest:', vic_interest)
print('Attacker interest:', atk_interest)
print('Attacker revenue:', atk_interest * attacker)
print('Possible non-attacking revenue:', get_rewards(deposits, 1)[0])
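# Editor's sketch (not in the original file): how the reward schedule above
# responds as the perceived online fraction falls, at the final deposit level.
for p_online in (1.0, 0.9, 0.67):
    online_interest, offline_interest = get_rewards(deposits, p_online)
    print('p_online', p_online, '-> online', online_interest, 'offline', offline_interest)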
@@ -1,28 +0,0 @@
# Expects input 224+x bytes: v, r, s, nonce, gasprice, to, value, data
with zero = ~mload(0):
    # Anti re-entrancy
    ~jumpi(~pc(), msg.sender != ~sub(zero, 1))
    # Copy calldata
    ~calldatacopy(32, zero, ~calldatasize())
    # Compute sighash
    ~mstore(zero, ~sha3(32, 32 + ~calldatasize()))
    # Do elliptic curve verification
    ~call(3000, 1, zero, zero, 128, zero, 32)
    # Memory: hash, v, r, s, nonce, gasprice, to, value, data
    # Check sig is correct
    ~jumpi(~pc(), ~mload(zero) != 0xfe2ec957647679d210034b65e9c7db2452910b0c)
    with s = ~sload(zero):
        # Check nonce is correct
        ~jumpi(~pc(), s != ~mload(128))
        # Increment nonce
        ~sstore(zero, s + 1)
        with gasprice = ~mload(160):
            # Check balance
            ~jumpi(~pc(), self.balance < gasprice * msg.gas)
            with g1 = msg.gas:
                # Make the main call
                ~call(msg.gas - 25000, ~mload(192), ~mload(224), 256, ~calldatasize() - 224, zero, 10000)
                # Pay the miner
                ~call(zero, block.coinbase, (g1 - msg.gas + 5000) * gasprice, zero, zero, zero, zero)
                # Log to establish that the tx passed through successfully
                ~log0(zero, zero)
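# Editor's sketch (hypothetical, not in the original file): the forwarder
# above reads seven 32-byte words (v, r, s, nonce, gasprice, to, value)
# followed by raw calldata, so a client could pack the 224+x-byte payload
# roughly as follows (encode_int32 as in ethereum.utils):
#
# payload = (encode_int32(v) + encode_int32(r) + encode_int32(s) +
#            encode_int32(nonce) + encode_int32(gasprice) +
#            encode_int32(to) + encode_int32(value) + data)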
@@ -1,25 +0,0 @@
# Setting the block hash
if msg.sender == 2**160 - 2:
    with prev_block_number = block.number - 1:
        # Use storage fields 0..255 to store the last 256 hashes
        ~sstore(prev_block_number % 256, ~calldataload(0))
        # Use storage fields 256..511 to store the hashes of the last 256
        # blocks with block.number % 256 == 0
        if not (prev_block_number % 256):
            ~sstore(256 + (prev_block_number / 256) % 256, ~calldataload(0))
        # Use storage fields 512..767 to store the hashes of the last 256
        # blocks with block.number % 65536 == 0
        if not (prev_block_number % 65536):
            ~sstore(512 + (prev_block_number / 65536) % 256, ~calldataload(0))
# Getting the block hash
else:
    if ~calldataload(0) >= block.number:
        return(0)
    elif block.number - ~calldataload(0) <= 256:
        return(~sload(~calldataload(0) % 256))
    elif (not (~calldataload(0) % 256) and block.number - ~calldataload(0) <= 65536):
        return(~sload(256 + (~calldataload(0) / 256) % 256))
    elif (not (~calldataload(0) % 65536) and block.number - ~calldataload(0) <= 16777216):
        return(~sload(512 + (~calldataload(0) / 65536) % 256))
    else:
        return(0)
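# Editor's sketch (not in the original file): the contract's slot layout,
# mirrored in Python, to work out which storage slot (if any) can answer a
# query for block `n` at current height `now`.
def blockhash_slot(n, now):
    if n >= now:
        return None                       # current/future block: returns 0
    if now - n <= 256:
        return n % 256                    # the last 256 blocks
    if n % 256 == 0 and now - n <= 65536:
        return 256 + (n // 256) % 256     # every 256th block
    if n % 65536 == 0 and now - n <= 16777216:
        return 512 + (n // 65536) % 256   # every 65536th block
    return None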
@@ -1,34 +0,0 @@
from ethereum import tester, vm
from ethereum.utils import sha3, encode_int32, safe_ord, encode_hex
from ethereum.state_transition import apply_message

s = tester.state()
c = s.contract('eip_96_blockhash_getter.se.py')
blockhash_addr = b'\x00' * 19 + b'\x10'
system_addr = b'\xff' * 19 + b'\xfe'
s.state.set_code(blockhash_addr, s.state.get_code(c))


def mk_hash_setting_message(data):
    return vm.Message(sender=system_addr, to=blockhash_addr, value=0, gas=1000000, data=data)


print("Setting block hashes")
for i in range(1, 1000):
    s.state.block_number = i + 1
    o = apply_message(s.state, mk_hash_setting_message(sha3(str(i))))
    if i % 100 == 0:
        print("Set %d" % i)

print("Testing reads")
s.state.block_number = 1000
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(999)) == sha3(str(999))
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(998)) == sha3(str(998))
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(744)) == sha3(str(744))
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(743)) == b'\x00' * 32
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(1000)) == b'\x00' * 32
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(1001)) == b'\x00' * 32
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(513)) == b'\x00' * 32
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(512)) == sha3(str(512))
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(511)) == b'\x00' * 32
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(256)) == sha3(str(256))
print("Tests passed!")

print("EVM code: 0x%s" % encode_hex(s.state.get_code(blockhash_addr)))
@@ -1,36 +0,0 @@
lines = open('data.csv').read().split('\n')
data = [(int(x[:x.find(',')]), float(x[x.find(',')+1:])) for x in lines if x]

REPORT_THRESHOLD = 0.23


def get_error(scale, elasticity, growth):
    err = 0
    bs_fac, fee_fac = 1 / (1 + elasticity), elasticity / (1 + elasticity)
    for i, (block_size, avg_fee) in enumerate(data):
        expected = scale * (1 + growth) ** i
        actual = block_size ** bs_fac * avg_fee ** fee_fac
        # if i >= len(data) - 6:
        #     err += ((expected / actual - 1) ** 2) * 2
        err += (expected / actual - 1) ** 2
    return err


best = (0, 0, 0, 9999999999999999999999999.0)

for scale in [1 * 1.05 ** x for x in range(300)]:
    for elasticity in [x * 0.025 for x in range(120)]:
        for growth in [x * 0.001 for x in range(120)]:
            err = get_error(scale, elasticity, growth)
            if err <= REPORT_THRESHOLD:
                print('%d %.3f %.3f: %.3f' % (scale, elasticity, growth, err))
            if err < best[-1]:
                best = scale, elasticity, growth, err

print('Best params: %d %.3f %.3f (err %.3f)' % best)

scale, elasticity, growth, err = best
bs_fac, fee_fac = 1 / (1 + elasticity), elasticity / (1 + elasticity)

for i, (block_size, avg_fee) in enumerate(data):
    expected = scale * (1 + growth) ** i
    actual = block_size ** bs_fac * avg_fee ** fee_fac
    print(i, actual, expected)
@@ -1,28 +0,0 @@
358373,0.0001556
373527,0.00014486
387199,0.00014816
404860,0.00017626
392748,0.00015425
454844,0.00016187
594453,0.0002212
463351,0.00021871
555749,0.00019946
543490,0.00019368
566810,0.00017566
623697,0.00017407
662227,0.00019356
732706,0.00020016
715370,0.00022996
743867,0.00025096
769594,0.00022846
820848,0.00027225
785600,0.00027909
777843,0.00030502
749121,0.00027106
827073,0.00031194
863884,0.00032692
873975,0.00035117
864976,0.00040644
949126,0.00054382
955451,0.00080043
957912,0.00076301
Binary file not shown.
@@ -1,51 +0,0 @@
#include "share.h"

// returns a * (x+1) in the Galois field
// (since (x+1) is a primitive root)
static constexpr std::uint8_t tpl(std::uint8_t a) {
    return a ^ (a<<1) // a * (x+1)
        ^ ((a & (1<<7)) != 0
            ? // would overflow (have an x^8 term); reduce by the AES polynomial,
              // x^8 + x^4 + x^3 + x + 1
              0b00011011u
            : 0
          );
}

// constexpr functions to compute exp/log
// these are not intended to be fast, but they must be constexpr to populate a
// table at compile time
static constexpr std::uint8_t gexp(unsigned k) {
    return k > 0 ? tpl(gexp(k-1)) : 1;
}
static constexpr std::uint8_t glog(unsigned k, unsigned i = 0, unsigned v = 1) {
    return k == v ? i : glog(k, i+1, tpl(v));
}

// insane hack (courtesy of Xeo on stackoverflow): gen_seq<N> expands to a
// struct that derives from seq<0, 1, ..., N-1>
template<unsigned... I> struct seq{};
template<unsigned N, unsigned... I>
struct gen_seq : gen_seq<N-1, N-1, I...>{};
template<unsigned... I>
struct gen_seq<0, I...> : seq<I...>{};

// produce the actual tables in array form...
template<unsigned... I>
constexpr std::array<Galois, 255> exptbl(seq<I...>) {
    return { { Galois(gexp(I))... } };
}
template<unsigned... I>
constexpr std::array<std::uint8_t, 256> logtbl(seq<I...>) {
    // manually populate entry zero, for two reasons:
    // - it makes glog simpler
    // - it avoids clang++'s default template instantiation depth limit of 256
    return { { 0, glog(I+1)... } };
}

// and initialize the static variables
const std::array<Galois, 255> Galois::exptable = exptbl(gen_seq<255>{});
const std::array<std::uint8_t, 256> Galois::logtable = logtbl(gen_seq<255>{});

// by populating everything at compile-time, we avoid a static initialization
// step and any possible associated static initialization "races"
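# Editor's sketch (not in the original file): the same exp/log table
# construction in Python, handy for cross-checking the constexpr tables.
# Multiplication is exp[(log a + log b) % 255] with generator (x+1).
def tpl(a):
    r = (a ^ (a << 1)) & 0xff
    return r ^ 0x1b if a & 0x80 else r  # reduce by the AES polynomial

exptable, logtable = [], [0] * 256
v = 1
for i in range(255):
    exptable.append(v)
    logtable[v] = i
    v = tpl(v)
assert exptable[(logtable[2] + logtable[3]) % 255] == 6  # 0x02 * 0x03 = 0x06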
@@ -1,559 +0,0 @@
package erasure_code

import "fmt"

func init() {
    galoisInit()
}

// Finite fields
// =============

type ZeroDivisionError struct {
}

func (e *ZeroDivisionError) Error() string {
    return "division by zero"
}

// should panic with ZeroDivisionError when dividing by zero
type Field interface {
    Add(b Field) Field
    Sub(b Field) Field
    Mul(b Field) Field
    Div(b Field) Field
    Value() int
    Factory() FieldFactory
}
type FieldFactory interface {
    Construct(v int) Field
}

// per-byte 2^8 Galois field
// Note that this imposes a hard limit that the number of extended chunks can
// be at most 256 along each dimension
type Galois struct {
    v uint8
}

var gexptable [255]uint8
var glogtable [256]uint8

func galoisTpl(a uint8) uint8 {
    r := a ^ (a << 1) // a * (x+1)
    if (a & (1 << 7)) != 0 {
        // would overflow (have an x^8 term); reduce by the AES polynomial,
        // x^8 + x^4 + x^3 + x + 1
        return r ^ 0x1b
    } else {
        return r
    }
}

func galoisInit() {
    var v uint8 = 1
    for i := uint8(0); i < 255; i++ {
        glogtable[v] = i
        gexptable[i] = v
        v = galoisTpl(v)
    }
}

func (a *Galois) Add(_b Field) Field {
    b := _b.(*Galois)
    return &Galois{a.v ^ b.v}
}
func (a *Galois) Sub(_b Field) Field {
    b := _b.(*Galois)
    return &Galois{a.v ^ b.v}
}
func (a *Galois) Mul(_b Field) Field {
    b := _b.(*Galois)
    if a.v == 0 || b.v == 0 {
        return &Galois{0}
    }
    return &Galois{gexptable[(int(glogtable[a.v])+
        int(glogtable[b.v]))%255]}
}
func (a *Galois) Div(_b Field) Field {
    b := _b.(*Galois)
    if b.v == 0 {
        panic(ZeroDivisionError{})
    }
    if a.v == 0 {
        return &Galois{0}
    }
    return &Galois{gexptable[(int(glogtable[a.v])+255-
        int(glogtable[b.v]))%255]}
}
func (a *Galois) Value() int {
    return int(a.v)
}
func (a *Galois) String() string {
    return fmt.Sprintf("%d", a.v)
}

type galoisFactory struct {
}

func GaloisFactory() FieldFactory {
    return &galoisFactory{}
}
func (self *Galois) Factory() FieldFactory {
    return GaloisFactory()
}
func (self *galoisFactory) Construct(v int) Field {
    return &Galois{uint8(v)}
}

// Modular arithmetic class
type modulo struct {
    v uint
    n uint // the modulus
}

func (a *modulo) Add(_b Field) Field {
    b := _b.(*modulo)
    return &modulo{(a.v + b.v) % a.n, a.n}
}

func (a *modulo) Sub(_b Field) Field {
    b := _b.(*modulo)
    return &modulo{(a.v + a.n - b.v) % a.n, a.n}
}

func (a *modulo) Mul(_b Field) Field {
    b := _b.(*modulo)
    return &modulo{(a.v * b.v) % a.n, a.n}
}

func powmod(b uint, e uint, m uint) uint {
    var r uint = 1
    for e > 0 {
        if (e & 1) == 1 {
            r = (r * b) % m
        }
        b = (b * b) % m
        e >>= 1
    }
    return r
}

func (a *modulo) Div(_b Field) Field {
    b := _b.(*modulo)
    return &modulo{(a.v * powmod(b.v, a.n-2, a.n)) % a.n, a.n}
}

func (self *modulo) Value() int {
    return int(self.v)
}

type moduloFactory struct {
    n uint
}

func (self *modulo) Factory() FieldFactory {
    return &moduloFactory{self.n}
}

func (self *moduloFactory) Construct(v int) Field {
    return &modulo{uint(v), self.n}
}

func MakeModuloFactory(n uint) FieldFactory {
    return &moduloFactory{n}
}

func zero(f FieldFactory) Field {
    return f.Construct(0)
}
func one(f FieldFactory) Field {
    return f.Construct(1)
}

// Helper functions
// ================

// Evaluates a polynomial p in little-endian form (e.g. x^2 + 3x + 2 is
// represented as [2, 3, 1]) at coordinate x.
func EvalPolyAt(poly []Field, x Field) Field {
    arithmetic := x.Factory()
    r, xi := zero(arithmetic), one(arithmetic)
    for _, ci := range poly {
        r = r.Add(xi.Mul(ci))
        xi = xi.Mul(x)
    }
    return r
}

// Given p+1 y values and x values with no errors, recovers the original
// degree-p polynomial. For example,
// LagrangeInterp({51.0, 59.0, 66.0}, {1, 3, 4}) = {50.0, 0, 1.0}
// (or it would be, if floats were Fields)
func LagrangeInterp(pieces []Field, xs []Field) []Field {
    arithmetic := pieces[0].Factory()
    zero, one := zero(arithmetic), one(arithmetic)

    // `size` is the number of datapoints; the degree of the result polynomial
    // is then `size-1`
    size := len(pieces)

    root := []Field{one} // initially just the polynomial "1"
    // build up the numerator polynomial, `root`, by taking the product of (x-v)
    // (implemented as convolving repeatedly with [-v, 1])
    for _, v := range xs {
        // iterate backward since new root[i] depends on old root[i-1]
        for i := len(root) - 1; i >= 0; i-- {
            root[i] = root[i].Mul(zero.Sub(v))
            if i > 0 {
                root[i] = root[i].Add(root[i-1])
            }
        }
        // polynomial is always monic so save an extra multiply by doing this
        // after
        root = append(root, one)
    }

    // generate per-value numerator polynomials by dividing the master
    // polynomial back by each x coordinate
    nums := make([][]Field, size)
    for i, v := range xs {
        // divide `root` by (x-v) to get a degree size-1 polynomial
        // (i.e. with `size` coefficients)
        num := make([]Field, size)
        // compute the x^0, x^1, ..., x^(p-2) coefficients by long division
        last := one
        num[len(num)-1] = last // still always a monic polynomial
        for j := size - 2; j >= 0; j-- {
            last = root[j+1].Add(last.Mul(v))
            num[j] = last
        }
        nums[i] = num
    }

    // generate denominators by evaluating numerator polys at their x
    denoms := make([]Field, size)
    for i, x := range xs {
        denoms[i] = EvalPolyAt(nums[i], x)
    }

    // generate output polynomial by taking the sum over i of
    // (nums[i] * pieces[i] / denoms[i])
    sum := make([]Field, size)
    for i := range sum {
        sum[i] = zero
    }
    for i, y := range pieces {
        factor := y.Div(denoms[i])
        // add nums[i] * factor to sum, as a vector
        for j := 0; j < size; j++ {
            sum[j] = sum[j].Add(nums[i][j].Mul(factor))
        }
    }
    return sum
}

// Given two linear equations, eliminates the first variable and returns
// the resulting equation.
//
// An equation of the form a_1 x_1 + ... + a_n x_n + b = 0
// is represented as the array [a_1, ..., a_n, b].
func elim(a []Field, b []Field) []Field {
    result := make([]Field, len(a)-1)
    for i := range result {
        result[i] = a[i+1].Mul(b[0]).Sub(b[i+1].Mul(a[0]))
    }
    return result
}

// Given one homogeneous linear equation and the values of all but the first
// variable, solve for the value of the first variable.
//
// For an equation of the form
//     a_1 x_1 + ... + a_n x_n = 0
// pass two arrays, [a_1, ..., a_n] and [x_2, ..., x_n].
func evaluate(coeffs []Field, vals []Field) Field {
    total := zero(coeffs[0].Factory())
    for i, val := range vals {
        total = total.Sub(coeffs[i+1].Mul(val))
    }
    return total.Div(coeffs[0])
}

// Given an n*n system of inhomogeneous linear equations, solve for the value of
// every variable.
//
// For equations of the form
//     a_1,1 x_1 + ... + a_1,n x_n + b_1 = 0
//     a_2,1 x_1 + ... + a_2,n x_n + b_2 = 0
//     ...
//     a_n,1 x_1 + ... + a_n,n x_n + b_n = 0
// pass a two-dimensional array
// [[a_1,1, ..., a_1,n, b_1], ..., [a_n,1, ..., a_n,n, b_n]].
//
// Returns the values of [x_1, ..., x_n].
func SysSolve(eqs [][]Field) []Field {
    arithmetic := eqs[0][0].Factory()
    backEqs := make([][]Field, 1, len(eqs))
    backEqs[0] = eqs[0]

    for len(eqs) > 1 {
        neweqs := make([][]Field, len(eqs)-1)
        for i := 0; i < len(eqs)-1; i++ {
            neweqs[i] = elim(eqs[i], eqs[i+1])
        }
        eqs = neweqs
        // find a row with a nonzero first entry
        i := 0
        for i+1 < len(eqs) && eqs[i][0].Value() == 0 {
            i++
        }
        backEqs = append(backEqs, eqs[i])
    }

    kvals := make([]Field, len(backEqs)+1)
    kvals[len(backEqs)] = one(arithmetic)
    // back-substitute in reverse order
    // (smallest to largest equation)
    for i := len(backEqs) - 1; i >= 0; i-- {
        kvals[i] = evaluate(backEqs[i], kvals[i+1:])
    }

    return kvals[:len(kvals)-1]
}

// Divide two polynomials with nonzero leading terms.
// The coefficients must come from a field.
func PolyDiv(Q []Field, E []Field) []Field {
    if len(Q) < len(E) {
        return []Field{}
    }
    div := make([]Field, len(Q)-len(E)+1)
    for i := len(div) - 1; i >= 0; i-- {
        factor := Q[len(Q)-1].Div(E[len(E)-1])
        div[i] = factor
        // subtract factor * E * x^i from Q
        Q = Q[:len(Q)-1] // the highest term should cancel
        for j := 0; j < len(E)-1; j++ {
            Q[i+j] = Q[i+j].Sub(factor.Mul(E[j]))
        }
    }
    return div
}

func trySysSolve(eqs [][]Field) (ret []Field, ok bool) {
    defer func() {
        err := recover()
        if err == nil {
            return
        }
        switch err := err.(type) {
        case ZeroDivisionError:
            ret = nil
            ok = false
        default:
            panic(err)
        }
    }()
    return SysSolve(eqs), true
}

type NotEnoughData struct{}

func (self *NotEnoughData) Error() string {
    return "Not enough data!"
}

type TooManyErrors struct{}

func (self *TooManyErrors) Error() string {
    return "Answer doesn't match (too many errors)!"
}

// Given a set of y coordinates and x coordinates, and the degree of the
// original polynomial, determines the original polynomial even if some of the y
// coordinates are wrong. If m is the minimal number of pieces (ie. degree +
// 1), t is the total number of pieces provided, then the algo can handle up to
// (t-m)/2 errors.
func BerlekampWelchAttempt(pieces []Field, xs []Field, masterDegree int) ([]Field, error) {
    errorLocatorDegree := (len(pieces) - masterDegree - 1) / 2
    arithmetic := pieces[0].Factory()
    zero, one := zero(arithmetic), one(arithmetic)
    // Set up the equations for y[i]E(x[i]) = Q(x[i])
    // degree(E) = errorLocatorDegree
    // degree(Q) = masterDegree + errorLocatorDegree - 1
    eqs := make([][]Field, 2*errorLocatorDegree+masterDegree+1)
    for i := range eqs {
        eq := []Field{}
        x := xs[i]
        piece := pieces[i]
        neg_x_j := zero.Sub(one)
        for j := 0; j < errorLocatorDegree+masterDegree+1; j++ {
            eq = append(eq, neg_x_j)
            neg_x_j = neg_x_j.Mul(x)
        }
        x_j := one
        for j := 0; j < errorLocatorDegree+1; j++ {
            eq = append(eq, x_j.Mul(piece))
            x_j = x_j.Mul(x)
        }
        eqs[i] = eq
    }
    // Solve the equations
    // Assume the top error polynomial term to be one
    errors := errorLocatorDegree
    ones := 1
    var polys []Field
    for errors >= 0 {
        if p, ok := trySysSolve(eqs); ok {
            for i := 0; i < ones; i++ {
                p = append(p, one)
            }
            polys = p
            break
        }
        // caught ZeroDivisionError
        eqs = eqs[:len(eqs)-1]
        for i, eq := range eqs {
            eq[len(eq)-2] = eq[len(eq)-2].Add(eq[len(eq)-1])
            eqs[i] = eq[:len(eq)-1]
        }
        errors--
        ones++
    }
    if errors < 0 {
        return nil, &NotEnoughData{}
    }
    // divide the polynomials...
    split := errorLocatorDegree + masterDegree + 1
    div := PolyDiv(polys[:split], polys[split:])
    corrects := 0
    for i := 0; i < len(xs); i++ {
        if EvalPolyAt(div, xs[i]).Value() == pieces[i].Value() {
            corrects++
        }
    }
    if corrects < masterDegree+errors {
        return nil, &TooManyErrors{}
    }
    return div, nil
}

// Extends a list of integers in [0 ... 255] (if using Galois arithmetic) by
// adding n redundant error-correction values
func Extend(data []int, n int, arithmetic FieldFactory) ([]int, error) {
    size := len(data)

    dataF := make([]Field, size)
    for i, d := range data {
        dataF[i] = arithmetic.Construct(d)
    }

    xs := make([]Field, size)
    for i := range xs {
        xs[i] = arithmetic.Construct(i)
    }

    poly, err := BerlekampWelchAttempt(dataF, xs, size-1)
    if err != nil {
        return nil, err
    }

    for i := 0; i < n; i++ {
        data = append(data,
            int(EvalPolyAt(poly, arithmetic.Construct(size+i)).Value()))
    }
    return data, nil
}

// Repairs a list of integers in [0 ... 255]. Some integers can be erroneous,
// and you can put -1 in place of an integer if you know that a certain
// value is defective or missing. Uses the Berlekamp-Welch algorithm to
// do error-correction
func Repair(data []int, datasize int, arithmetic FieldFactory) ([]int, error) {
    vs := make([]Field, 0, len(data))
    xs := make([]Field, 0, len(data))
    for i, d := range data {
        if d >= 0 {
            vs = append(vs, arithmetic.Construct(d))
            xs = append(xs, arithmetic.Construct(i))
        }
    }

    poly, err := BerlekampWelchAttempt(vs, xs, datasize-1)
    if err != nil {
        return nil, err
    }

    result := make([]int, len(data))
    for i := range result {
        result[i] = int(EvalPolyAt(poly, arithmetic.Construct(i)).Value())
    }
    return result, nil
}

func transpose(d [][]int) [][]int {
    width := len(d[0])
    result := make([][]int, width)
    for i := range result {
        col := make([]int, len(d))
        for j := range col {
            col[j] = d[j][i]
        }
        result[i] = col
    }
    return result
}

func extractColumn(d [][]int, j int) []int {
    result := make([]int, len(d))
    for i, row := range d {
        result[i] = row[j]
    }
    return result
}

// Extends a list of bytearrays
// eg. ExtendChunks([map(ord, 'hello'), map(ord, 'world')], 2)
// n is the number of redundant error-correction chunks to add
func ExtendChunks(data [][]int, n int, arithmetic FieldFactory) ([][]int, error) {
    width := len(data[0])
    o := make([][]int, width)
    for i := 0; i < width; i++ {
        row, err := Extend(extractColumn(data, i), n, arithmetic)
        if err != nil {
            return nil, err
        }
        o[i] = row
    }
    return transpose(o), nil
}

// Repairs a list of bytearrays. Use an empty array in place of a missing array.
// Individual arrays can contain some missing or erroneous data.
func RepairChunks(data [][]int, datasize int, arithmetic FieldFactory) ([][]int, error) {
    var width int
    for _, row := range data {
        if len(row) > 0 {
            width = len(row)
            break
        }
    }
    filledData := make([][]int, len(data))
    for i, row := range data {
        if len(row) == 0 {
            filledData[i] = make([]int, width)
            for j := range filledData[i] {
                filledData[i][j] = -1
            }
        } else {
            filledData[i] = row
        }
    }
    o := make([][]int, width)
    for i := range o {
        // Fixed: repair column-by-column from the gap-filled copy (the
        // original indexed `data` here, which panics on empty rows)
        row, err := Repair(extractColumn(filledData, i), datasize, arithmetic)
        if err != nil {
            return nil, err
        }
        o[i] = row
    }
    return transpose(o), nil
}
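# Editor's sketch (not in the original file): the erasure-only core of the
# scheme in plain Python over the prime field GF(257) -- extend 3 data values
# with 2 redundant evaluations, drop two of the five, and re-interpolate.
P = 257

def lagrange_eval(xs, ys, x):
    # evaluate the polynomial interpolating (xs, ys) at x, mod P
    total = 0
    for i, (xi, yi) in enumerate(zip(xs, ys)):
        num, den = 1, 1
        for j, xj in enumerate(xs):
            if j != i:
                num = num * (x - xj) % P
                den = den * (xi - xj) % P
        total = (total + yi * num * pow(den, P - 2, P)) % P
    return total

data = [7, 11, 42]  # values at x = 0, 1, 2
extended = data + [lagrange_eval([0, 1, 2], data, x) for x in (3, 4)]
# lose points 1 and 3; any three survivors determine the polynomial
assert [lagrange_eval([0, 2, 4], [extended[0], extended[2], extended[4]], x)
        for x in range(5)] == extended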
@@ -1,549 +0,0 @@
|
||||
#include <array>
|
||||
#include <iostream>
|
||||
#include <exception>
|
||||
#include <cassert>
|
||||
#include <cstdint>
|
||||
#include <vector>
|
||||
|
||||
#include "utils.h"
|
||||
|
||||
class ZeroDivisionError : std::domain_error {
|
||||
public:
|
||||
ZeroDivisionError() : domain_error("division by zero") { }
|
||||
};
|
||||
|
||||
// GF(2^8) in the form (Z/2Z)[x]/(x^8+x^4+x^3+x+1)
|
||||
// (the AES polynomial)
|
||||
class Galois {
|
||||
// the coefficients of the polynomial, where the ith bit of `val` is the x^i
|
||||
// coefficient
|
||||
std::uint8_t v;
|
||||
|
||||
// precomputed data: log and exp tables
|
||||
static const std::array<Galois, 255> exptable;
|
||||
static const std::array<std::uint8_t, 256> logtable;
|
||||
|
||||
public:
|
||||
explicit constexpr Galois(unsigned char val) : v(val) { }
|
||||
|
||||
Galois operator+(Galois b) const {
|
||||
return Galois(v ^ b.v);
|
||||
}
|
||||
Galois operator-(Galois b) const {
|
||||
return Galois(v ^ b.v);
|
||||
}
|
||||
Galois operator*(Galois b) const {
|
||||
return v == 0 || b.v == 0
|
||||
? Galois(0)
|
||||
: exptable[(unsigned(logtable[v]) + logtable[b.v]) % 255];
|
||||
}
|
||||
Galois operator/(Galois b) const {
|
||||
if (b.v == 0) {
|
||||
throw ZeroDivisionError();
|
||||
}
|
||||
return v == 0 || b.v == 0
|
||||
? Galois(0)
|
||||
: exptable[(unsigned(logtable[v]) + 255u - logtable[b.v]) % 255];
|
||||
}
|
||||
Galois operator-() const {
|
||||
return *this;
|
||||
}
|
||||
|
||||
Galois& operator+=(Galois b) {
|
||||
return *this = *this + b;
|
||||
}
|
||||
Galois& operator-=(Galois b) {
|
||||
return *this = *this - b;
|
||||
}
|
||||
Galois& operator*=(Galois b) {
|
||||
return *this = *this * b;
|
||||
}
|
||||
Galois& operator/=(Galois b) {
|
||||
return *this = *this / b;
|
||||
}
|
||||
|
||||
bool operator==(Galois b) {
|
||||
return v == b.v;
|
||||
}
|
||||
|
||||
// back door
|
||||
std::uint8_t val() const {
|
||||
return v;
|
||||
}
|
||||
};
|
||||
|
||||
// Z/pZ, for an odd prime p
|
||||
template<unsigned p>
|
||||
class Modulo {
|
||||
// check that p is prime by trial division
|
||||
static constexpr bool is_prime(unsigned x, unsigned divisor = 2) {
|
||||
return divisor * divisor > x
|
||||
? true
|
||||
: x % divisor != 0 && is_prime(x, divisor + 1);
|
||||
}
|
||||
static_assert(p > 2 && is_prime(p, 2), "p must be an odd prime!");
|
||||
|
||||
unsigned v;
|
||||
|
||||
public:
|
||||
explicit Modulo(unsigned val) : v(val) {
|
||||
assert(v >= 0 && v < p);
|
||||
}
|
||||
|
||||
|
||||
Modulo inv() const {
|
||||
if (v == 0) {
|
||||
throw ZeroDivisionError();
|
||||
}
|
||||
unsigned r = 1, base = v, exp = p-2;
|
||||
while (exp > 0) {
|
||||
if (exp & 1) r = (r * base) % p;
|
||||
base = (base * base) % p;
|
||||
exp >>= 1;
|
||||
}
|
||||
return Modulo(r);
|
||||
}
|
||||
Modulo operator+(Modulo b) const {
|
||||
return Modulo((v + b.v) % p);
|
||||
}
|
||||
Modulo operator-(Modulo b) const {
|
||||
return Modulo((v + p - b.v) % p);
|
||||
}
|
||||
Modulo operator*(Modulo b) const {
|
||||
return Modulo((v * b.v) % p);
|
||||
}
|
||||
Modulo operator/(Modulo b) const {
|
||||
return *this * b.inv();
|
||||
}
|
||||
|
||||
Modulo& operator+=(Modulo b) {
|
||||
return *this = *this + b;
|
||||
}
|
||||
Modulo& operator-=(Modulo b) {
|
||||
return *this = *this - b;
|
||||
}
|
||||
Modulo& operator*=(Modulo b) {
|
||||
return *this = *this * b;
|
||||
}
|
||||
Modulo& operator/=(Modulo b) {
|
||||
return *this = *this / b;
|
||||
}
|
||||
|
||||
bool operator==(Modulo b) {
|
||||
return v == b.v;
|
||||
}
|
||||
|
||||
// back door
|
||||
unsigned val() const {
|
||||
return v;
|
||||
}
|
||||
};
|
||||
|
||||
// Evaluates a polynomial p in little-endian form (e.g. x^2 + 3x + 2 is
|
||||
// represented as {2, 3, 1}) at coordinate x,
|
||||
// e.g. eval_poly_at((int[]){2, 3, 1}, 5) = 42.
|
||||
//
|
||||
// T should be a type supporting ring arithmetic and T(0) and T(1) should be the
|
||||
// appropriate identities.
|
||||
//
|
||||
// Range should be a type that can be iterated to get const T& elements.
|
||||
template<typename T, typename Range>
|
||||
T eval_poly_at(const Range& p, T x) {
|
||||
T r(0), xi(1);
|
||||
for (const T& c_i : p) {
|
||||
r += c_i * xi;
|
||||
xi *= x;
|
||||
}
|
||||
return r;
|
||||
}
|
||||
|
||||
// Given p+1 y values and x values with no errors, recovers the original
|
||||
// degree-p polynomial. For example,
|
||||
// lagrange_interp<double>((double[]){51.0, 59.0, 66.0},
|
||||
// (double[]){1.0, 3.0, 4.0})
|
||||
// = {50.0, 0.0, 1.0}.
|
||||
//
|
||||
// T should be a field and Range should be a sized range type with values of
|
||||
// type T. T(0) and T(1) should be the appropriate field identities.
|
||||
template<typename T, typename Range>
|
||||
std::vector<T> lagrange_interp(const Range& pieces, const Range& xs) {
|
||||
// `size` is the number of datapoints; the degree of the result polynomial
|
||||
// is then `size-1`
|
||||
const unsigned size = pieces.size();
|
||||
assert(size == xs.size());
|
||||
|
||||
std::vector<T> root{T(1)}; // initially just the polynomial "1"
|
||||
// build up the numerator polynomial, `root`, by taking the product of (x-v)
|
||||
// (implemented as convolving repeatedly with [-v, 1])
|
||||
for (const T& v : xs) {
|
||||
// iterate backward since new root[i] depends on old root[i-1]
|
||||
for (unsigned i = root.size(); i--; ) {
|
||||
root[i] *= -v;
|
||||
if (i > 0) root[i] += root[i-1];
|
||||
}
|
||||
// polynomial is always monic so save an extra multiply by doing this
|
||||
// after
|
||||
root.emplace_back(1);
|
||||
}
|
||||
// should have degree `size`
|
||||
assert(root.size() == size + 1);
|
||||
|
||||
// generate per-value numerator polynomials by dividing the master
|
||||
// polynomial back by each x coordinate
|
||||
std::vector<std::vector<T> > nums;
|
||||
nums.reserve(size);
|
||||
for (const T& v : xs) {
|
||||
// divide `root` by (x-v) to get a degree size-1 polynomial
|
||||
// (i.e. with `size` coefficients)
|
||||
std::vector<T> num(size, T(0));
|
||||
// compute the x^0, x^1, ..., x^(p-2) coefficients by long division
|
||||
T last = num.back() = T(1); // still always a monic polynomial
|
||||
for (int i = int(size)-2; i >= 0; --i) {
|
||||
num[i] = last = root[i+1] + last * v;
|
||||
}
|
||||
nums.emplace_back(std::move(num));
|
||||
}
|
||||
assert(nums.size() == size);
|
||||
|
||||
// generate denominators by evaluating numerator polys at their x
|
||||
std::vector<T> denoms;
|
||||
denoms.reserve(size);
|
||||
{
|
||||
unsigned i = 0;
|
||||
for (const T& v : xs) {
|
||||
denoms.push_back(eval_poly_at(nums[i], v));
|
||||
++i;
|
||||
}
|
||||
}
|
||||
assert(denoms.size() == size);
|
||||
|
||||
// generate output polynomial by taking the sum over i of
|
||||
// (nums[i] * pieces[i] / denoms[i])
|
||||
std::vector<T> sum(size, T(0));
|
||||
{
|
||||
unsigned i = 0;
|
||||
for (const T& y : pieces) {
|
||||
T factor = y / denoms[i];
|
||||
// add nums[i] * factor to sum, as a vector
|
||||
for (unsigned j = 0; j < size; ++j) {
|
||||
sum[j] += nums[i][j] * factor;
|
||||
}
|
||||
++i;
|
||||
}
|
||||
}
|
||||
return sum;
|
||||
}
|
||||
|
||||
// Given two linear equations, eliminates the first variable and returns
|
||||
// the resulting equation.
|
||||
//
|
||||
// An equation of the form a_1 x_1 + ... + a_n x_n + b = 0
|
||||
// is represented as the array [a_1, ..., a_n, b].
|
||||
//
|
||||
// T should be a ring and Range should be an indexable, sized range of T.
|
||||
template<typename T, typename Range>
|
||||
std::vector<T> elim(const Range& a, const Range& b) {
|
||||
assert(a.size() == b.size());
|
||||
std::vector<T> result;
|
||||
const unsigned size = a.size();
|
||||
for (unsigned i = 1; i < size; ++i) {
|
||||
result.push_back(a[i] * b[0] - b[i] * a[0]);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
// Given one homogeneous linear equation and the values of all but the first
|
||||
// variable, solve for the value of the first variable.
|
||||
//
|
||||
// For an equation of the form
|
||||
// a_1 x_1 + ... + a_n x_n = 0
|
||||
// pass two arrays, [a_1, ..., a_n] and [x_2, ..., x_n].
|
||||
//
|
||||
// T should be a field; and R1 and R2 should be indexable, sized ranges of T.
|
||||
template<typename T, typename R1, typename R2>
|
||||
T evaluate(const R1& coeffs, const R2& vals) {
|
||||
assert(coeffs.size() == vals.size() + 1);
|
||||
T total(0);
|
||||
const unsigned size = vals.size();
|
||||
for (unsigned i = 0; i < size; ++i) {
|
||||
total -= coeffs[i+1] * vals[i];
|
||||
}
|
||||
return total / coeffs[0];
|
||||
}
|
||||
|
||||
// Given an n*n system of inhomogeneous linear equations, solve for the value of
|
||||
// every variable.
|
||||
//
|
||||
// For equations of the form
|
||||
// a_1,1 x_1 + ... + a_1,n x_n + b_1 = 0
|
||||
// a_2,1 x_1 + ... + a_2,n x_n + b_2 = 0
|
||||
// ...
|
||||
// a_n,1 x_1 + ... + a_n,n x_n + b_n = 0
|
||||
// pass a two-dimensional array
|
||||
// [[a_1,1, ..., a_1,n, b_1], ..., [a_n,1, ..., a_n,n, b_n]].
|
||||
//
|
||||
// Returns the values of [x_1, ..., x_n].
|
||||
//
|
||||
// T should be a field.
|
||||
template<typename T>
|
||||
std::vector<T> sys_solve(std::vector<std::vector<T>> eqs) {
|
||||
assert(eqs.size() > 0);
|
||||
std::vector<std::vector<T>> back_eqs{eqs[0]};
|
||||
|
||||
while (eqs.size() > 1) {
|
||||
std::vector<std::vector<T>> neweqs;
|
||||
neweqs.reserve(eqs.size()-1);
|
||||
for (unsigned i = 0; i < eqs.size()-1; ++i) {
|
||||
neweqs.push_back(elim<T>(eqs[i], eqs[i+1]));
|
||||
}
|
||||
eqs = std::move(neweqs);
|
||||
        // find a row with a nonzero first entry
        unsigned i = 0;
        while (i + 1 < eqs.size() && eqs[i][0] == T(0)) {
            ++i;
        }
        back_eqs.push_back(eqs[i]);
    }

    std::vector<T> kvals(back_eqs.size()+1, T(0));
    kvals.back() = T(1);
    // back-substitute in reverse order
    // (smallest to largest equation)
    for (unsigned i = back_eqs.size(); i--; ) {
        kvals[i] = evaluate<T>(back_eqs[i],
            // use the already-computed values + the 1 at the end
            make_iter_pair(kvals.begin()+i+1, kvals.end()));
    }

    kvals.pop_back();

    return kvals;
}

// Divide two polynomials with nonzero leading terms.
// T should be a field.
template<typename T>
std::vector<T> polydiv(std::vector<T> Q, const std::vector<T>& E) {
    if (Q.size() < E.size()) return {};
    std::vector<T> div(Q.size() - E.size() + 1, T(0));
    unsigned i = div.size();
    while (i--) {
        T factor = Q.back() / E.back();
        div[i] = factor;
        // subtract factor * E * x^i from Q
        Q.pop_back(); // the highest term should cancel
        for (unsigned j = 0; j < E.size() - 1; ++j) {
            Q[i+j] -= factor * E[j];
        }
        assert(Q.size() == i + E.size() - 1);
    }
    return div;
}

// Given a set of y coordinates and x coordinates, and the degree of the
// original polynomial, determines the original polynomial even if some of the y
// coordinates are wrong. If m is the minimal number of pieces (ie. degree +
// 1) and t is the total number of pieces provided, then the algo can handle up
// to (t-m)/2 errors.
//
// T should be a field. In particular, division by zero over T should throw
// ZeroDivisionError.
template<typename T>
std::vector<T> berlekamp_welch_attempt(const std::vector<T>& pieces,
                                       const std::vector<T>& xs, unsigned master_degree) {
    const unsigned error_locator_degree = (pieces.size() - master_degree - 1) / 2;
    // Set up the equations for y[i]E(x[i]) = Q(x[i])
    // degree(E) = error_locator_degree
    // degree(Q) = master_degree + error_locator_degree - 1
    std::vector<std::vector<T>> eqs(2*error_locator_degree + master_degree + 1);
    for (unsigned i = 0; i < eqs.size(); ++i) {
        std::vector<T>& eq = eqs[i];
        const T& x = xs[i];
        const T& piece = pieces[i];
        T neg_x_j = T(0) - T(1);
        for (unsigned j = 0; j < error_locator_degree + master_degree + 1; ++j) {
            eq.push_back(neg_x_j);
            neg_x_j *= x;
        }
        T x_j = T(1);
        for (unsigned j = 0; j < error_locator_degree + 1; ++j) {
            eq.push_back(x_j * piece);
            x_j *= x;
        }
    }
    // Solve the equations
    // Assume the top error polynomial term to be one
    int errors = error_locator_degree;
    unsigned ones = 1;
    std::vector<T> polys;
    while (errors >= 0) {
        try {
            polys = sys_solve(eqs);
        } catch (const ZeroDivisionError&) {
            eqs.pop_back();
            for (auto& eq : eqs) {
                eq[eq.size()-2] += eq.back();
                eq.pop_back();
            }
            --errors;
            ++ones;
            continue;
        }
        for (unsigned i = 0; i < ones; ++i) polys.emplace_back(1);
        break;
    }
    if (errors < 0) {
        throw std::logic_error("Not enough data!");
    }
    // divide the polynomials...
    const unsigned split = error_locator_degree + master_degree + 1;
    std::vector<T> div = polydiv(std::vector<T>(polys.begin(), polys.begin() + split),
                                 std::vector<T>(polys.begin() + split, polys.end()));
    unsigned corrects = 0;
    for (unsigned i = 0; i < xs.size(); ++i) {
        if (eval_poly_at<T>(div, xs[i]) == pieces[i]) {
            ++corrects;
        }
    }
    if (corrects < master_degree + errors) {
        throw std::logic_error("Answer doesn't match (too many errors)!");
    }
    return div;
}
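For reference, a minimal Python sketch of the same y[i]E(x[i]) = Q(x[i]) system, over the prime field GF(65537) and solved with plain Gauss-Jordan elimination instead of sys_solve. It assumes the generic, nonsingular case (the retry loop above handles the degenerate one); all names here are illustrative, not from this repo:

P = 65537

def bw_solve(pieces, xs, master_degree):
    e = (len(pieces) - master_degree - 1) // 2        # max locatable errors
    qlen, elen = master_degree + e + 1, e + 1         # coefficient counts of Q and E
    rows = []
    for x, y in zip(xs, pieces):
        # y*E(x) = Q(x), E monic: unknowns are Q's coeffs and E's lower coeffs
        row = [(-pow(x, j, P)) % P for j in range(qlen)]
        row += [y * pow(x, j, P) % P for j in range(elen - 1)]
        row.append(-y * pow(x, elen - 1, P) % P)      # right-hand side
        rows.append(row)
    n = qlen + elen - 1
    for col in range(n):                              # Gauss-Jordan mod P
        piv = next(r for r in range(col, len(rows)) if rows[r][col])
        rows[col], rows[piv] = rows[piv], rows[col]
        inv = pow(rows[col][col], P - 2, P)
        rows[col] = [v * inv % P for v in rows[col]]
        for r in range(len(rows)):
            if r != col and rows[r][col]:
                f = rows[r][col]
                rows[r] = [(a - f * b) % P for a, b in zip(rows[r], rows[col])]
    sol = [rows[i][-1] for i in range(n)]
    return sol[:qlen], sol[qlen:] + [1]               # Q, monic E

xs = [1, 2, 3, 4, 5]
ys = [(3 * x + 5) % P for x in xs]        # degree-1 data, so m = 2 pieces suffice
ys[2] = 31337                             # corrupt one piece: (5 - 2) // 2 = 1 error ok
q, epoly = bw_solve(ys, xs, master_degree=1)
assert (epoly[0] + epoly[1] * xs[2]) % P == 0   # E's root locates the bad x
# and Q/E is the data polynomial: Q(1)/E(1) should equal 3*1 + 5 = 8
num = sum(q) % P
den = (epoly[0] + epoly[1]) % P
assert num * pow(den, P - 2, P) % P == 8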

// Extends a list of integers in [0 ... 255] (if using Galois arithmetic) by
// adding n redundant error-correction values
template<typename T, typename F=Galois>
std::vector<T> extend(std::vector<T> data, unsigned n) {
    const unsigned size = data.size();

    std::vector<F> data_f;
    data_f.reserve(size);
    for (T d : data) data_f.emplace_back(d);

    std::vector<F> xs;
    for (unsigned i = 0; i < size; ++i) xs.emplace_back(i);

    std::vector<F> poly = berlekamp_welch_attempt(data_f, xs, size-1);

    data.reserve(size+n);
    for (unsigned i = 0; i < n; ++i) {
        data.push_back(eval_poly_at(poly, F(T(size + i))).val());
    }
    return data;
}

// Repairs a list of integers in [0 ... 255]. Some integers can be erroneous,
// and you can put -1 in place of an integer if you know that a certain
// value is defective or missing. Uses the Berlekamp-Welch algorithm to
// do error-correction
template<typename T, typename F=Galois>
std::vector<T> repair(const std::vector<T>& data, unsigned datasize) {
    std::vector<F> vs, xs;
    for (unsigned i = 0; i < data.size(); ++i) {
        if (data[i] >= 0) {
            vs.emplace_back(data[i]);
            xs.emplace_back(T(i));
        }
    }
    std::vector<F> poly = berlekamp_welch_attempt(vs, xs, datasize - 1);
    std::vector<T> result;
    for (unsigned i = 0; i < data.size(); ++i) {
        result.push_back(eval_poly_at(poly, F(T(i))).val());
    }
    return result;
}
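A simplified, erasure-only analogue of extend()/repair() in Python over GF(65537) (interp_eval is a hypothetical helper, and there is no error correction here -- that is what berlekamp_welch_attempt adds): interpolate the data as a polynomial, evaluate it at fresh x coordinates, and re-interpolate from any len(data) surviving points:

P = 65537

def interp_eval(points, x):
    # Lagrange-evaluate the unique polynomial through `points` at `x`.
    total = 0
    for i, (xi, yi) in enumerate(points):
        num, den = 1, 1
        for j, (xj, _) in enumerate(points):
            if i != j:
                num = num * (x - xj) % P
                den = den * (xi - xj) % P
        total = (total + yi * num * pow(den, P - 2, P)) % P
    return total

data = [104, 101, 108, 108, 111]                      # "hello"
pts = list(enumerate(data))
extended = data + [interp_eval(pts, x) for x in range(5, 8)]  # 3 redundant values
# Lose any three values; the survivors still determine the polynomial:
survivors = [(x, y) for x, y in enumerate(extended) if x not in (1, 3, 6)]
assert [interp_eval(survivors, x) for x in range(5)] == data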


template<typename T>
std::vector<std::vector<T>> transpose(const std::vector<std::vector<T>>& d) {
    assert(d.size() > 0);
    unsigned width = d[0].size();
    std::vector<std::vector<T>> result(width);
    for (unsigned i = 0; i < width; ++i) {
        for (unsigned j = 0; j < d.size(); ++j) {
            result[i].push_back(d[j][i]);
        }
    }
    return result;
}

template<typename T>
std::vector<T> extract_column(const std::vector<std::vector<T>>& d, unsigned i) {
    std::vector<T> result;
    for (unsigned j = 0; j < d.size(); ++j) {
        result.push_back(d[j][i]);
    }
    return result;
}

// Extends a list of bytearrays
// eg. extend_chunks([map(ord, 'hello'), map(ord, 'world')], 2)
// n is the number of redundant error-correction chunks to add
template<typename T, typename F=Galois>
std::vector<std::vector<T>> extend_chunks(
        const std::vector<std::vector<T>>& data,
        unsigned n) {
    std::vector<std::vector<T>> o;
    const unsigned height = data.size();
    assert(height > 0);
    const unsigned width = data[0].size();
    for (unsigned i = 0; i < width; ++i) {
        o.push_back(extend<T, F>(extract_column(data, i), n));
    }
    return transpose(o);
}

// Repairs a list of bytearrays. Use an empty array in place of a missing array.
// Individual arrays can contain some missing or erroneous data.
template<typename T, typename F=Galois>
std::vector<std::vector<T>> repair_chunks(
        std::vector<std::vector<T>> data,
        unsigned datasize) {
    unsigned width = 0;
    for (const std::vector<T>& row : data) {
        if (row.size() > 0) {
            width = row.size();
            break;
        }
    }
    assert(width > 0);
    for (std::vector<T>& row : data) {
        if (row.size() == 0) {
            row.assign(width, -1);
        } else {
            assert(row.size() == width);
        }
    }
    std::vector<std::vector<T>> o;
    for (unsigned i = 0; i < width; ++i) {
        o.push_back(repair<T, F>(extract_column(data, i), datasize));
    }
    return transpose(o);
}

// Extends either a bytearray or a list of bytearrays or a list of lists...
// Used in the cubify method to expand a cube in all dimensions
template<typename T, typename F=Galois>
struct deep_extend_chunks_helper {
    static std::vector<T> go(const std::vector<T>& data, unsigned n) {
        return extend<T, F>(data, n);
    }
};
template<typename T, typename F>
struct deep_extend_chunks_helper<std::vector<T>, F> {
    static std::vector<std::vector<T>> go(const std::vector<std::vector<T>>& data, unsigned n) {
        std::vector<std::vector<T>> o;
        const unsigned height = data.size();
        assert(height > 0);
        const unsigned width = data[0].size();
        for (unsigned i = 0; i < width; ++i) {
            o.push_back(deep_extend_chunks_helper<T, F>::go(extract_column(data, i), n));
        }
        return transpose(o);
    }
};
template<typename T, typename F=Galois>
std::vector<T> deep_extend_chunks(const std::vector<T>& data, unsigned n) {
    return deep_extend_chunks_helper<T, F>::go(data, n);
}
@@ -1,478 +0,0 @@
(function() {
    var me = {};

    function ZeroDivisionError() {
        if (!this) return new ZeroDivisionError();
        this.message = "division by zero";
        this.name = "ZeroDivisionError";
    }
    me.ZeroDivisionError = ZeroDivisionError;

    // per-byte 2^8 Galois field
    // Note that this imposes a hard limit that the number of extended chunks can
    // be at most 256 along each dimension
    function galoistpl(a) {
        // 2 is not a primitive root, so we have to use 3 as our logarithm base
        var r = a ^ (a<<1); // a * (x+1)
        if (r > 0xff) { // overflow?
            r = r ^ 0x11b;
        }
        return r;
    }

    // Precomputing a multiplication and XOR table for increased speed
    var glogtable = new Array(256);
    var gexptable = [];
    (function() {
        var v = 1;
        for (var i = 0; i < 255; i++) {
            glogtable[v] = i;
            gexptable.push(v);
            v = galoistpl(v);
        }
    })();
    me.glogtable = glogtable;
    me.gexptable = gexptable;

    function Galois(val) {
        if (!(this instanceof Galois)) return new Galois(val);
        if (val instanceof Galois) {
            this.val = val.val;
        } else {
            this.val = val;
        }
        if (typeof Object.freeze == 'function') {
            Object.freeze(this);
        }
    }
    me.Galois = Galois;
    Galois.prototype.add = Galois.prototype.sub = function(other) {
        return new Galois(this.val ^ other.val);
    };
    Galois.prototype.mul = function(other) {
        if (this.val == 0 || other.val == 0) {
            return new Galois(0);
        }
        return new Galois(gexptable[(glogtable[this.val] +
                                     glogtable[other.val]) % 255]);
    };
    Galois.prototype.div = function(other) {
        if (other.val == 0) {
            throw new ZeroDivisionError();
        }
        if (this.val == 0) {
            return new Galois(0);
        }
        return new Galois(gexptable[(glogtable[this.val] + 255 -
                                     glogtable[other.val]) % 255]);
    };
    Galois.prototype.inspect = function() {
        return ""+this.val;
    };

    function powmod(b, e, m) {
        var r = 1;
        while (e > 0) {
            if (e & 1) r = (r * b) % m;
            b = (b * b) % m;
            e = e >> 1;
        }
        return r;
    }


    // Modular division class
    function mkModuloClass(n) {
        if (n <= 2) throw new Error("n must be prime!");
        for (var divisor = 2; divisor * divisor <= n; divisor++) {
            if (n % divisor == 0) {
                throw new Error("n must be prime!");
            }
        }

        function Mod(val) {
            if (!(this instanceof Mod)) return new Mod(val);
            if (val instanceof Mod) {
                this.val = val.val;
            } else {
                this.val = val;
            }
            if (typeof Object.freeze == 'function') {
                Object.freeze(this);
            }
        }
        Mod.modulo = n;
        Mod.prototype.add = function(other) {
            return new Mod((this.val + other.val) % n);
        };
        Mod.prototype.sub = function(other) {
            return new Mod((this.val + n - other.val) % n);
        };
        Mod.prototype.mul = function(other) {
            return new Mod((this.val * other.val) % n);
        };
        Mod.prototype.div = function(other) {
            return new Mod((this.val * powmod(other.val, n-2, n)) % n);
        };
        Mod.prototype.inspect = function() {
            return ""+this.val;
        };

        return Mod;
    }
    me.mkModuloClass = mkModuloClass;

    // Evaluates a polynomial in little-endian form, eg. x^2 + 3x + 2 = [2, 3, 1]
    // (normally I hate little-endian, but in this case dealing with polynomials
    // it's justified, since you get the nice property that p[n] is the nth degree
    // term of p) at coordinate x, eg. eval_poly_at([2, 3, 1], 5) = 42 if you are
    // using float as your arithmetic
    function eval_poly_at(p, x) {
        var arithmetic = p[0].constructor;
        var y = new arithmetic(0);
        var x_to_the_i = new arithmetic(1);
        for (var i = 0; i < p.length; i++) {
            y = y.add(x_to_the_i.mul(p[i]));
            x_to_the_i = x_to_the_i.mul(x);
        }
        return y;
    }
    me.eval_poly_at = eval_poly_at;

    // Given p+1 y values and x values with no errors, recovers the original
    // degree-p polynomial. For example,
    // lagrange_interp([51.0, 59.0, 66.0], [1, 3, 4]) = [50.0, 0, 1.0]
    // if you are using float as your arithmetic
    function lagrange_interp(pieces, xs) {
        var arithmetic = pieces[0].constructor;
        var zero = new arithmetic(0);
        var one = new arithmetic(1);
        // Generate master numerator polynomial
        var root = [one];
        var i, j;
        for (i = 0; i < xs.length; i++) {
            root.unshift(zero);
            for (j = 0; j < root.length - 1; j++) {
                root[j] = root[j].sub(root[j+1].mul(xs[i]));
            }
        }
        // Generate per-value numerator polynomials by dividing the master
        // polynomial back by each x coordinate
        var nums = [];
        for (i = 0; i < xs.length; i++) {
            var output = [];
            var last = one;
            for (j = 2; j < root.length+1; j++) {
                output.unshift(last);
                if (j != root.length) {
                    last = root[root.length-j].add(last.mul(xs[i]));
                }
            }
            nums.push(output);
        }
        // Generate denominators by evaluating numerator polys at their x
        var denoms = [];
        for (i = 0; i < xs.length; i++) {
            var denom = zero;
            var x_to_the_j = one;
            for (j = 0; j < nums[i].length; j++) {
                denom = denom.add(x_to_the_j.mul(nums[i][j]));
                x_to_the_j = x_to_the_j.mul(xs[i]);
            }
            denoms.push(denom);
        }
        // Generate output polynomial
        var b = [];
        for (i = 0; i < pieces.length; i++) {
            b[i] = zero;
        }
        for (i = 0; i < xs.length; i++) {
            var yslice = pieces[i].div(denoms[i]);
            for (j = 0; j < pieces.length; j++) {
                b[j] = b[j].add(nums[i][j].mul(yslice));
            }
        }
        return b;
    }
    me.lagrange_interp = lagrange_interp;

    // Compresses two linear equations of length n into one
    // equation of length n-1
    // Format:
    // 3x + 4y = 80 (ie. 3x + 4y - 80 = 0) -> a = [3,4,-80]
    // 5x + 2y = 70 (ie. 5x + 2y - 70 = 0) -> b = [5,2,-70]
    function elim(a, b) {
        var c = [];
        for (var i = 1; i < a.length; i++) {
            c[i-1] = a[i].mul(b[0]).sub(b[i].mul(a[0]));
        }
        return c;
    }

    // Linear equation solver
    // Format:
    // 3x + 4y = 80, y = 5 (ie. 3x + 4y - 80z = 0, y = 5, z = 1)
    // -> coeffs = [3,4,-80], vals = [5,1]
    function evaluate(coeffs, vals) {
        var arithmetic = coeffs[0].constructor;
        var tot = new arithmetic(0);
        for (var i = 0; i < vals.length; i++) {
            tot = tot.sub(coeffs[i+1].mul(vals[i]));
        }
        if (coeffs[0].val == 0) {
            throw new ZeroDivisionError();
        }
        return tot.div(coeffs[0]);
    }

    // Linear equation system solver
    // Format:
    // ax + by + c = 0, dx + ey + f = 0
    // -> [[a, b, c], [d, e, f]]
    // eg.
    // [[3.0, 5.0, -13.0], [9.0, 1.0, -11.0]] -> [1.0, 2.0]
    function sys_solve(eqs) {
        var arithmetic = eqs[0][0].constructor;
        var one = new arithmetic(1);
        var back_eqs = [eqs[0]];
        var i;
        while (eqs.length > 1) {
            var neweqs = [];
            for (i = 0; i < eqs.length - 1; i++) {
                neweqs.push(elim(eqs[i], eqs[i+1]));
            }
            eqs = neweqs;
            i = 0;
            while (i < eqs.length - 1 && eqs[i][0].val == 0) {
                i++;
            }
            back_eqs.unshift(eqs[i]);
        }
        var kvals = [one];
        for (i = 0; i < back_eqs.length; i++) {
            kvals.unshift(evaluate(back_eqs[i], kvals));
        }
        return kvals.slice(0, -1);
    }
    me.sys_solve = sys_solve;

    function polydiv(Q, E) {
        var qpoly = Q.slice();
        var epoly = E.slice();
        var div = [];
        while (qpoly.length >= epoly.length) {
            div.unshift(qpoly[qpoly.length-1].div(epoly[epoly.length-1]));
            for (var i = 2; i < epoly.length + 1; i++) {
                qpoly[qpoly.length-i] =
                    qpoly[qpoly.length-i].sub(div[0].mul(epoly[epoly.length-i]));
            }
            qpoly.pop();
        }
        return div;
    }
    me.polydiv = polydiv;

    // Given a set of y coordinates and x coordinates, and the degree of the
    // original polynomial, determines the original polynomial even if some of
    // the y coordinates are wrong. If m is the minimal number of pieces (ie.
    // degree + 1) and t is the total number of pieces provided, then the algo
    // can handle up to (t-m)/2 errors. See:
    // http://en.wikipedia.org/wiki/Berlekamp%E2%80%93Welch_algorithm#Example
    // (just skip to my example, the rest of the article sucks imo)
    function berlekamp_welch_attempt(pieces, xs, master_degree) {
        var error_locator_degree = Math.floor((pieces.length - master_degree - 1) / 2);
        var arithmetic = pieces[0].constructor;
        var zero = new arithmetic(0);
        var one = new arithmetic(1);
        // Set up the equations for y[i]E(x[i]) = Q(x[i])
        // degree(E) = error_locator_degree
        // degree(Q) = master_degree + error_locator_degree - 1
        var eqs = [];
        var i, j;
        for (i = 0; i < 2 * error_locator_degree + master_degree + 1; i++) {
            eqs.push([]);
        }
        for (i = 0; i < 2 * error_locator_degree + master_degree + 1; i++) {
            var neg_x_to_the_j = zero.sub(one);
            for (j = 0; j < error_locator_degree + master_degree + 1; j++) {
                eqs[i].push(neg_x_to_the_j);
                neg_x_to_the_j = neg_x_to_the_j.mul(xs[i]);
            }
            var x_to_the_j = one;
            for (j = 0; j < error_locator_degree + 1; j++) {
                eqs[i].push(x_to_the_j.mul(pieces[i]));
                x_to_the_j = x_to_the_j.mul(xs[i]);
            }
        }
        // Solve 'em
        // Assume the top error polynomial term to be one
        var errors = error_locator_degree;
        var ones = 1;
        var polys;
        while (errors >= 0) {
            try {
                polys = sys_solve(eqs);
                for (i = 0; i < ones; i++) polys.push(one);
                break;
            } catch (e) {
                if (e instanceof ZeroDivisionError) {
                    eqs.pop();
                    for (i = 0; i < eqs.length; i++) {
                        var eq = eqs[i];
                        eq[eq.length-2] = eq[eq.length-2].add(eq[eq.length-1]);
                        eq.pop();
                    }
                    errors--;
                    ones++;
                } else {
                    throw e;
                }
            }
        }
        if (errors < 0) {
            throw new Error("Not enough data!");
        }
        // Divide the polynomials
        var qpoly = polys.slice(0, error_locator_degree + master_degree + 1);
        var epoly = polys.slice(error_locator_degree + master_degree + 1);
        var div = polydiv(qpoly, epoly);
        // Check
        var corrects = 0;
        for (i = 0; i < xs.length; i++) {
            if (eval_poly_at(div, xs[i]).val == pieces[i].val) {
                corrects++;
            }
        }
        if (corrects < master_degree + errors) {
            throw new Error("Answer doesn't match (too many errors)!");
        }
        return div;
    }
    me.berlekamp_welch_attempt = berlekamp_welch_attempt;

    // Extends a list of integers in [0 ... 255] (if using Galois arithmetic) by
    // adding n redundant error-correction values
    function extend(data, n, arithmetic) {
        arithmetic = arithmetic || Galois;
        function mk(x) { return new arithmetic(x); }
        var data2 = data.map(mk);
        var data3 = data.slice();
        var xs = [];
        var i;
        for (i = 0; i < data.length; i++) {
            xs.push(new arithmetic(i));
        }
        var poly = berlekamp_welch_attempt(data2, xs, data.length - 1);
        for (i = 0; i < n; i++) {
            data3.push(eval_poly_at(poly, new arithmetic(data.length + i)).val);
        }
        return data3;
    }
    me.extend = extend;

    // Repairs a list of integers in [0 ... 255]. Some integers can be
    // erroneous, and you can put null (or undefined) in place of an integer if
    // you know that a certain value is defective or missing. Uses the
    // Berlekamp-Welch algorithm to do error-correction
    function repair(data, datasize, arithmetic) {
        arithmetic = arithmetic || Galois;
        var vs = [];
        var xs = [];
        var i;
        for (i = 0; i < data.length; i++) {
            if (data[i] != null) {
                vs.push(new arithmetic(data[i]));
                xs.push(new arithmetic(i));
            }
        }
        var poly = berlekamp_welch_attempt(vs, xs, datasize - 1);
        var result = [];
        for (i = 0; i < data.length; i++) {
            result.push(eval_poly_at(poly, new arithmetic(i)).val);
        }
        return result;
    }
    me.repair = repair;

    function transpose(xs) {
        var ys = [];
        for (var i = 0; i < xs[0].length; i++) {
            var y = [];
            for (var j = 0; j < xs.length; j++) {
                y.push(xs[j][i]);
            }
            ys.push(y);
        }
        return ys;
    }

    // Extends a list of bytearrays
    // eg. extend_chunks([map(ord, 'hello'), map(ord, 'world')], 2)
    // n is the number of redundant error-correction chunks to add
    function extend_chunks(data, n, arithmetic) {
        arithmetic = arithmetic || Galois;
        var o = [];
        for (var i = 0; i < data[0].length; i++) {
            o.push(extend(data.map(function(x) { return x[i]; }), n, arithmetic));
        }
        return transpose(o);
    }
    me.extend_chunks = extend_chunks;

    // Repairs a list of bytearrays. Use null in place of a missing array.
    // Individual arrays can contain some missing or erroneous data.
    function repair_chunks(data, datasize, arithmetic) {
        arithmetic = arithmetic || Galois;
        var first_nonzero = 0;
        while (data[first_nonzero] == null) {
            first_nonzero++;
        }
        var i;
        for (i = 0; i < data.length; i++) {
            if (data[i] == null) {
                data[i] = new Array(data[first_nonzero].length);
            }
        }
        var o = [];
        for (i = 0; i < data[0].length; i++) {
            o.push(repair(data.map(function(x) { return x[i]; }), datasize, arithmetic));
        }
        return transpose(o);
    }
    me.repair_chunks = repair_chunks;

    // Extends either a bytearray or a list of bytearrays or a list of lists...
    // Used in the cubify method to expand a cube in all dimensions
    function deep_extend_chunks(data, n, arithmetic) {
        arithmetic = arithmetic || Galois;
        if (!(data[0] instanceof Array)) {
            return extend(data, n, arithmetic);
        } else {
            var o = [];
            for (var i = 0; i < data[0].length; i++) {
                o.push(deep_extend_chunks(
                    data.map(function(x) { return x[i]; }), n, arithmetic));
            }
            return transpose(o);
        }
    }
    me.deep_extend_chunks = deep_extend_chunks;

    function isObject(o) {
        return typeof o == 'object' || typeof o == 'function';
    }
    if (typeof define == 'function' && typeof define.amd == 'object' && define.amd) {
        define(function() {
            return me;
        });
    } else {
        try {
            if (isObject(module)) { module.exports = me; }
            else (isObject(window) ? window : this).Erasure = me;
        }
        catch(e) {
            (isObject(window) ? window : this).Erasure = me;
        }
    }
}.call(this));
@@ -1,75 +0,0 @@
import random
import share

fsz = 200

f = ''.join([
    random.choice('1234567890qwetyuiopasdfghjklzxcvbnm') for x in range(fsz)])

c = share.split_file(f, 5, 4)

print 'File split successfully.'
print ' '
print 'Chunks: '
print ' '
for chunk in c:
    print chunk
print ' '

g = ''.join([
    random.choice('1234567890qwetyuiopasdfghjklzxcvbnm') for x in range(fsz)])

c2 = share.split_file(g, 5, 4)

assert share.recombine_file(
    [c[0], c2[1], c[2], c[3], c2[4], c[5], c[6], c[7], c[8]]) == f

print '5 of 9 with 7 legit, 2 errors passed'

assert share.recombine_file(
    [c[0], c[2], c[3], c2[4], c[6], c[7], c[8]]) == f

print '5 of 9 with 6 legit, 1 error passed'

assert share.recombine_file(
    [c[0], c[3], c[6], c[7], c[8]]) == f

print '5 of 9 with 5 legit, 0 errors passed'

chunks3 = share.serialize_cubify(f, 3, 3, 2)

print ' '
print 'Chunks: '
print ' '
for chunk in chunks3:
    print chunk
print ' '

for i in range(26):
    pos = random.randrange(len(chunks3))
    print 'Removing cell %d' % pos
    chunks3.pop(pos)

assert share.full_heal_set(chunks3) == f

print ' '
print 'Cube reconstruction test passed'
print ' '

chunks4 = share.serialize_cubify(g, 3, 3, 2)

for i in range(7):
    pos = random.randrange(len(chunks4))
    chunk = chunks4.pop(pos)
    print 'Damaging cell %d' % pos
    print 'Prior: %s' % chunk
    metadata, content = share.deserialize_chunk(chunk)
    for j in range(len(content)):
        content[j] = random.randrange(256)
    chunks4.append(share.serialize_chunk(content, *metadata))
    print 'Post: %s' % chunks4[-1]

assert share.full_heal_set(chunks4) == g

print ' '
print 'Byzantine cube reconstruction test passed'
@@ -1,34 +0,0 @@
#ifndef UTILS_H_
#define UTILS_H_

#include <utility>
#include <cstddef>
#include <type_traits>

// turn a pair of iterators into a range
template<typename T>
class iter_pair {
    T a, b;
    typedef typename std::remove_reference<decltype(**static_cast<T*>(nullptr))>::type value_type;
public:
    iter_pair(const T& _a, const T& _b) : a(_a), b(_b) { }
    iter_pair(T&& _a, T&& _b) : a(std::move(_a)), b(std::move(_b)) { }
    T begin() const { return a; }
    T end() const { return b; }

    // for random access iterators
    value_type& operator[](std::size_t ix) { return *(a + ix); }
    const value_type& operator[](std::size_t ix) const { return *(a + ix); }
    std::size_t size() const { return b - a; }
};

template<typename T>
iter_pair<T> make_iter_pair(const T& a, const T& b) {
    return iter_pair<T>(a, b);
}
template<typename T>
iter_pair<T> make_iter_pair(T&& a, T&& b) {
    return iter_pair<T>(std::move(a), std::move(b));
}

#endif
@@ -1 +0,0 @@
from .poly_utils import lagrange_interp, eval_poly_at, mul_polys, div_polys, add_polys, galois_add, galois_mul, galois_div, compose_polys
@@ -1,152 +0,0 @@
import copy
import poly_utils
import rlp

try:
    from Crypto.Hash import keccak
    sha3 = lambda x: keccak.new(digest_bits=256, data=x).digest()
except ImportError:
    import sha3 as _sha3
    sha3 = lambda x: _sha3.sha3_256(x).digest()

# Every point is an element of GF(2**16), so represents two bytes
POINT_SIZE = 2
# Every chunk contains 128 points
POINTS_IN_CHUNK = 128
# A chunk is 256 bytes
CHUNK_SIZE = POINT_SIZE * POINTS_IN_CHUNK

def bytes_to_num(bytez):
    o = 0
    for b in bytez:
        o = (o * 256) + b
    return o

def num_to_bytes(inp, n):
    o = b''
    for i in range(n):
        o = bytes([inp % 256]) + o
        inp //= 256
    return o

assert bytes_to_num(num_to_bytes(31337, 2)) == 31337

# Returns the smallest power of 2 equal to or greater than a number
def higher_power_of_2(x):
    higher_power_of_2 = 1
    while higher_power_of_2 < x:
        higher_power_of_2 *= 2
    return higher_power_of_2

# Unfortunately, most padding schemes standardized in cryptography seem to only work for
# block sizes strictly less than 256 bytes. So we'll use RLP plus zero byte padding
# instead (we RLP-encode first because the RLP encoding adds length data, so the padding
# becomes reversible even in cases where the original data ends in zero bytes)
def pad(data):
    med = rlp.encode(data)
    return med + b'\x00' * (higher_power_of_2(len(med)) - len(med))

def unpad(data):
    c, l1, l2 = rlp.codec.consume_length_prefix(data)
    assert c == str
    return data[:l1 + l2]
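Why the RLP step matters: plain zero-padding is ambiguous when the payload itself ends in zero bytes, while an RLP length prefix pins down the payload size exactly, so stripping is always reversible. A sketch of the padding arithmetic in Python, for a hypothetical 5-byte encoding:

payload_len = 5                       # len(rlp.encode(data)), say
padded_len = 1
while padded_len < payload_len:       # higher_power_of_2()
    padded_len *= 2
assert padded_len == 8                # pad() appends 8 - 5 = 3 zero bytes;
                                      # unpad() reads the length prefix back
                                      # and keeps exactly the first 5 bytes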

# Deserialize a chunk into a list of points in GF2**16
def chunk_to_points(chunk):
    return [bytes_to_num(chunk[i: i + POINT_SIZE]) for i in range(0, CHUNK_SIZE, POINT_SIZE)]

# Serialize a list of points into a chunk
def points_to_chunk(points):
    return b''.join([num_to_bytes(p, POINT_SIZE) for p in points])

testdata = sha3(b'cow') * (CHUNK_SIZE // 32)
assert points_to_chunk(chunk_to_points(testdata)) == testdata

# Make a Merkle tree out of a set of chunks
def merklize(chunks):
    # Only accept a list of size which is exactly a power of two
    assert higher_power_of_2(len(chunks)) == len(chunks)
    merkle_nodes = [sha3(x) for x in chunks]
    lower_tier = merkle_nodes[::]
    higher_tier = []
    while len(higher_tier) != 1:
        higher_tier = [sha3(lower_tier[i] + lower_tier[i + 1]) for i in range(0, len(lower_tier), 2)]
        merkle_nodes = higher_tier + merkle_nodes
        lower_tier = higher_tier
    merkle_nodes.insert(0, b'\x00' * 32)
    return merkle_nodes


class Prover():

    def __init__(self, data):
        # Pad data
        pdata = pad(data)
        byte_chunks = [pdata[i: i + CHUNK_SIZE] for i in range(0, len(pdata), CHUNK_SIZE)]
        # Decompose it into chunks, where each chunk is a collection of numbers
        chunks = []
        for byte_chunk in byte_chunks:
            chunks.append(chunk_to_points(byte_chunk))
        # Compute the polynomials representing the ith number in each chunk
        polys = [poly_utils.lagrange_interp([chunk[i] for chunk in chunks], list(range(len(chunks)))) for i in range(POINTS_IN_CHUNK)]
        # Use the polynomials to extend the chunks
        new_chunks = []
        for x in range(len(chunks), len(chunks) * 2):
            new_chunks.append(points_to_chunk([poly_utils.eval_poly_at(poly, x) for poly in polys]))
        # Total length of data including new points
        self.length = len(byte_chunks + new_chunks)
        self.extended_data = byte_chunks + new_chunks
        # Build up the Merkle tree
        self.merkle_nodes = merklize(self.extended_data)
        assert len(self.merkle_nodes) == 2 * self.length
        self.merkle_root = self.merkle_nodes[1]

    # Make a Merkle proof for some index
    def prove(self, index):
        assert 0 <= index < self.length
        adjusted_index = self.length + index
        o = [self.extended_data[index]]
        while adjusted_index > 1:
            o.append(self.merkle_nodes[adjusted_index ^ 1])
            adjusted_index >>= 1
        return o

# Verify a merkle proof of some index (light client friendly)
def verify_proof(merkle_root, proof, index):
    h = sha3(proof[0])
    for p in proof[1:]:
        if index % 2:
            h = sha3(p + h)
        else:
            h = sha3(h + p)
        index //= 2
    return h == merkle_root
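A minimal round trip of this tree layout (a sketch, with sha256 standing in for keccak so it runs without pycryptodome): the tree is a 1-indexed array where node k's children sit at 2k and 2k+1, so a proof just collects the sibling (index ^ 1) while walking up, exactly as prove() does.

import hashlib
H = lambda x: hashlib.sha256(x).digest()

leaves = [b'chunk%d' % i for i in range(4)]
nodes = [None, None, None, None] + [H(x) for x in leaves]   # leaves at 4..7
for k in range(3, 0, -1):
    nodes[k] = H(nodes[2 * k] + nodes[2 * k + 1])
root = nodes[1]

index = 2                                                   # prove leaves[2]
k, proof = 4 + index, [leaves[index]]
while k > 1:
    proof.append(nodes[k ^ 1])                              # sibling on the path
    k >>= 1

h, i = H(proof[0]), index                                   # verify_proof logic
for p in proof[1:]:
    h = H(p + h) if i % 2 else H(h + p)
    i //= 2
assert h == root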

# Fill data from partially available proofs
# If this method returns False, that result can also serve as a fraud proof
def fill(merkle_root, orig_data_length, proofs, indices):
    if len(proofs) < orig_data_length:
        raise Exception("Not enough proofs")
    if len(proofs) > orig_data_length:
        raise Exception("Too many proofs; if original data has n chunks, n chunks suffice")
    for proof, index in zip(proofs, indices):
        if not verify_proof(merkle_root, proof, index):
            raise Exception("Merkle proof for index %d invalid" % index)
    # Convert to points
    coords = [chunk_to_points(p[0]) for p in proofs]
    # Extract polynomials
    polys = [poly_utils.lagrange_interp([c[i] for c in coords], indices) for i in range(POINTS_IN_CHUNK)]
    # Fill in the remaining values
    full_coords = [None] * orig_data_length * 2
    for points, index in zip(coords, indices):
        full_coords[index] = points
    for i in range(len(full_coords)):
        if full_coords[i] is None:
            full_coords[i] = [poly_utils.eval_poly_at(poly, i) for poly in polys]
    # Serialize
    full_chunks = [points_to_chunk(points) for points in full_coords]
    # Merklize
    merkle_nodes = merklize(full_chunks)
    # Check equality of the Merkle root
    if merkle_root != merkle_nodes[1]:
        return False
    return full_chunks
@@ -1,142 +0,0 @@
modulus_poly = [1, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 1, 0, 1, 0, 0, 1,
                1]
modulus_poly_as_int = sum([(v << i) for i, v in enumerate(modulus_poly)])
degree = len(modulus_poly) - 1

two_to_the_degree = 2**degree
two_to_the_degree_m1 = 2**degree - 1

def galoistpl(a):
    # 2 is not a primitive root, so we have to use 3 as our logarithm base
    if a * 2 < two_to_the_degree:
        return (a * 2) ^ a
    else:
        return (a * 2) ^ a ^ modulus_poly_as_int

# Precomputing a log table for increased speed of addition and multiplication
glogtable = [0] * (two_to_the_degree)
gexptable = []
v = 1
for i in range(two_to_the_degree_m1):
    glogtable[v] = i
    gexptable.append(v)
    v = galoistpl(v)

gexptable += gexptable + gexptable + [0] * two_to_the_degree * 4
glogtable[0] = two_to_the_degree_m1 * 3

# Add two values in the Galois field
def galois_add(x, y):
    return x ^ y

# In binary fields, addition and subtraction are the same thing
galois_sub = galois_add

# Multiply two values in the Galois field
def galois_mul(x, y):
    return gexptable[glogtable[x] + glogtable[y]]

# Divide two values in the Galois field
def galois_div(x, y):
    return gexptable[glogtable[x] - glogtable[y] + two_to_the_degree_m1]

# Evaluate a polynomial at a point
def eval_poly_at(p, x):
    if x == 0:
        return p[0]
    y = 0
    logx = glogtable[x]
    for i, p_coeff in enumerate(p):
        if p_coeff:
            # Add x**i * coeff
            y ^= gexptable[(logx * i + glogtable[p_coeff]) % two_to_the_degree_m1]
    return y


# Given p+1 y values and x values with no errors, recovers the original
# degree-p polynomial.
# Lagrange interpolation works roughly in the following way.
# 1. Suppose you have a set of points, eg. x = [1, 2, 3], y = [2, 5, 10]
# 2. For each x, generate a polynomial which equals its corresponding
#    y coordinate at that point and 0 at all other points provided.
# 3. Add these polynomials together.

def lagrange_interp(pieces, xs):
    # Generate master numerator polynomial, eg. (x - x1) * (x - x2) * ... * (x - xn)
    root = [1]
    for x in xs:
        logx = glogtable[x]
        root.insert(0, 0)
        for j in range(len(root)-1):
            if root[j+1] and x:
                root[j] ^= gexptable[glogtable[root[j+1]] + logx]
    assert len(root) == len(pieces) + 1
    # Generate per-value numerator polynomials, eg. for x=x2,
    # (x - x1) * (x - x3) * ... * (x - xn), by dividing the master
    # polynomial back by each x coordinate
    nums = []
    for x in xs:
        output = [0] * (len(root) - 2) + [1]
        logx = glogtable[x]
        for j in range(len(root) - 2, 0, -1):
            if output[j] and x:
                output[j-1] = root[j] ^ gexptable[glogtable[output[j]] + logx]
            else:
                output[j-1] = root[j]
        assert len(output) == len(pieces)
        nums.append(output)
    # Generate denominators by evaluating numerator polys at each x
    denoms = [eval_poly_at(nums[i], xs[i]) for i in range(len(xs))]
    # Generate output polynomial, which is the sum of the per-value numerator
    # polynomials rescaled to have the right y values
    b = [0 for p in pieces]
    for i in range(len(xs)):
        log_yslice = glogtable[pieces[i]] - glogtable[denoms[i]] + two_to_the_degree_m1
        for j in range(len(pieces)):
            if nums[i][j] and pieces[i]:
                b[j] ^= gexptable[glogtable[nums[i][j]] + log_yslice]
    return b

def add_polys(a, b):
    return [(a[i] if i < len(a) else 0) ^ (b[i] if i < len(b) else 0)
            for i in range(max(len(a), len(b)))]

def mul_by_const(a, c):
    logc = glogtable[c]
    return [gexptable[glogtable[x] + logc] for x in a]

def mul_polys(a, b):
    o = [0] * (len(a) + len(b) - 1)
    for i, aval in enumerate(a):
        for j, bval in enumerate(b):
            o[i+j] ^= gexptable[glogtable[aval] + glogtable[bval]]
    return o

def div_polys(a, b):
    assert len(a) >= len(b)
    a = [x for x in a]
    o = []
    apos = len(a) - 1
    bpos = len(b) - 1
    diff = apos - bpos
    while diff >= 0:
        quot = gexptable[glogtable[a[apos]] - glogtable[b[bpos]] + two_to_the_degree_m1]
        o.insert(0, quot)
        for i in range(bpos, -1, -1):
            a[diff+i] ^= gexptable[glogtable[b[i]] + glogtable[quot]]
        apos -= 1
        diff -= 1
    return o

def compose_polys(a, b):
    o = []
    p = [1]
    for c in a:
        o = add_polys(o, mul_by_const(p, c))
        p = mul_polys(p, b)
    return o
@@ -1,198 +0,0 @@
modulus_poly = [1, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 1, 0, 1, 0, 0, 1,
                1]
modulus_poly_as_int = sum([(v << i) for i, v in enumerate(modulus_poly)])
degree = len(modulus_poly) - 1

two_to_the_degree = 2**degree
two_to_the_degree_m1 = 2**degree - 1

def galoistpl(a):
    # 2 is not a primitive root, so we have to use 3 as our logarithm base
    if a * 2 < two_to_the_degree:
        return (a * 2) ^ a
    else:
        return (a * 2) ^ a ^ modulus_poly_as_int

# Precomputing a log table for increased speed of addition and multiplication
glogtable = [0] * (two_to_the_degree)
gexptable = []
v = 1
for i in range(two_to_the_degree_m1):
    glogtable[v] = i
    gexptable.append(v)
    v = galoistpl(v)

gexptable += gexptable + gexptable

# Add two values in the Galois field
def galois_add(x, y):
    return x ^ y

# In binary fields, addition and subtraction are the same thing
galois_sub = galois_add

# Multiply two values in the Galois field
def galois_mul(x, y):
    return 0 if x*y == 0 else gexptable[glogtable[x] + glogtable[y]]

# Divide two values in the Galois field
def galois_div(x, y):
    return 0 if x == 0 else gexptable[(glogtable[x] - glogtable[y]) % two_to_the_degree_m1]

# Evaluate a polynomial at a point
def eval_poly_at(p, x):
    if x == 0:
        return p[0]
    y = 0
    logx = glogtable[x]
    for i, p_coeff in enumerate(p):
        if p_coeff:
            # Add x**i * coeff
            y ^= gexptable[(logx * i + glogtable[p_coeff]) % two_to_the_degree_m1]
    return y


# Given p+1 y values and x values with no errors, recovers the original
# degree-p polynomial.
# Lagrange interpolation works roughly in the following way.
# 1. Suppose you have a set of points, eg. x = [1, 2, 3], y = [2, 5, 10]
# 2. For each x, generate a polynomial which equals its corresponding
#    y coordinate at that point and 0 at all other points provided.
# 3. Add these polynomials together.

def lagrange_interp(pieces, xs):
    # Generate master numerator polynomial, eg. (x - x1) * (x - x2) * ... * (x - xn)
    root = mk_root_2(xs)
    assert len(root) == len(pieces) + 1
    # Generate the derivative
    d = derivative(root)
    # Generate denominators by evaluating numerator polys at each x
    denoms = multi_eval_2(d, xs)
    print(denoms)
    # denoms = [eval_poly_at(d, xs[i]) for i in range(len(xs))]
    # Generate output polynomial, which is the sum of the per-value numerator
    # polynomials rescaled to have the right y values
    factors = [galois_div(p, d) for p, d in zip(pieces, denoms)]
    o = multi_root_derive(xs, factors)
    return o

def multi_root_derive(xs, muls):
    if len(xs) == 1:
        return [muls[0]]
    R1 = mk_root_2(xs[:len(xs) // 2])
    R2 = mk_root_2(xs[len(xs) // 2:])
    x1 = karatsuba_mul(R1, multi_root_derive(xs[len(xs) // 2:], muls[len(muls) // 2:]) + [0])
    x2 = karatsuba_mul(R2, multi_root_derive(xs[:len(xs) // 2], muls[:len(muls) // 2]) + [0])
    o = [v1 ^ v2 for v1, v2 in zip(x1, x2)][:len(xs)]
    return o

def multi_root_derive_1(xs, muls):
    o = [0] * len(xs)
    for i in range(len(xs)):
        _xs = xs[:i] + xs[(i+1):]
        root = mk_root_2(_xs)
        for j in range(len(root)):
            o[j] ^= galois_mul(root[j], muls[i])
    return o

a = 124
b = 8932
c = 12415

assert galois_mul(galois_add(a, b), c) == galois_add(galois_mul(a, c), galois_mul(b, c))

def karatsuba_mul(p1, p2):
    L = len(p1)
    # assert L == len(p2)
    if L <= 16:
        o = [0] * (L * 2)
        for i, v1 in enumerate(p1):
            for j, v2 in enumerate(p2):
                if v1 and v2:
                    o[i + j] ^= gexptable[glogtable[v1] + glogtable[v2]]
        return o
    if L % 2:
        p1 = p1 + [0]
        p2 = p2 + [0]
        L += 1
    halflen = L // 2
    low1 = p1[:halflen]
    high1 = p1[halflen:]
    sum1 = [l ^ h for l, h in zip(low1, high1)]
    low2 = p2[:halflen]
    high2 = p2[halflen:]
    sum2 = [l ^ h for l, h in zip(low2, high2)]
    z2 = karatsuba_mul(high1, high2)
    z0 = karatsuba_mul(low1, low2)
    z1 = [m ^ _z0 ^ _z2 for m, _z0, _z2 in zip(karatsuba_mul(sum1, sum2), z0, z2)]
    o = z0[:halflen] + \
        [a ^ b for a, b in zip(z0[halflen:], z1[:halflen])] + \
        [a ^ b for a, b in zip(z2[:halflen], z1[halflen:])] + \
        z2[halflen:]
    return o
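The Karatsuba identity the function above exploits, shown over plain integers (over GF(2^16) every + and - below becomes XOR): one multiplication of half-sums replaces the two cross products.

low1, high1 = 3, 5            # p1 = 3 + 5*X, where X = x^halflen
low2, high2 = 7, 2            # p2 = 7 + 2*X
z0 = low1 * low2              # low part
z2 = high1 * high2            # high part
z1 = (low1 + high1) * (low2 + high2) - z0 - z2   # middle, one extra mul only
assert z1 == low1 * high2 + high1 * low2
# p1 * p2 = z0 + z1*X + z2*X^2, i.e. 21 + 41*X + 10*X^2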

def mk_root_1(xs):
    root = [1]
    for x in xs:
        logx = glogtable[x]
        root.insert(0, 0)
        for j in range(len(root)-1):
            if root[j+1] and x:
                root[j] ^= gexptable[glogtable[root[j+1]] + logx]
    return root

def mk_root_2(xs):
    if len(xs) >= 128:
        return karatsuba_mul(mk_root_2(xs[:len(xs) // 2]), mk_root_2(xs[len(xs) // 2:]))[:len(xs) + 1]
    root = [1]
    for x in xs:
        logx = glogtable[x]
        root.insert(0, 0)
        for j in range(len(root)-1):
            if root[j+1] and x:
                root[j] ^= gexptable[glogtable[root[j+1]] + logx]
    return root

def derivative(root):
    return [0 if i % 2 else r for i, r in enumerate(root[1:])]

# Credit to http://people.csail.mit.edu/madhu/ST12/scribe/lect06.pdf for the algorithm
def xn_mod_poly(p):
    if len(p) == 1:
        return [galois_div(1, p[0])]
    halflen = len(p) // 2
    lowinv = xn_mod_poly(p[:halflen])
    submod_high = karatsuba_mul(lowinv, p[:halflen])[halflen:]
    med = karatsuba_mul(p[halflen:], lowinv)[:halflen]
    med_plus_high = [x ^ y for x, y in zip(med, submod_high)]
    highinv = karatsuba_mul(med_plus_high, lowinv)
    o = (lowinv + highinv)[:len(p)]
    print(halflen, lowinv, submod_high, med, highinv)
    # assert karatsuba_mul(o, p)[:len(p)] == [1] + [0] * (len(p) - 1)
    return o
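The same "double the precision each round" power-series inversion, sketched over the rationals (the version above does it in GF(2^16), where addition is XOR): if v inverts p modulo x^k, then v * (2 - p*v) inverts p modulo x^2k.

from fractions import Fraction

def poly_mul(a, b, n):                      # product truncated mod x^n
    o = [Fraction(0)] * n
    for i, ai in enumerate(a):
        for j, bj in enumerate(b):
            if i + j < n:
                o[i + j] += ai * bj
    return o

def series_inverse(p, n):
    v = [Fraction(1) / p[0]]                # inverse mod x^1
    k = 1
    while k < n:
        k *= 2                              # Newton step: v = v * (2 - p*v)
        pv = poly_mul(p[:k], v, k)
        two_minus_pv = [2 - pv[0]] + [-c for c in pv[1:]]
        v = poly_mul(v, two_minus_pv, k)
    return v[:n]

p = [Fraction(c) for c in (1, 3, 2, 5)]     # 1 + 3x + 2x^2 + 5x^3
inv = series_inverse(p, 4)
assert poly_mul(p, inv, 4) == [1, 0, 0, 0]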

def mod(a, b):
    assert len(a) == 2 * (len(b) - 1)
    L = len(b)
    inv_rev_b = xn_mod_poly(b[::-1] + [0] * (len(a) - L))[:L]
    quot = karatsuba_mul(inv_rev_b, a[::-1][:L])[:L-1][::-1]
    subt = karatsuba_mul(b, quot + [0])[:-1]
    o = [x ^ y for x, y in zip(a[:L-1], subt[:L-1])]
    # assert [x^y for x, y in zip(karatsuba_mul(quot + [0], b), o)] == a
    return o

def multi_eval_1(poly, xs):
    return [eval_poly_at(poly, x) for x in xs]

def multi_eval_2(poly, xs):
    if len(xs) <= 1024:
        return [eval_poly_at(poly, x) for x in xs]
    halflen = len(xs) // 2
    return multi_eval_2(mod(poly, mk_root_2(xs[:halflen])), xs[:halflen]) + \
           multi_eval_2(mod(poly, mk_root_2(xs[halflen:])), xs[halflen:])
    # [eval_poly_at(poly, xs[-2]), eval_poly_at(poly, xs[-1])]
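Why reducing by the subproduct root polynomial is sound, checked over the integers: if m(a) = 0 then p(a) = (p mod m)(a), so each half of the x list only ever sees a remainder polynomial roughly half the size.

def polymod(a, b):          # big-endian integer coefficients, b monic
    a = list(a)
    while len(a) >= len(b):
        f = a[0]            # b is monic, so this is the quotient coefficient
        for i in range(len(b)):
            a[i] -= f * b[i]
        a.pop(0)            # the leading term is now zero
    return a

def horner(p, x):
    v = 0
    for c in p:
        v = v * x + c
    return v

p = [1, 4, 1, 7]            # x^3 + 4x^2 + x + 7
m = [1, -5, 6]              # (x - 2)(x - 3)
r = polymod(p, m)
for a in (2, 3):            # at the roots of m, p and (p mod m) agree
    assert horner(p, a) == horner(r, a)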
@@ -1,340 +0,0 @@
#include "stdlib.h"
#include "stdio.h"
#include <iostream>
#include <vector>
#include <deque>

using namespace std;

vector<int> glogtable(65536, 0);
vector<int> gexptable(196608, 0);

const int ROOT_CUTOFF = 32;

void initialize_tables() {
    int v = 1;
    for (int i = 0; i < 65536; i++) {
        glogtable[v] = i;
        gexptable[i] = v;
        gexptable[i + 65535] = v;
        gexptable[i + 131070] = v;
        if (v & 32768)
            v = (v * 2) ^ v ^ 103425;
        else
            v = (v * 2) ^ v;
    }
}

int eval_poly_at(vector<int> poly, int x) {
    if (x == 0)
        return poly[0];
    int logx = glogtable[x];
    int y = 0;
    for (int i = 0; i < poly.size(); i++) {
        if (poly[i])
            y ^= gexptable[(logx * i + glogtable[poly[i]]) % 65535];
    }
    return y;
}

int eval_log_poly_at(vector<int> poly, int x) {
    if (x == 0)
        return poly[0] == 65537 ? 0 : gexptable[poly[0]];
    int logx = glogtable[x];
    int y = 0;
    for (int i = 0; i < poly.size(); i++) {
        if (poly[i] != 65537)
            y ^= gexptable[(logx * i + poly[i]) % 65535];
    }
    return y;
}

// Compute the product of two (equal length) polynomials. Takes ~O(N ** 1.59) time.
vector<int> karatsuba_mul(vector<int> p, vector<int> q) {
    int L = p.size();
    if (L <= 64) {
        vector<int> o(L * 2);
        vector<int> logq(L);
        for (int i = 0; i < L; i++) logq[i] = glogtable[q[i]];
        for (int i = 0; i < L; i++) {
            int log_pi = glogtable[p[i]];
            for (int j = 0; j < L; j++) {
                if (p[i] && q[j])
                    o[i + j] ^= gexptable[log_pi + logq[j]];
            }
        }
        return o;
    }
    if (L % 2) {
        L += 1;
        p.push_back(0);
        q.push_back(0);
    }
    int halflen = L / 2;
    vector<int> low1 = vector<int>(p.begin(), p.begin() + halflen);
    vector<int> low2 = vector<int>(q.begin(), q.begin() + halflen);
    vector<int> high1 = vector<int>(p.begin() + halflen, p.end());
    vector<int> high2 = vector<int>(q.begin() + halflen, q.end());
    vector<int> sum1(halflen);
    vector<int> sum2(halflen);
    for (int i = 0; i < halflen; i++) {
        sum1[i] = low1[i] ^ high1[i];
        sum2[i] = low2[i] ^ high2[i];
    }
    vector<int> z0 = karatsuba_mul(low1, low2);
    vector<int> z2 = karatsuba_mul(high1, high2);
    vector<int> m = karatsuba_mul(sum1, sum2);
    vector<int> o(L * 2);
    for (int i = 0; i < L; i++) {
        o[i] ^= z0[i];
        o[i + halflen] ^= (m[i] ^ z0[i] ^ z2[i]);
        o[i + L] ^= z2[i];
    }
    return o;
}

vector<int> mk_root(vector<int> xs) {
    int L = xs.size();
    if (L >= ROOT_CUTOFF) {
        int halflen = L / 2;
        vector<int> left = vector<int>(xs.begin(), xs.begin() + halflen);
        vector<int> right = vector<int>(xs.begin() + halflen, xs.end());
        vector<int> o = karatsuba_mul(mk_root(left), mk_root(right));
        o.resize(L + 1);
        return o;
    }
    vector<int> root(L + 1);
    root[L] = 1;
    for (int i = 0; i < L; i++) {
        int logx = glogtable[xs[i]];
        int offset = L - i - 1;
        root[offset] = 0;
        for (int j = offset; j < i + 1 + offset; j++) {
            if (root[j + 1] and xs[i])
                root[j] ^= gexptable[glogtable[root[j+1]] + logx];
        }
    }
    return root;
}

vector<int> subroot_linear_combination(vector<int> xs, vector<int> factors) {
    int L = xs.size();
    /*if (L <= ROOT_CUTOFF) {
        vector<int> out(L + 1);
        vector<int> root = mk_root(xs);
        for (int i = 0; i < L; i++) {
            vector<int> output(L + 1);
            output[root.size() - 2] = 1;
            int logx = glogtable[xs[i]];
            if (factors[i]) {
                int log_fac = glogtable[factors[i]];
                for (int j = root.size() - 2; j > 0; j--) {
                    if (output[j] and xs[i])
                        output[j - 1] = root[j] ^ gexptable[glogtable[output[j]] + logx];
                    else
                        output[j - 1] = root[j];
                    out[j] ^= gexptable[glogtable[output[j]] + log_fac];
                }
                out[0] ^= gexptable[glogtable[output[0]] + log_fac];
            }
        }
        return out;
    }*/
    if (L == 1) {
        vector<int> o(2);
        o[0] = factors[0];
        return o;
    }
    int halflen = L / 2;
    vector<int> xs_left = vector<int>(xs.begin(), xs.begin() + halflen);
    vector<int> xs_right = vector<int>(xs.begin() + halflen, xs.end());
    vector<int> factors_left = vector<int>(factors.begin(), factors.begin() + halflen);
    vector<int> factors_right = vector<int>(factors.begin() + halflen, factors.end());
    vector<int> R1 = mk_root(xs_left);
    vector<int> R2 = mk_root(xs_right);
    vector<int> o1 = karatsuba_mul(R1, subroot_linear_combination(xs_right, factors_right));
    vector<int> o2 = karatsuba_mul(R2, subroot_linear_combination(xs_left, factors_left));
    vector<int> o(L + 1);
    for (int i = 0; i < L; i++) {
        o[i] = o1[i] ^ o2[i];
    }
    return o;
}


vector<int> derivative_and_square_base(vector<int> p) {
    vector<int> o((p.size() - 1) / 2);
    for (int i = 0; i < o.size(); i += 1) {
        o[i] = p[i * 2 + 1];
    }
    return o;
}

vector<int> poly_to_logs(vector<int> p) {
    vector<int> o(p.size());
    for (int i = 0; i < p.size(); i++) {
        if (p[i])
            o[i] = glogtable[p[i]];
        else
            o[i] = 65537;
    }
    return o;
}

vector<int> xn_mod_poly(vector<int> inp) {
    if (inp.size() == 1) {
        vector<int> o(1);
        o[0] = gexptable[65535 - glogtable[inp[0]]];
        return o;
    }
    int halflen = inp.size() / 2;
    int highlen = inp.size() - (inp.size() / 2);
    vector<int> low(inp.begin(), inp.begin() + halflen);
    vector<int> high(inp.begin() + halflen, inp.end());
    vector<int> lowinv = xn_mod_poly(low);
    vector<int> submod = karatsuba_mul(lowinv, low);
    vector<int> submod_high(submod.begin() + halflen, submod.end());
    lowinv.resize(highlen);
    vector<int> med = karatsuba_mul(high, lowinv);
    vector<int> med_plus_high(halflen);
    for (int i = 0; i < halflen; i++) {
        med_plus_high[i] = med[i] ^ submod_high[i];
    }
    vector<int> highinv = karatsuba_mul(med_plus_high, lowinv);
    vector<int> o(inp.size());
    for (int i = 0; i < halflen; i++) {
        o[i] = lowinv[i];
        o[i + halflen] = highinv[i];
    }
    return o;
}

vector<int> reverse(vector<int> inp) {
    vector<int> o(inp.size());
    for (int i = 0; i < inp.size(); i++) {
        o[inp.size() - 1 - i] = inp[i];
    }
    return o;
}

vector<int> mod(vector<int> a, vector<int> b) {
    int L = b.size();
    vector<int> rev_b = reverse(b);
    rev_b.resize((L - 1) * 2);
    vector<int> inv_rev_b = xn_mod_poly(reverse(b));
    inv_rev_b.resize(L);
    vector<int> rev_a = reverse(a);
    rev_a.resize(L);
    vector<int> rev_quotient = karatsuba_mul(inv_rev_b, rev_a);
    rev_quotient.resize(L - 1);
    vector<int> quotient = reverse(rev_quotient);
    quotient.resize(L);
    vector<int> diff = karatsuba_mul(b, quotient);
    vector<int> o(L-1);
    for (int i = 0; i < L-1; i++) {
        o[i] = a[i] ^ diff[i];
    }
    return o;
}

vector<int> multi_eval(vector<int> poly, vector<int> xs) {
    int L = xs.size();
    if (L <= 1024) {
        vector<int> o(L);
        vector<int> logz = poly_to_logs(poly);
        for (int i = 0; i < L; i++) {
            o[i] = eval_log_poly_at(logz, xs[i]);
        }
        return o;
    }
    int halflen = L / 2;
    vector<int> left(xs.begin(), xs.begin() + halflen);
    vector<int> right(xs.begin() + halflen, xs.end());
    vector<int> o1;
    vector<int> o2;
    if (poly.size() < xs.size()) {
        o1 = multi_eval(poly, left);
        o2 = multi_eval(poly, right);
    }
    else {
        o1 = multi_eval(mod(poly, mk_root(left)), left);
        o2 = multi_eval(mod(poly, mk_root(right)), right);
    }
    o1.resize(L);
    for (int i = 0; i < halflen; i++) {
        o1[halflen + i] = o2[i];
    }
    return o1;
}

vector<int> lagrange_interp(vector<int> ys, vector<int> xs) {
    int xs_size = xs.size();
    vector<int> root = mk_root(xs);
    vector<int> rootprime = derivative_and_square_base(root);
    vector<int> xsquares(xs_size);
    for (int i = 0; i < xs_size; i++)
        xsquares[i] = xs[i] ? gexptable[glogtable[xs[i]] * 2] : 0;
    vector<int> denoms = multi_eval(rootprime, xsquares);
    vector<int> factors(xs_size);
    for (int i = 0; i < xs_size; i++) {
        if (ys[i])
            factors[i] = gexptable[glogtable[ys[i]] + 65535 - glogtable[denoms[i]]];
    }
    vector<int> o = subroot_linear_combination(xs, factors);
    o.resize(xs_size);
    return o;
}
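The trick behind derivative_and_square_base(): the Lagrange denominator prod_{j != i} (x_i - x_j) equals Z'(x_i), where Z(x) = prod_j (x - x_j) is the root polynomial, and in characteristic 2 the derivative keeps only the odd-degree coefficients, so Z'(x) can be evaluated as a half-size polynomial at x^2 (the xsquares above). The denominator identity, checked over the integers in Python:

from math import prod

xs = [1, 2, 4]

def zprime(x):
    # derivative of Z(x) = prod (x - xj), by the product rule
    return sum(prod(x - xj for j, xj in enumerate(xs) if j != i)
               for i, _ in enumerate(xs))

for i, xi in enumerate(xs):
    denom = prod(xi - xj for j, xj in enumerate(xs) if j != i)
    assert denom == zprime(xi)   # all other product-rule terms vanish at xi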

const int SIZE = 4096;


int main() {
    initialize_tables();
    /*int myxs[] = {1, 2, 3, 4, 5};
    std::vector<int> test (myxs, myxs + sizeof(myxs) / sizeof(int) );
    int myxs2[] = {6, 7, 8, 9, 10, 11, 12, 13};
    std::vector<int> test2 (myxs2, myxs2 + sizeof(myxs2) / sizeof(int) );*/
    /*std::vector<int> test(257);
    for (int i = 0; i < 257; i++) test[i] = i;
    std::vector<int> test2(512);
    for (int i = 0; i < 512; i++) test2[i] = 1000 + i;
    vector<int> moose = mod(test2, test);
    for (int i = 0; i < 256; i++) cout << moose[i] << " ";
    cout << "\n";*/

    vector<int> xs(SIZE);
    vector<int> ys(SIZE);
    for (int v = 0; v < SIZE; v++) {
        ys[v] = v * 3;
        xs[v] = 1000 + v * 7;
    }
    //vector<int> d = derivative(mk_root(xs));
    //for (int i = 0; i < d.size(); i++) cout << d[i] << " ";
    //cout << "\n";
    /*vector<int> prod = mk_root(xs);
    vector<int> prod = karatsuba_mul(xs, ys);
    for (int i = 0; i < SIZE + 1; i++)
        cout << prod[i] << " ";
    cout << "\n";
    cout << eval_poly_at(prod, 189) << " " << gexptable[glogtable[eval_poly_at(xs, 189)] + glogtable[eval_poly_at(ys, 189)]] << "\n";*/
    for (int a = 0; a < 10; a++) {
        ys[0] = a;
        vector<int> poly = lagrange_interp(ys, xs);
        vector<int> new_xs(SIZE);
        for (int i = 0; i < SIZE; i++) new_xs[i] = SIZE + i;
        vector<int> results = multi_eval(poly, new_xs);
        cout << eval_poly_at(poly, 1700) << "\n";
        unsigned int o = 0;
        for (int i = 0; i < SIZE; i++) {
            o += results[i];
        }
        cout << o << "\n";
    }
    //cout << eval_poly_at(poly, 0) << " " << ys[0] << "\n";
    //cout << eval_poly_at(poly, 134) << " " << ys[134] << "\n";
    //cout << eval_poly_at(poly, 375) << " " << ys[375] << "\n";
    //int o;
    //for (int i = 0; i < 524288; i ++)
    //    o += eval_poly_at(poly, i % 65536);
    //std::cout << o;
}
@@ -1,238 +0,0 @@
|
||||
#include "stdlib.h"
|
||||
#include "stdio.h"
|
||||
#include <iostream>
|
||||
#include <vector>
|
||||
#include <deque>
|
||||
|
||||
using namespace std;
|
||||
|
||||
vector<int> glogtable(65536, 0);
|
||||
vector<int> gexptable(196608, 0);
|
||||
|
||||
const int ROOT_CUTOFF = 32;
|
||||
|
||||
void initialize_tables() {
|
||||
int v = 1;
|
||||
for (int i = 0; i < 65536; i++) {
|
||||
glogtable[v] = i;
|
||||
gexptable[i] = v;
|
||||
gexptable[i + 65535] = v;
|
||||
gexptable[i + 131070] = v;
|
||||
if (v & 32768)
|
||||
v = (v * 2) ^ v ^ 103425;
|
||||
else
|
||||
v = (v * 2) ^ v;
|
||||
}
|
||||
}
|
||||
|
||||
int eval_poly_at(vector<int> poly, int x) {
|
||||
if (x == 0)
|
||||
return poly[0];
|
||||
int logx = glogtable[x];
|
||||
int y = 0;
|
||||
for (int i = 0; i < poly.size(); i++) {
|
||||
if (poly[i])
|
||||
y ^= gexptable[(logx * i + glogtable[poly[i]]) % 65535];
|
||||
}
|
||||
return y;
|
||||
}
|
||||
|
||||
int eval_log_poly_at(vector<int> poly, int x) {
|
||||
if (x == 0)
|
||||
return poly[0] == 65537 ? 0 : gexptable[poly[0]];
|
||||
int logx = glogtable[x];
|
||||
int y = 0;
|
||||
for (int i = 0; i < poly.size(); i++) {
|
||||
if (poly[i] != 65537)
|
||||
y ^= gexptable[(logx * i + poly[i]) % 65535];
|
||||
}
|
||||
return y;
|
||||
}
|
||||
|
||||
// Compute the product of two (equal length) polynomials. Takes ~O(N ** 1.59) time.
|
||||
vector<int> karatsuba_mul(vector<int> p, vector<int> q) {
|
||||
int L = p.size();
|
||||
if (L <= 64) {
|
||||
vector<int> o(L * 2);
|
||||
vector<int> logq(L);
|
||||
for (int i = 0; i < L; i++) logq[i] = glogtable[q[i]];
|
||||
for (int i = 0; i < L; i++) {
|
||||
int log_pi = glogtable[p[i]];
|
||||
for (int j = 0; j < L; j++) {
|
||||
if (p[i] && q[j])
|
||||
o[i + j] ^= gexptable[log_pi + logq[j]];
|
||||
}
|
||||
}
|
||||
return o;
|
||||
}
|
||||
if (L % 2) {
|
||||
L += 1;
|
||||
p.push_back(0);
|
||||
q.push_back(0);
|
||||
}
|
||||
int halflen = L / 2;
|
||||
vector<int> low1 = vector<int>(p.begin(), p.begin() + halflen);
|
||||
vector<int> low2 = vector<int>(q.begin(), q.begin() + halflen);
|
||||
vector<int> high1 = vector<int>(p.begin() + halflen, p.end());
|
||||
vector<int> high2 = vector<int>(q.begin() + halflen, q.end());
|
||||
vector<int> sum1(halflen);
|
||||
vector<int> sum2(halflen);
|
||||
for (int i = 0; i < halflen; i++) {
|
||||
sum1[i] = low1[i] ^ high1[i];
|
||||
sum2[i] = low2[i] ^ high2[i];
|
||||
}
|
||||
vector<int> z0 = karatsuba_mul(low1, low2);
|
||||
vector<int> z2 = karatsuba_mul(high1, high2);
|
||||
vector<int> m = karatsuba_mul(sum1, sum2);
|
||||
vector<int> o(L * 2);
|
||||
for (int i = 0; i < L; i++) {
|
||||
o[i] ^= z0[i];
|
||||
o[i + halflen] ^= (m[i] ^ z0[i] ^ z2[i]);
|
||||
o[i + L] ^= z2[i];
|
||||
}
|
||||
return o;
|
||||
}
|
||||
|
||||
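// Builds the "root" polynomial Z(x) = (x - xs[0]) * ... * (x - xs[L-1])
// (subtraction is XOR in GF(2^16)); for L >= ROOT_CUTOFF, splits in half
// and multiplies the sub-roots with karatsuba_mul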
vector<int> mk_root(vector<int> xs) {
|
||||
int L = xs.size();
|
||||
if (L >= ROOT_CUTOFF) {
|
||||
int halflen = L / 2;
|
||||
vector<int> left = vector<int>(xs.begin(), xs.begin() + halflen);
|
||||
vector<int> right = vector<int>(xs.begin() + halflen, xs.end());
|
||||
vector<int> o = karatsuba_mul(mk_root(left), mk_root(right));
|
||||
o.resize(L + 1);
|
||||
return o;
|
||||
}
|
||||
vector<int> root(L + 1);
|
||||
root[L] = 1;
|
||||
for (int i = 0; i < L; i++) {
|
||||
int logx = glogtable[xs[i]];
|
||||
int offset = L - i - 1;
|
||||
root[offset] = 0;
|
||||
for (int j = offset; j < i + 1 + offset; j++) {
|
||||
if (root[j + 1] and xs[i])
|
||||
root[j] ^= gexptable[glogtable[root[j+1]] + logx];
|
||||
}
|
||||
}
|
||||
return root;
|
||||
}
|
||||
|
||||
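// Computes sum_i factors[i] * Z(x) / (x - xs[i]) by divide and conquer:
// each half's combination is multiplied by the other half's root polynomial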
vector<int> subroot_linear_combination(vector<int> xs, vector<int> factors) {
|
||||
int L = xs.size();
|
||||
/*if (L <= ROOT_CUTOFF) {
|
||||
vector<int> out(L + 1);
|
||||
vector<int> root = mk_root(xs);
|
||||
for (int i = 0; i < L; i++) {
|
||||
vector<int> output(L + 1);
|
||||
output[root.size() - 2] = 1;
|
||||
int logx = glogtable[xs[i]];
|
||||
if (factors[i]) {
|
||||
int log_fac = glogtable[factors[i]];
|
||||
for (int j = root.size() - 2; j > 0; j--) {
|
||||
if (output[j] and xs[i])
|
||||
output[j - 1] = root[j] ^ gexptable[glogtable[output[j]] + logx];
|
||||
else
|
||||
output[j - 1] = root[j];
|
||||
out[j] ^= gexptable[glogtable[output[j]] + log_fac];
|
||||
}
|
||||
out[0] ^= gexptable[glogtable[output[0]] + log_fac];
|
||||
}
|
||||
}
|
||||
return out;
|
||||
}*/
|
||||
if (L == 1) {
|
||||
vector<int> o(2);
|
||||
o[0] = factors[0];
|
||||
return o;
|
||||
}
|
||||
int halflen = L / 2;
|
||||
vector<int> xs_left = vector<int>(xs.begin(), xs.begin() + halflen);
|
||||
vector<int> xs_right = vector<int>(xs.begin() + halflen, xs.end());
|
||||
vector<int> factors_left = vector<int>(factors.begin(), factors.begin() + halflen);
|
||||
vector<int> factors_right = vector<int>(factors.begin() + halflen, factors.end());
|
||||
vector<int> R1 = mk_root(xs_left);
|
||||
vector<int> R2 = mk_root(xs_right);
|
||||
vector<int> o1 = karatsuba_mul(R1, subroot_linear_combination(xs_right, factors_right));
|
||||
vector<int> o2 = karatsuba_mul(R2, subroot_linear_combination(xs_left, factors_left));
|
||||
vector<int> o(L + 1);
|
||||
for (int i = 0; i < L; i++) {
|
||||
o[i] = o1[i] ^ o2[i];
|
||||
}
|
||||
return o;
|
||||
}
|
||||
|
||||
|
||||
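// In characteristic 2 the formal derivative p'(x) keeps only odd-degree terms,
// so p'(x) = q(x^2); this returns q, i.e. the odd-index coefficients of p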
vector<int> derivative_and_square_base(vector<int> p) {
|
||||
vector<int> o((p.size() - 1) / 2);
|
||||
for (int i = 0; i < o.size(); i+= 1) {
|
||||
o[i] = p[i * 2 + 1];
|
||||
}
|
||||
return o;
|
||||
}
|
||||
|
||||
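// Convert coefficients to log form, using the sentinel 65537 for zeros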
vector<int> poly_to_logs(vector<int> p) {
|
||||
vector<int> o(p.size());
|
||||
for (int i = 0; i < p.size(); i++) {
|
||||
if (p[i])
|
||||
o[i] = glogtable[p[i]];
|
||||
else
|
||||
o[i] = 65537;
|
||||
}
|
||||
return o;
|
||||
}
|
||||
|
||||
vector<int> lagrange_interp(vector<int> ys, vector<int> xs) {
|
||||
int xs_size = xs.size();
|
||||
vector<int> root = mk_root(xs);
|
||||
vector<int> log_rootprime = poly_to_logs(derivative_and_square_base(root));
|
||||
vector<int> factors(xs_size);
|
||||
for (int i = 0; i < xs_size; i++) {
|
||||
int x_square = xs[i] ? gexptable[glogtable[xs[i]] * 2] : 0;
|
||||
int denom = eval_log_poly_at(log_rootprime, x_square);
|
||||
if (ys[i])
|
||||
factors[i] = gexptable[glogtable[ys[i]] + 65535 - glogtable[denom]];
|
||||
}
|
||||
return subroot_linear_combination(xs, factors);
|
||||
}
|
||||
|
||||
const int SIZE = 4096;
|
||||
|
||||
|
||||
int main() {
|
||||
initialize_tables();
|
||||
//int myxs[] = {1, 2, 3, 4};
|
||||
//std::vector<int> xs (myxs, myxs + sizeof(myxs) / sizeof(int) );
|
||||
vector<int> xs(SIZE);
|
||||
vector<int> ys(SIZE);
|
||||
for (int v = 0; v < SIZE; v++) {
|
||||
ys[v] = v * 3;
|
||||
xs[v] = 1000 + v * 7;
|
||||
}
|
||||
//vector<int> d = derivative(mk_root(xs));
|
||||
//for (int i = 0; i < d.size(); i++) cout << d[i] << " ";
|
||||
//cout << "\n";
|
||||
/*vector<int> prod = mk_root(xs);
|
||||
vector<int> prod = karatsuba_mul(xs, ys);
|
||||
for (int i = 0; i < SIZE + 1; i++)
|
||||
cout << prod[i] << " ";
|
||||
cout << "\n";
|
||||
cout << eval_poly_at(prod, 189) << " " << gexptable[glogtable[eval_poly_at(xs, 189)] + glogtable[eval_poly_at(ys, 189)]] << "\n";*/
|
||||
for (int a = 0; a < 10; a++) {
|
||||
ys[0] = a;
|
||||
vector<int> poly = lagrange_interp(ys, xs);
|
||||
vector<int> logpoly = poly_to_logs(poly);
|
||||
cout << eval_poly_at(poly, 1700) << "\n";
|
||||
unsigned int o = 0;
|
||||
for (int i = SIZE; i < SIZE * 2; i++) {
|
||||
o += eval_log_poly_at(logpoly, i);
|
||||
}
|
||||
cout << o << "\n";
|
||||
}
|
||||
//cout << eval_poly_at(poly, 0) << " " << ys[0] << "\n";
|
||||
//cout << eval_poly_at(poly, 134) << " " << ys[134] << "\n";
|
||||
//cout << eval_poly_at(poly, 375) << " " << ys[375] << "\n";
|
||||
//int o;
|
||||
//for (int i = 0; i < 524288; i ++)
|
||||
// o += eval_poly_at(poly, i % 65536);
|
||||
//std::cout << o;
|
||||
}
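
For reference, a minimal Python sketch (not a file from this repo) of the GF(2^16) log/exp table construction and multiply that the C++ code above relies on, assuming the same generator 3 and reduction constant 103425:

def build_tables():
    glog, gexp = [0] * 65536, [0] * 196608
    v = 1
    for i in range(65536):
        glog[v] = i
        # gexp is tripled so sums of two logs (up to 131070) index without a mod
        gexp[i] = gexp[i + 65535] = gexp[i + 131070] = v
        # multiply v by the generator 3 (v*2 XOR v), reducing by 103425 on overflow
        v = (v * 2) ^ v ^ (103425 if v & 32768 else 0)
    return glog, gexp

def gf_mul(glog, gexp, a, b):
    # logs/antilogs turn field multiplication into integer addition
    return gexp[glog[a] + glog[b]] if a and b else 0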
|
||||
@@ -1,24 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from setuptools import setup, find_packages
|
||||
|
||||
|
||||
with open('README.md') as f:
|
||||
readme = f.read()
|
||||
|
||||
with open('LICENSE') as f:
|
||||
license = f.read()
|
||||
|
||||
setup(
|
||||
name='ec65536',
|
||||
version='1.0.0',
|
||||
description='Erasure code utilities for GF(2**16)',
|
||||
long_description=readme,
|
||||
author='Vitalik Buterin',
|
||||
author_email='',
|
||||
url='https://github.com/ethereum/research/tree/master/erasure_code/ec65536',
|
||||
license=license,
|
||||
packages=find_packages(exclude=('tests', 'docs')),
|
||||
install_requires=[
|
||||
],
|
||||
)
|
||||
@@ -1,17 +0,0 @@
|
||||
from ec65536 import *
|
||||
|
||||
a = 124
|
||||
b = 8932
|
||||
c = 12415
|
||||
|
||||
assert galois_mul(galois_add(a, b), c) == galois_add(galois_mul(a, c), galois_mul(b, c))
|
||||
|
||||
k1 = list(range(10))
|
||||
k2 = list(range(100, 200))
|
||||
k3 = mul_polys(k1, k2)
|
||||
assert div_polys(k3, k1) == k2
|
||||
assert div_polys(k3, k2) == k1
|
||||
assert galois_mul(eval_poly_at(k1, 9999), eval_poly_at(k2, 9999)) == \
|
||||
eval_poly_at(k3, 9999)
|
||||
k4 = compose_polys(k1, k2)
|
||||
assert eval_poly_at(k4, 9998) == eval_poly_at(k1, eval_poly_at(k2, 9998))
|
||||
@@ -1,27 +0,0 @@
|
||||
import ec65536
|
||||
import rlp
|
||||
import time
|
||||
|
||||
# 12.8 kilobyte test string
|
||||
testdata = 'the cow jumped over the moon!!! ' * 400
|
||||
|
||||
t1 = time.time()
|
||||
prover = ec65536.Prover(testdata)
|
||||
t2 = time.time()
|
||||
print("Created prover in %.2f sec" % (t2 - t1))
|
||||
|
||||
assert ec65536.verify_proof(prover.merkle_root, prover.prove(13), 13)
|
||||
t3 = time.time()
|
||||
print("Created and verified a proof in %.2f sec" % (t3 - t2))
|
||||
|
||||
proofs = [prover.prove(i) for i in range(0, prover.length, 2)]
|
||||
print("Created merkle proofs")
|
||||
|
||||
t4 = time.time()
|
||||
print("Starting to attempt fill")
|
||||
response = ec65536.fill(prover.merkle_root, prover.length // 2, proofs, list(range(0, prover.length, 2)))
|
||||
t5 = time.time()
|
||||
print("Completed fill in %.2f sec" % (t5 - t4))
|
||||
assert response is not False
|
||||
assert b''.join(response)[:len(rlp.encode(testdata))] == rlp.encode(testdata)
|
||||
print("Fill successful")
|
||||
@@ -1,79 +0,0 @@
|
||||
import snappy
|
||||
|
||||
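# Sample ABI-encoded calldata; the [2:] strips the leading '0x' before hex-decoding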
data = bytes.fromhex('0x000000000000000000000000a5548cb22dadac786972a2a91e55af6b4209563a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000018000000000000000000000000000000000000000000000000000000000000001a0000000000000000000000000000000000000000000000000000000000000f12800000000000000000000000000000000000000000000000000000000000186a0000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000002540be40000000000000000000000000000000000000000000000000000000002540be4000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008480c5c7d0000000000000000000000000febebb892587ecf190f3b948dd1dcb60c9679b3400000000000000000000000000000000000000000000000000038d7ea4c6800000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004172b8b9720baf0341264ec195e2550393772c222b10cff626c0863aafd98921516af2320acce7e49b6075bbb5a3ac0ae8dc9555f7e70924a53aedd57f050abe331b00000000000000000000000000000000000000000000000000000000000000'[2:])
|
||||
|
||||
def zrle_compress(inp):
|
||||
o = []
|
||||
pos = 0
|
||||
while pos < len(inp):
|
||||
zcount = 0
|
||||
while pos + zcount < len(inp) and inp[pos + zcount] == 0 and zcount < 255:
|
||||
zcount += 1
|
||||
if zcount > 0:
|
||||
pos += zcount
|
||||
o.extend([0, zcount])
|
||||
else:
|
||||
o.append(inp[pos])
|
||||
pos += 1
|
||||
return bytes(o)
|
||||
|
||||
def zrle_decompress(inp):
|
||||
o = []
|
||||
pos = 0
|
||||
while pos < len(inp):
|
||||
if inp[pos] != 0:
|
||||
o.append(inp[pos])
|
||||
pos += 1
|
||||
else:
|
||||
o.extend([0] * inp[pos + 1])
|
||||
pos += 2
|
||||
return bytes(o)
|
||||
|
||||
def ctl_compress(inp):
|
||||
assert len(inp) % 32 == 0
|
||||
o = []
|
||||
for pos in range(0, len(inp), 32):
|
||||
chunk = inp[pos: pos+32]
|
||||
l_stripped_chunk = chunk.lstrip(b'\x00')
|
||||
r_stripped_chunk = chunk.rstrip(b'\x00')
|
||||
if len(l_stripped_chunk) < len(r_stripped_chunk):
|
||||
o.append(len(l_stripped_chunk))
|
||||
o.extend(l_stripped_chunk)
|
||||
else:
|
||||
o.append(128 + len(r_stripped_chunk))
|
||||
o.extend(r_stripped_chunk)
|
||||
return bytes(o)
|
||||
|
||||
def ctl_decompress(inp):
|
||||
o = []
|
||||
pos = 0
|
||||
while pos < len(inp):
|
||||
chunk_length = inp[pos]
|
||||
stripped_chunk = inp[pos + 1: pos + 1 + (chunk_length % 128)]
|
||||
if chunk_length < 128:
|
||||
o.extend([0] * (32 - chunk_length))
|
||||
o.extend(stripped_chunk)
|
||||
else:
|
||||
o.extend(stripped_chunk)
|
||||
o.extend([0] * (160 - chunk_length))
|
||||
pos += 1 + (chunk_length % 128)
|
||||
return bytes(o)
|
||||
|
||||
def gascost(data):
|
||||
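# calldata gas: 16 per nonzero byte, 4 per zero byte, hence 16*len - 12*(zero count)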
return len(data) * 16 - 12 * data.count(b'\x00')
|
||||
|
||||
print("Raw data: length {} gas cost {}".format(len(data), gascost(data)))
|
||||
|
||||
#for i in range(0, len(data), 32):
|
||||
# print(data[i:i+32].hex())
|
||||
|
||||
zdata = zrle_compress(data)
|
||||
assert data == zrle_decompress(zdata)
|
||||
print("ZRLE: length {} gas cost {}".format(len(zdata), gascost(zdata)))
|
||||
|
||||
cdata = ctl_compress(data)
|
||||
assert data == ctl_decompress(cdata)
|
||||
print("CTL: length {} gas cost {}".format(len(cdata), gascost(cdata)))
|
||||
|
||||
sdata = snappy.compress(data)
|
||||
print("Snappy: length {} gas cost {}".format(len(sdata), gascost(sdata)))
|
||||
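
A tiny worked example of the zero-run-length scheme above (illustrative): literal bytes pass through, and each run of zeros becomes the pair (0, run_length).

assert zrle_compress(b'\x05\x00\x00\x00\x07') == bytes([5, 0, 3, 7])
assert zrle_decompress(bytes([5, 0, 3, 7])) == b'\x05\x00\x00\x00\x07'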
@@ -1,224 +0,0 @@
|
||||
# Creates an object that includes convenience operations for numbers
|
||||
# and polynomials in some prime field
|
||||
class PrimeField():
|
||||
def __init__(self, modulus):
|
||||
assert pow(2, modulus, modulus) == 2
|
||||
self.modulus = modulus
|
||||
|
||||
def add(self, x, y):
|
||||
return (x+y) % self.modulus
|
||||
|
||||
def sub(self, x, y):
|
||||
return (x-y) % self.modulus
|
||||
|
||||
def mul(self, x, y):
|
||||
return (x*y) % self.modulus
|
||||
|
||||
def exp(self, x, p):
|
||||
return pow(x, p, self.modulus)
|
||||
|
||||
# Modular inverse using the extended Euclidean algorithm
|
||||
def inv(self, a):
|
||||
if a == 0:
|
||||
return 0
|
||||
lm, hm = 1, 0
|
||||
low, high = a % self.modulus, self.modulus
|
||||
while low > 1:
|
||||
r = high//low
|
||||
nm, new = hm-lm*r, high-low*r
|
||||
lm, low, hm, high = nm, new, lm, low
|
||||
return lm % self.modulus
|
||||
|
||||
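# Batch inversion (Montgomery's trick): one modular inverse plus O(n)
# multiplications instead of n separate inversions; zero entries map to zero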
def multi_inv(self, values):
|
||||
partials = [1]
|
||||
for i in range(len(values)):
|
||||
partials.append(self.mul(partials[-1], values[i] or 1))
|
||||
inv = self.inv(partials[-1])
|
||||
outputs = [0] * len(values)
|
||||
for i in range(len(values), 0, -1):
|
||||
outputs[i-1] = self.mul(partials[i-1], inv) if values[i-1] else 0
|
||||
inv = self.mul(inv, values[i-1] or 1)
|
||||
return outputs
|
||||
|
||||
def div(self, x, y):
|
||||
return self.mul(x, self.inv(y))
|
||||
|
||||
# Evaluate a polynomial at a point
|
||||
def eval_poly_at(self, p, x):
|
||||
y = 0
|
||||
power_of_x = 1
|
||||
for i, p_coeff in enumerate(p):
|
||||
y += power_of_x * p_coeff
|
||||
power_of_x = (power_of_x * x) % self.modulus
|
||||
return y % self.modulus
|
||||
|
||||
# Arithmetic for polynomials
|
||||
def add_polys(self, a, b):
|
||||
return [((a[i] if i < len(a) else 0) + (b[i] if i < len(b) else 0))
|
||||
% self.modulus for i in range(max(len(a), len(b)))]
|
||||
|
||||
def sub_polys(self, a, b):
|
||||
return [((a[i] if i < len(a) else 0) - (b[i] if i < len(b) else 0))
|
||||
% self.modulus for i in range(max(len(a), len(b)))]
|
||||
|
||||
def mul_by_const(self, a, c):
|
||||
return [(x*c) % self.modulus for x in a]
|
||||
|
||||
def mul_polys(self, a, b):
|
||||
o = [0] * (len(a) + len(b) - 1)
|
||||
for i, aval in enumerate(a):
|
||||
for j, bval in enumerate(b):
|
||||
o[i+j] += aval * bval
|
||||
return [x % self.modulus for x in o]
|
||||
|
||||
def div_polys(self, a, b):
|
||||
assert len(a) >= len(b)
|
||||
a = [x for x in a]
|
||||
o = []
|
||||
apos = len(a) - 1
|
||||
bpos = len(b) - 1
|
||||
diff = apos - bpos
|
||||
while diff >= 0:
|
||||
quot = self.div(a[apos], b[bpos])
|
||||
o.insert(0, quot)
|
||||
for i in range(bpos, -1, -1):
|
||||
a[diff+i] -= b[i] * quot
|
||||
apos -= 1
|
||||
diff -= 1
|
||||
return [x % self.modulus for x in o]
|
||||
|
||||
def mod_polys(self, a, b):
|
||||
return self.sub_polys(a, self.mul_polys(b, self.div_polys(a, b)))[:len(b)-1]
|
||||
|
||||
# Build a polynomial from a few coefficients
|
||||
def sparse(self, coeff_dict):
|
||||
o = [0] * (max(coeff_dict.keys()) + 1)
|
||||
for k, v in coeff_dict.items():
|
||||
o[k] = v % self.modulus
|
||||
return o
|
||||
|
||||
# Build a polynomial that returns 0 at all specified xs
|
||||
def zpoly(self, xs):
|
||||
root = [1]
|
||||
for x in xs:
|
||||
root.insert(0, 0)
|
||||
for j in range(len(root)-1):
|
||||
root[j] -= root[j+1] * x
|
||||
return [x % self.modulus for x in root]
|
||||
|
||||
# Given d+1 y values and x values with no errors, recovers the original
|
||||
# degree-d polynomial.
|
||||
# Lagrange interpolation works roughly in the following way.
|
||||
# 1. Suppose you have a set of points, eg. x = [1, 2, 3], y = [2, 5, 10]
|
||||
# 2. For each x, generate a polynomial which equals its corresponding
|
||||
# y coordinate at that point and 0 at all other points provided.
|
||||
# 3. Add these polynomials together.
|
||||
|
||||
def lagrange_interp(self, xs, ys):
|
||||
# Generate master numerator polynomial, eg. (x - x1) * (x - x2) * ... * (x - xn)
|
||||
root = self.zpoly(xs)
|
||||
assert len(root) == len(ys) + 1
|
||||
# print(root)
|
||||
# Generate per-value numerator polynomials, eg. for x=x2,
|
||||
# (x - x1) * (x - x3) * ... * (x - xn), by dividing the master
|
||||
# polynomial back by each x coordinate
|
||||
nums = [self.div_polys(root, [-x, 1]) for x in xs]
|
||||
# Generate denominators by evaluating numerator polys at each x
|
||||
denoms = [self.eval_poly_at(nums[i], xs[i]) for i in range(len(xs))]
|
||||
invdenoms = self.multi_inv(denoms)
|
||||
# Generate output polynomial, which is the sum of the per-value numerator
|
||||
# polynomials rescaled to have the right y values
|
||||
b = [0 for y in ys]
|
||||
for i in range(len(xs)):
|
||||
yslice = self.mul(ys[i], invdenoms[i])
|
||||
for j in range(len(ys)):
|
||||
if nums[i][j] and ys[i]:
|
||||
b[j] += nums[i][j] * yslice
|
||||
return [x % self.modulus for x in b]
|
||||
|
||||
def lagrange_polys(self, xs):
|
||||
ps = []
|
||||
for x in xs:
|
||||
p = [1]
|
||||
for x2 in xs:
|
||||
if x != x2:
|
||||
denominator = self.inv(x - x2)
|
||||
p = self.mul_polys(p, [- x2 * denominator, denominator])
|
||||
ps.append(p)
|
||||
return ps
|
||||
|
||||
def zero_poly(self, xs):
|
||||
p = [1]
|
||||
for x in xs:
|
||||
p = self.mul_polys(p, [-x, 1])
|
||||
return p
|
||||
|
||||
# Optimized poly evaluation for degree 4
|
||||
def eval_quartic(self, p, x):
|
||||
xsq = x * x % self.modulus
|
||||
xcb = xsq * x
|
||||
return (p[0] + p[1] * x + p[2] * xsq + p[3] * xcb) % self.modulus
|
||||
|
||||
# Optimized version of the above restricted to deg-4 polynomials
|
||||
def lagrange_interp_4(self, xs, ys):
|
||||
x01, x02, x03, x12, x13, x23 = \
|
||||
xs[0] * xs[1], xs[0] * xs[2], xs[0] * xs[3], xs[1] * xs[2], xs[1] * xs[3], xs[2] * xs[3]
|
||||
m = self.modulus
|
||||
eq0 = [-x12 * xs[3] % m, (x12 + x13 + x23), -xs[1]-xs[2]-xs[3], 1]
|
||||
eq1 = [-x02 * xs[3] % m, (x02 + x03 + x23), -xs[0]-xs[2]-xs[3], 1]
|
||||
eq2 = [-x01 * xs[3] % m, (x01 + x03 + x13), -xs[0]-xs[1]-xs[3], 1]
|
||||
eq3 = [-x01 * xs[2] % m, (x01 + x02 + x12), -xs[0]-xs[1]-xs[2], 1]
|
||||
e0 = self.eval_poly_at(eq0, xs[0])
|
||||
e1 = self.eval_poly_at(eq1, xs[1])
|
||||
e2 = self.eval_poly_at(eq2, xs[2])
|
||||
e3 = self.eval_poly_at(eq3, xs[3])
|
||||
e01 = e0 * e1
|
||||
e23 = e2 * e3
|
||||
invall = self.inv(e01 * e23)
|
||||
inv_y0 = ys[0] * invall * e1 * e23 % m
|
||||
inv_y1 = ys[1] * invall * e0 * e23 % m
|
||||
inv_y2 = ys[2] * invall * e01 * e3 % m
|
||||
inv_y3 = ys[3] * invall * e01 * e2 % m
|
||||
return [(eq0[i] * inv_y0 + eq1[i] * inv_y1 + eq2[i] * inv_y2 + eq3[i] * inv_y3) % m for i in range(4)]
|
||||
|
||||
# Optimized version of the above restricted to deg-2 polynomials
|
||||
def lagrange_interp_2(self, xs, ys):
|
||||
m = self.modulus
|
||||
eq0 = [-xs[1] % m, 1]
|
||||
eq1 = [-xs[0] % m, 1]
|
||||
e0 = self.eval_poly_at(eq0, xs[0])
|
||||
e1 = self.eval_poly_at(eq1, xs[1])
|
||||
invall = self.inv(e0 * e1)
|
||||
inv_y0 = ys[0] * invall * e1
|
||||
inv_y1 = ys[1] * invall * e0
|
||||
return [(eq0[i] * inv_y0 + eq1[i] * inv_y1) % m for i in range(2)]
|
||||
|
||||
# Batched version of lagrange_interp_4 that amortizes all the inversions into a single multi_inv call
|
||||
def multi_interp_4(self, xsets, ysets):
|
||||
data = []
|
||||
invtargets = []
|
||||
for xs, ys in zip(xsets, ysets):
|
||||
x01, x02, x03, x12, x13, x23 = \
|
||||
xs[0] * xs[1], xs[0] * xs[2], xs[0] * xs[3], xs[1] * xs[2], xs[1] * xs[3], xs[2] * xs[3]
|
||||
m = self.modulus
|
||||
eq0 = [-x12 * xs[3] % m, (x12 + x13 + x23), -xs[1]-xs[2]-xs[3], 1]
|
||||
eq1 = [-x02 * xs[3] % m, (x02 + x03 + x23), -xs[0]-xs[2]-xs[3], 1]
|
||||
eq2 = [-x01 * xs[3] % m, (x01 + x03 + x13), -xs[0]-xs[1]-xs[3], 1]
|
||||
eq3 = [-x01 * xs[2] % m, (x01 + x02 + x12), -xs[0]-xs[1]-xs[2], 1]
|
||||
e0 = self.eval_quartic(eq0, xs[0])
|
||||
e1 = self.eval_quartic(eq1, xs[1])
|
||||
e2 = self.eval_quartic(eq2, xs[2])
|
||||
e3 = self.eval_quartic(eq3, xs[3])
|
||||
data.append([ys, eq0, eq1, eq2, eq3])
|
||||
invtargets.extend([e0, e1, e2, e3])
|
||||
invalls = self.multi_inv(invtargets)
|
||||
o = []
|
||||
for (i, (ys, eq0, eq1, eq2, eq3)) in enumerate(data):
|
||||
invallz = invalls[i*4:i*4+4]
|
||||
inv_y0 = ys[0] * invallz[0] % m
|
||||
inv_y1 = ys[1] * invallz[1] % m
|
||||
inv_y2 = ys[2] * invallz[2] % m
|
||||
inv_y3 = ys[3] * invallz[3] % m
|
||||
o.append([(eq0[i] * inv_y0 + eq1[i] * inv_y1 + eq2[i] * inv_y2 + eq3[i] * inv_y3) % m for i in range(4)])
|
||||
# assert o == [self.lagrange_interp_4(xs, ys) for xs, ys in zip(xsets, ysets)]
|
||||
return o
|
||||
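
A quick usage sketch for the class above (illustrative values only):

f = PrimeField(337)
xs, ys = [1, 2, 3, 4], [10, 20, 30, 40]
poly = f.lagrange_interp(xs, ys)
# the interpolated polynomial passes through all four points
assert all(f.eval_poly_at(poly, x) == y for x, y in zip(xs, ys))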
@@ -1,218 +0,0 @@
|
||||
import blst
|
||||
import hashlib
|
||||
from poly_utils import PrimeField
|
||||
from time import time
|
||||
import sys
|
||||
import gmpy2
|
||||
|
||||
|
||||
#
|
||||
# Proof of concept implementation for Eth1 simple custody
|
||||
#
|
||||
# https://notes.ethereum.org/1Rn2MwsoSWuEUHTnaRgLcw
|
||||
#
|
||||
|
||||
# BLS12_381 curve modulus
|
||||
MODULUS = 0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001
|
||||
|
||||
primefield = PrimeField(MODULUS)
|
||||
|
||||
# Proof of custody parameters
|
||||
N = 15 # bits
|
||||
|
||||
DOMAIN = list(range(N))
|
||||
|
||||
def hash(x):
|
||||
if isinstance(x, bytes):
|
||||
return hashlib.sha256(x).digest()
|
||||
elif isinstance(x, blst.P1):
|
||||
return hash(x.compress())
|
||||
elif isinstance(x, int):
|
||||
return hash(x.to_bytes(32, "little"))
|
||||
b = b""
|
||||
for a in x:
|
||||
if isinstance(a, bytes):
|
||||
b += a
|
||||
elif isinstance(a, int):
|
||||
b += a.to_bytes(32, "little")
|
||||
elif isinstance(a, blst.P1):
|
||||
b += hash(a.compress())
|
||||
return hash(b)
|
||||
|
||||
C_CONSTANTS = [int.from_bytes(hash(i), "little") for i in range(N)]
|
||||
D_CONSTANTS = [int.from_bytes(hash(i + N), "little") for i in range(N)]
|
||||
|
||||
def legendre(x):
|
||||
return gmpy2.jacobi(x, MODULUS)
|
||||
|
||||
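# Tonelli-Shanks square root modulo the prime MODULUS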
def mod_sqrt(a):
|
||||
assert legendre(a) == 1
|
||||
|
||||
# Factor p-1 in the form q * 2^s (with q odd)
|
||||
q, s = MODULUS - 1, 0
|
||||
while q % 2 == 0:
|
||||
s += 1
|
||||
q //= 2
|
||||
|
||||
# Select a z which is a quadratic non residue
|
||||
z = 1
|
||||
while legendre(z) != -1:
|
||||
z += 1
|
||||
c = pow(z, q, MODULUS)
|
||||
|
||||
# Search for a solution
|
||||
x = pow(a, (q + 1) // 2, MODULUS)
|
||||
t = pow(a, q, MODULUS)
|
||||
m = s
|
||||
while t != 1:
|
||||
# Find the lowest i such that t^(2^i) = 1
|
||||
i, e = 0, 2
|
||||
for i in range(1, m):
|
||||
if pow(t, e, MODULUS) == 1:
|
||||
break
|
||||
e *= 2
|
||||
|
||||
# Update next value to iterate
|
||||
b = pow(c, 2**(m - i - 1), MODULUS)
|
||||
x = (x * b) % MODULUS
|
||||
t = (t * b * b) % MODULUS
|
||||
c = (b * b) % MODULUS
|
||||
m = i
|
||||
|
||||
assert (x ** 2 - a) % MODULUS == 0
|
||||
return x
|
||||
|
||||
def is_valid_custody_value(secret_key, custody_value):
|
||||
for i in range(N):
|
||||
if legendre(secret_key + C_CONSTANTS[i] * custody_value + D_CONSTANTS[i]) != 1:
|
||||
return False
|
||||
return True
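
# (each Legendre symbol above is +1 with probability roughly 1/2 for a random
# input, so finding a valid custody value takes ~2^N = 32768 tries in expectation)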
|
||||
|
||||
def lincomb_naive(group_elements, factors, start_value = blst.G1().mult(0)):
|
||||
"""
|
||||
Direct linear combination
|
||||
"""
|
||||
assert len(group_elements) == len(factors)
|
||||
result = start_value.dup()
|
||||
for g, f in zip(group_elements, factors):
|
||||
result.add(g.dup().mult(f))
|
||||
return result
|
||||
|
||||
def generate_setup(N, secret):
|
||||
"""
|
||||
Generates a setup in the G1 group and G2 group, as well as the Lagrange polynomials in G1 (computed naively here rather than via FFT)
|
||||
"""
|
||||
g1_setup = [blst.G1().mult(pow(secret, i, MODULUS)) for i in range(N + 1)]
|
||||
g2_setup = [blst.G2().mult(pow(secret, i, MODULUS)) for i in range(N + 1)]
|
||||
lagrange_polys = primefield.lagrange_polys(list(range(N)))
|
||||
g1_lagrange = [lincomb_naive(g1_setup[:N], p) for p in lagrange_polys]
|
||||
g2_lagrange = [lincomb_naive(g2_setup[:N], p, start_value=blst.G2().mult(0)) for p in lagrange_polys]
|
||||
g2_zero = lincomb_naive(g2_setup, primefield.zero_poly(list(range(N))), start_value=blst.G2().mult(0))
|
||||
g2_one = lincomb_naive(g2_lagrange, [1] * N, start_value=blst.G2().mult(0))
|
||||
return {"g1": g1_setup, "g2": g2_setup, "g1_lagrange": g1_lagrange, "g2_zero": g2_zero, "g2_one": g2_one}
|
||||
|
||||
def compute_proof(setup, secret_key, custody_value):
|
||||
values = [secret_key + C_CONSTANTS[i] * custody_value + D_CONSTANTS[i] for i in range(N)]
|
||||
square_roots = [mod_sqrt(value) for value in values]
|
||||
d = primefield.lagrange_interp(list(range(N)), square_roots)
|
||||
D = lincomb_naive(setup["g1"][:N], d)
|
||||
E = lincomb_naive(setup["g2"][:N], d, start_value=blst.G2().mult(0))
|
||||
|
||||
q = primefield.div_polys(primefield.mul_polys(d, d), primefield.zero_poly(list(range(N))))
|
||||
Pi = lincomb_naive(setup["g1"][:N - 1], q)
|
||||
return D.compress(), E.compress(), Pi.compress()
|
||||
|
||||
def check_proof_simple(setup, public_key_serialized, custody_value, proof):
|
||||
D_serialized, E_serialized, Pi_serialized = proof
|
||||
D = blst.P1(D_serialized)
|
||||
E = blst.P2(E_serialized)
|
||||
Pi = blst.P1(Pi_serialized)
|
||||
public_key = blst.P1(public_key_serialized)
|
||||
|
||||
b_values = [C_CONSTANTS[i] * custody_value + D_CONSTANTS[i] for i in range(N)]
|
||||
B = lincomb_naive(setup["g1_lagrange"], b_values)
|
||||
C = public_key.dup().add(B)
|
||||
|
||||
pairing = blst.PT(blst.G2().to_affine(), D.to_affine())
|
||||
pairing.mul(blst.PT(E.to_affine(), blst.G1().neg().to_affine()))
|
||||
if not pairing.final_exp().is_one():
|
||||
return False
|
||||
|
||||
pairing = blst.PT(E.to_affine(), D.dup().neg().to_affine())
|
||||
pairing.mul(blst.PT(setup["g2_zero"].to_affine(), Pi.to_affine()))
|
||||
pairing.mul(blst.PT(blst.G2().to_affine(), C.to_affine()))
|
||||
if not pairing.final_exp().is_one():
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def check_proof_combined(setup, public_key_serialized, custody_value, proof):
|
||||
D_serialized, E_serialized, Pi_serialized = proof
|
||||
D = blst.P1(D_serialized)
|
||||
E = blst.P2(E_serialized)
|
||||
Pi = blst.P1(Pi_serialized)
|
||||
|
||||
r = int.from_bytes(hash(list(proof) + [public_key_serialized, custody_value]), "little") % MODULUS
|
||||
r2 = r * r % MODULUS
|
||||
|
||||
public_key = blst.P1(public_key_serialized)
|
||||
|
||||
b_values = [C_CONSTANTS[i] * custody_value + D_CONSTANTS[i] for i in range(N)]
|
||||
B = lincomb_naive(setup["g1_lagrange"], b_values)
|
||||
C = public_key.dup().add(B)
|
||||
|
||||
pairing = blst.PT(blst.G2().mult(r).add(E).to_affine(), D.dup().neg().to_affine())
|
||||
pairing.mul(blst.PT(E.to_affine(), blst.G1().mult(r).to_affine()))
|
||||
pairing.mul(blst.PT(blst.G2().to_affine(), C.to_affine()))
|
||||
pairing.mul(blst.PT(setup["g2_zero"].to_affine(), Pi.to_affine()))
|
||||
|
||||
return pairing.final_exp().is_one()
|
||||
|
||||
|
||||
def get_proof_size(proof):
|
||||
return sum(len(x) for x in proof)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
time_a = time()
|
||||
setup = generate_setup(N, 8927347823478352432985)
|
||||
time_b = time()
|
||||
|
||||
print("Computed setup in {0:.3f} ms".format(1000*(time_b - time_a)), file=sys.stderr)
|
||||
|
||||
secret_key = pow(523487, 253478, MODULUS) + 1
|
||||
public_key = blst.G1().mult(secret_key).compress()
|
||||
|
||||
time_a = time()
|
||||
custody_value = 876354679
|
||||
values_tried = 1
|
||||
while not is_valid_custody_value(secret_key, custody_value):
|
||||
custody_value += 1
|
||||
values_tried += 1
|
||||
time_b = time()
|
||||
|
||||
print("Found custody value in {0:.3f} ms after {1} tries".format(1000*(time_b - time_a), values_tried), file=sys.stderr)
|
||||
|
||||
time_a = time()
|
||||
proof = compute_proof(setup, secret_key, custody_value)
|
||||
time_b = time()
|
||||
|
||||
proof_size = get_proof_size(proof)
|
||||
|
||||
print("Computed proof (size = {0} bytes) in {1:.3f} ms".format(proof_size, 1000*(time_b - time_a)), file=sys.stderr)
|
||||
|
||||
time_a = time()
|
||||
assert check_proof_simple(setup, public_key, custody_value, proof)
|
||||
time_b = time()
|
||||
check_time = time_b - time_a
|
||||
|
||||
print("Checked proof in {0:.3f} ms".format(1000*(time_b - time_a)), file=sys.stderr)
|
||||
|
||||
time_a = time()
|
||||
assert check_proof_combined(setup, public_key, custody_value, proof)
|
||||
time_b = time()
|
||||
check_time = time_b - time_a
|
||||
|
||||
print("Checked proof (optimized/combined pairing) in {0:.3f} ms".format(1000*(time_b - time_a)), file=sys.stderr)
|
||||
|
||||
@@ -1,108 +0,0 @@
|
||||
from numpy.random import poisson
|
||||
import math
|
||||
|
||||
# Target active staker size
|
||||
TARGET_AMOUNT_STAKING = 312500
|
||||
# Average time staking before withdrawal
|
||||
AVG_STAKING_TIME = 500
|
||||
# How many withdrawals are permitted in
|
||||
# one day given a certain validator count?
|
||||
def withdrawals_per_day(validators, total_eth_exiting):
|
||||
# return (validators + total_eth_exiting) / 1.07 // 100
|
||||
return validators // 100
|
||||
# return validators * max(1, int(math.log2(total_eth_exiting))) / 13.5 // 100
|
||||
# return int(1 + (total_eth_exiting * validators)**0.5) * 4.9 // 100
|
||||
|
||||
# Get the size of the largest staker. This assumes a
|
||||
# Zipf's law distribution (ie. power law with power=1)
|
||||
# where the nth largest staker is n times smaller than the
|
||||
# largest staker. Calculates a value for the largest staker
|
||||
# such that the total size of nonzero stakers equals the
|
||||
# target amount staking.
|
||||
def get_max_staker_size():
|
||||
def get_sum(sz):
|
||||
tot = 0
|
||||
inc = 1
|
||||
while sz // inc:
|
||||
tot += (sz // inc) * inc
|
||||
inc *= 2
|
||||
return tot
|
||||
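# Add decreasing power-of-two offsets (a binary search from above) to find the
# largest max-staker size whose implied total stake stays at or below the target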
size = 0
|
||||
offset = TARGET_AMOUNT_STAKING
|
||||
while offset:
|
||||
if get_sum(size + offset) < TARGET_AMOUNT_STAKING:
|
||||
size += offset
|
||||
else:
|
||||
offset //= 2
|
||||
return size
|
||||
|
||||
# As a simplification, we make all stakers have validator sizes
|
||||
# be close to the max size divided by a power of two
|
||||
STAKER_SIZES = [get_max_staker_size()]
|
||||
|
||||
while STAKER_SIZES[-1] > 1:
|
||||
STAKER_SIZES.append(STAKER_SIZES[-1] // 2)
|
||||
|
||||
# Active and not yet exiting stakers
|
||||
stakers = {}
|
||||
# Exiting stakers
|
||||
exiting = {}
|
||||
|
||||
# The exit queue
|
||||
exit_queue = []
|
||||
# Total eth exiting
|
||||
total_eth_exiting = 0
|
||||
# How much of the first exiter's deposit we have processed
|
||||
processing_current = 0
|
||||
|
||||
# Fill the staker set initially
|
||||
for i, sz in enumerate(STAKER_SIZES):
|
||||
stakers[sz] = poisson(2**i)
|
||||
|
||||
# Count withdrawn stakers of each size, and total delays
|
||||
# incurred by them, so we can eventually compute the average
|
||||
withdrawn = {}
|
||||
tot_delays = {}
|
||||
|
||||
print("Total staking ETH:", sum(k * v for k,v in stakers.items()))
|
||||
|
||||
for day in range(10000):
|
||||
# Deposit new stakers at the rate needed to maintain the equilibrium size
|
||||
for i, sz in enumerate(STAKER_SIZES):
|
||||
stakers[sz] = stakers.get(sz, 0) + poisson(2**i / AVG_STAKING_TIME)
|
||||
|
||||
# Each staker has a 1/AVG_STAKING_TIME probability of deciding to leave each day
|
||||
for k in stakers.keys():
|
||||
exit_count = min(poisson(stakers[k] / AVG_STAKING_TIME), stakers[k])
|
||||
if exit_count > 0:
|
||||
exit_queue.append((k, exit_count, day))
|
||||
stakers[k] -= exit_count
|
||||
exiting[k] = exiting.get(k, 0) + exit_count
|
||||
total_eth_exiting += exit_count * k
|
||||
total_validators = sum(k * v for k,v in stakers.items()) + sum(k * v for k,v in exiting.items())
|
||||
|
||||
# Process the queue
|
||||
queue_to_empty_today = withdrawals_per_day(total_validators, total_eth_exiting)
|
||||
while queue_to_empty_today > 0 and len(exit_queue) > 0:
|
||||
key, exit_count, exit_day = exit_queue[0]
|
||||
# Partially process the first exiter (exit next loop)
|
||||
if key * exit_count > queue_to_empty_today + processing_current:
|
||||
processing_current += queue_to_empty_today
|
||||
queue_to_empty_today = 0
|
||||
# Finish processing the first exiter (continue next loop)
|
||||
else:
|
||||
queue_to_empty_today -= key * exit_count - processing_current
|
||||
processing_current = 0
|
||||
exit_queue.pop(0)
|
||||
exiting[key] -= exit_count
|
||||
total_eth_exiting -= exit_count * key
|
||||
withdrawn[key] = withdrawn.get(key, 0) + exit_count
|
||||
tot_delays[key] = tot_delays.get(key, 0) + (day - exit_day) * exit_count
|
||||
if day % 1000 == 999:
|
||||
print("Report for day %d: %d total validators %d ETH in exit queue" % ((day+1), total_validators, total_eth_exiting))
|
||||
|
||||
print("Total delays in days")
|
||||
for key in STAKER_SIZES:
|
||||
print("%d: % .3f (min %.3f)" % (key, (tot_delays.get(key, 0) / withdrawn.get(key, 0.0001)), key / withdrawals_per_day(TARGET_AMOUNT_STAKING, key)))
|
||||
@@ -1,131 +0,0 @@
|
||||
import copy, os, random, binascii
|
||||
|
||||
zero_hash = '00000000'
|
||||
|
||||
def new_hash():
|
||||
return binascii.hexlify(os.urandom(4)).decode('utf-8')
|
||||
|
||||
# Account state record
|
||||
class Account():
|
||||
def __init__(self, yes_dep, no_deps, balance):
|
||||
# This state is conditional on this dependency being CORRECT
|
||||
self.yes_dep = yes_dep
|
||||
# This state is conditional on these dependencies being INCORRECT
|
||||
self.no_deps = no_deps
|
||||
# Account balance
|
||||
self.balance = balance
|
||||
|
||||
def __repr__(self):
|
||||
return "[yes_dep: %r, no_deps: %r, balance: %d]" % (self.yes_dep, self.no_deps, self.balance)
|
||||
|
||||
# Dependency object: (hash, height)
|
||||
class Dependency():
|
||||
def __init__(self, height, hash):
|
||||
self.height = height
|
||||
self.hash = hash
|
||||
|
||||
def __repr__(self):
|
||||
return "[height: %d, hash: %s]" % (self.height, self.hash)
|
||||
|
||||
# Test with 5 accounts
|
||||
actors = ["Alice", "Bob", "Charlie", "David", "Epsie"]
|
||||
|
||||
# Initial empty state
|
||||
state = {a: Account(Dependency(0, zero_hash), [], 100) for a in actors}
|
||||
|
||||
# The set of dependencies that the protocol thinks is most likely to be correct
|
||||
# Think of these as being receipt roots of all shards, with the i'th hash
|
||||
# corresponding to the receipt root from the i'th slot. This list gets extended
|
||||
# as the protocol finds out about new dependencies, and dependencies can get popped
|
||||
# off the end if the protocol realizes that they are incorrect
|
||||
deps = [zero_hash]
|
||||
|
||||
# Alternate states corresponding to dependencies other than those the protocol thinks
|
||||
# is most likely to be correct. In a real implementation these can be stored as
|
||||
# receipts rather than state objects if desired
|
||||
alt_states = {a: [] for a in actors}
|
||||
|
||||
# Does the current account state match the dependencies?
|
||||
def is_account_state_active(account):
|
||||
for dep in account.no_deps:
|
||||
if (dep.height < len(deps) and dep.hash == deps[dep.height]):
|
||||
return False
|
||||
yes_dep_ht = account.yes_dep.height
|
||||
if yes_dep_ht >= len(deps) or account.yes_dep.hash != deps[yes_dep_ht]:
|
||||
return False
|
||||
return True
|
||||
|
||||
# Set the main account state to be the correct one
|
||||
def reorg(address):
|
||||
if is_account_state_active(state[address]):
|
||||
return
|
||||
for i,r in enumerate(alt_states[address]):
|
||||
if is_account_state_active(r):
|
||||
state[address], alt_states[address][i] = alt_states[address][i], state[address]
|
||||
return
|
||||
raise Exception("wtf m8")
|
||||
|
||||
# Adjusts balance of an account. UNSAFE unless checks are done!
|
||||
def balance_delta(to, value, height, hash):
|
||||
if height > state[to].yes_dep.height:
|
||||
alt_states[to].append(Account(
|
||||
state[to].yes_dep,
|
||||
state[to].no_deps + [Dependency(height, hash)],
|
||||
state[to].balance
|
||||
))
|
||||
state[to].yes_dep = Dependency(height, hash)
|
||||
state[to].balance += value
|
||||
assert state[to].balance >= 0
|
||||
|
||||
# Transfer from an account (or from the outside) to another account
|
||||
def transfer(frm, to, value, height, hash):
|
||||
# print("<------- Xferring", frm, to, value, height, hash)
|
||||
if len(deps) <= height or deps[height] != hash:
|
||||
return False
|
||||
if not (is_account_state_active(state[to]) and height >= state[to].yes_dep.height):
|
||||
return False
|
||||
if frm is not None and not (is_account_state_active(state[frm]) and height >= state[frm].yes_dep.height):
|
||||
return False
|
||||
balance_delta(to, value, height, hash)
|
||||
if frm is not None:
|
||||
balance_delta(frm, -value, height, hash)
|
||||
return True
|
||||
|
||||
print("Starting balance: %d" % state["Charlie"].balance)
|
||||
|
||||
# Run the main test....
|
||||
for i in range(200):
|
||||
r = random.random()
|
||||
# 16% chance: new dependency
|
||||
if r < 0.16:
|
||||
deps.append(new_hash())
|
||||
# 20% chance: xfer from another shard
|
||||
elif r < 0.36:
|
||||
to = random.choice(actors)
|
||||
reorg(to)
|
||||
value = random.randrange(100)
|
||||
assert transfer(None, to, value, len(deps)-1, deps[-1])
|
||||
if to == "Charlie":
|
||||
print("Received %d coins, new balance %d, conditional on (%d, %s)" % (value, state[to].balance, len(deps)-1, deps[-1]))
|
||||
# 60% chance: xfer between two accounts inside the shard
|
||||
elif r < 0.96:
|
||||
to = random.choice(actors)
|
||||
frm = random.choice([x for x in actors if x != to])
|
||||
reorg(to)
|
||||
reorg(frm)
|
||||
common_ht = max(state[to].yes_dep.height, state[frm].yes_dep.height)
|
||||
value = random.randrange(state[frm].balance + 1)
|
||||
assert transfer(frm, to, value, common_ht, deps[common_ht])
|
||||
if frm == "Charlie":
|
||||
print("Sent %d coins, new balance %d, conditional on (%d, %s)" % (value, state[frm].balance, common_ht, deps[common_ht]))
|
||||
if to == "Charlie":
|
||||
print("Received %d coins from %s, new balance %d, conditional on (%d, %s)" % (value, frm, state[to].balance, common_ht, deps[common_ht]))
|
||||
# 4% chance: revert some dependencies
|
||||
else:
|
||||
num_to_revert = min(random.randrange(8), len(deps) - 1)
|
||||
for i in range(num_to_revert):
|
||||
print("Reverted (%d, %s)" % (len(deps)-1, deps.pop()))
|
||||
for i in range(num_to_revert):
|
||||
deps.append(new_hash())
|
||||
reorg("Charlie")
|
||||
print("New balance: %d" % state["Charlie"].balance)
|
||||
@@ -1,131 +0,0 @@
|
||||
import random, time, sys, math
|
||||
|
||||
# For each subset in `subsets` (provided as a list of indices into `numbers`),
|
||||
# compute the sum of that subset of `numbers`. More efficient than the naive method.
|
||||
def multisubset(numbers, subsets, adder=lambda x,y: x+y, zero=0):
|
||||
numbers = numbers[::]
|
||||
subsets = {i: {x for x in subset} for i, subset in enumerate(subsets)}
|
||||
output = [zero for _ in range(len(subsets))]
|
||||
|
||||
for roundcount in range(9999999):
|
||||
# Compute counts of every pair of indices in the subset list
|
||||
pair_count = {}
|
||||
for index, subset in subsets.items():
|
||||
for x in subset:
|
||||
for y in subset:
|
||||
if y > x:
|
||||
pair_count[(x, y)] = pair_count.get((x, y), 0) + 1
|
||||
|
||||
# Determine pairs with highest count. The cutoff parameter [:len(numbers)]
|
||||
# determines a tradeoff between group operation count and other forms of overhead
|
||||
pairs_by_count = sorted([el for el in pair_count.keys()], key=lambda el: pair_count[el], reverse=True)[:len(numbers)*int(math.log(len(numbers)))]
|
||||
|
||||
# Exit condition: all subsets have size 1, no pairs
|
||||
if len(pairs_by_count) == 0:
|
||||
for key, subset in subsets.items():
|
||||
for index in subset:
|
||||
output[key] = adder(output[key], numbers[index])
|
||||
return output
|
||||
|
||||
# In each of the highest-count pairs, take the sum of the numbers at those indices,
|
||||
# and add the result as a new value, and modify `subsets` to include the new value
|
||||
# wherever possible
|
||||
used = set()
|
||||
for maxx, maxy in pairs_by_count:
|
||||
if maxx in used or maxy in used:
|
||||
continue
|
||||
used.add(maxx)
|
||||
used.add(maxy)
|
||||
numbers.append(adder(numbers[maxx], numbers[maxy]))
|
||||
for key, subset in list(subsets.items()):
|
||||
if maxx in subset and maxy in subset:
|
||||
subset.remove(maxx)
|
||||
subset.remove(maxy)
|
||||
if not subset:
|
||||
output[key] = numbers[-1]
|
||||
del subsets[key]
|
||||
else:
|
||||
subset.add(len(numbers)-1)
|
||||
|
||||
# Alternative algorithm. Less optimal than the above, but much lower bit twiddling
|
||||
# overhead and much simpler.
|
||||
def multisubset2(numbers, subsets, adder=lambda x,y: x+y, zero=0):
|
||||
# Split up the numbers into partitions
|
||||
partition_size = 1 + int(math.log(len(subsets) + 1))
|
||||
# Align number count to partition size (for simplicity)
|
||||
numbers = numbers[::]
|
||||
while len(numbers) % partition_size != 0:
|
||||
numbers.append(zero)
|
||||
# Compute power set for each partition (eg. a, b, c -> {0, a, b, a+b, c, a+c, b+c, a+b+c})
|
||||
power_sets = []
|
||||
for i in range(0, len(numbers), partition_size):
|
||||
new_power_set = [zero]
|
||||
for dimension, value in enumerate(numbers[i:i+partition_size]):
|
||||
new_power_set += [adder(n, value) for n in new_power_set]
|
||||
power_sets.append(new_power_set)
|
||||
# Compute subset sums, using elements from power set for each range of values
|
||||
# ie. with a single power set lookup you can get the sum of _all_ elements in
|
||||
# the range partition_size*k...partition_size*(k+1) that are in that subset
|
||||
subset_sums = []
|
||||
for subset in subsets:
|
||||
o = zero
|
||||
for i in range(len(power_sets)):
|
||||
index_in_power_set = 0
|
||||
for j in range(partition_size):
|
||||
if i * partition_size + j in subset:
|
||||
index_in_power_set += 2 ** j
|
||||
o = adder(o, power_sets[i][index_in_power_set])
|
||||
subset_sums.append(o)
|
||||
return subset_sums
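
# Tiny illustrative check: with numbers [3, 5, 7], subset {0, 2} sums
# to 3 + 7 = 10 and subset {1} sums to 5
assert multisubset2([3, 5, 7], [{0, 2}, {1}]) == [10, 5]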
|
||||
|
||||
# Reduces a linear combination `numbers[0] * factors[0] + numbers[1] * factors[1] + ...`
|
||||
# into a multi-subset problem, and computes the result efficiently
|
||||
def lincomb(numbers, factors, adder=lambda x,y: x+y, zero=0):
|
||||
# Maximum bit length of a number; how many subsets we need to make
|
||||
maxbitlen = max(len(bin(f))-2 for f in factors)
|
||||
# Compute the subsets: the ith subset contains the numbers whose corresponding factor
|
||||
# has a 1 at the ith bit
|
||||
subsets = [{i for i in range(len(numbers)) if factors[i] & (1 << j)} for j in range(maxbitlen+1)]
|
||||
subset_sums = multisubset(numbers, subsets, adder=adder, zero=zero)
|
||||
# For example, suppose a value V has factor 6 (011 in increasing-order binary). Subset 0
|
||||
# will not have V, subset 1 will, and subset 2 will. So if we multiply the output of adding
|
||||
# subset 0 with twice the output of adding subset 1, with four times the output of adding
|
||||
# subset 2, then V will be represented 0 + 2 + 4 = 6 times. This reasoning applies for every
|
||||
# value. So `subset_0_sum + 2 * subset_1_sum + 4 * subset_2_sum` gives us the result we want.
|
||||
# Here, we compute this as `((subset_2_sum * 2) + subset_1_sum) * 2 + subset_0_sum` for
|
||||
# efficiency: an extra `maxbitlen * 2` group operations.
|
||||
o = zero
|
||||
for i in range(len(subsets)-1, -1, -1):
|
||||
o = adder(adder(o, o), subset_sums[i])
|
||||
return o
|
||||
|
||||
# Tests go here
|
||||
def make_mock_adder():
|
||||
counter = [0]
|
||||
def adder(x, y):
|
||||
if x and y:
|
||||
counter[0] += 1
|
||||
return x+y
|
||||
return adder, counter
|
||||
|
||||
def test_multisubset(numcount, setcount):
|
||||
numbers = [random.randrange(10**20) for _ in range(numcount)]
|
||||
subsets = [{i for i in range(numcount) if random.randrange(2)} for i in range(setcount)]
|
||||
adder, counter = make_mock_adder()
|
||||
o = multisubset(numbers, subsets, adder=adder)
|
||||
for output, subset in zip(o, subsets):
|
||||
assert output == sum([numbers[x] for x in subset])
|
||||
|
||||
def test_lincomb(numcount, bitlength=256):
|
||||
numbers = [random.randrange(10**20) for _ in range(numcount)]
|
||||
factors = [random.randrange(2**bitlength) for _ in range(numcount)]
|
||||
adder, counter = make_mock_adder()
|
||||
o = lincomb(numbers, factors, adder=adder)
|
||||
assert o == sum([n*f for n,f in zip(numbers, factors)])
|
||||
total_ones = sum(bin(f).count('1') for f in factors)
|
||||
print("Naive operation count: %d" % (bitlength * numcount + total_ones))
|
||||
print("Optimized operation count: %d" % (bitlength * 2 + counter[0]))
|
||||
print("Optimization factor: %.2f" % ((bitlength * numcount + total_ones) / (bitlength * 2 + counter[0])))
|
||||
|
||||
if __name__ == '__main__':
|
||||
test_lincomb(int(sys.argv[1]) if len(sys.argv) >= 2 else 80)
|
||||
@@ -1,258 +0,0 @@
|
||||
import leveldb
|
||||
import hashlib
|
||||
import sys
|
||||
|
||||
ZERO = b'\x00'*32
|
||||
|
||||
|
||||
def hash(x):
|
||||
return hashlib.sha256(x).digest()
|
||||
|
||||
|
||||
def hash_to_display_form(h):
|
||||
return '0x'+h.hex()[:12] if h else None
|
||||
|
||||
|
||||
def get_common_prefix(a, b):
|
||||
L = 0
|
||||
while L < len(a) and L < len(b) and a[L] == b[L]:
|
||||
L += 1
|
||||
return a[:L]
|
||||
|
||||
|
||||
class LeafNode():
|
||||
|
||||
def __init__(self, key, value):
|
||||
assert len(key) == 32
|
||||
self.key, self.value = key, value
|
||||
|
||||
def serialize(self):
|
||||
return b'L' + self.key + self.value
|
||||
|
||||
def __repr__(self):
|
||||
return "Leaf[Key={}, value={}, hash={}]".format(self.key.rstrip(b'\x00'), self.value.rstrip(b'\x00'), hash_to_display_form(hash_node(self)))
|
||||
|
||||
|
||||
class BranchNode():
|
||||
|
||||
def __init__(self, values={}):
|
||||
if isinstance(values, list):
|
||||
assert len(values) == 256
|
||||
self.values = values
|
||||
elif isinstance(values, dict):
|
||||
self.values = [ZERO] * 256
|
||||
for k, v in values.items():
|
||||
self.values[k] = v
|
||||
else:
|
||||
raise Exception("Invalid values for branch node")
|
||||
|
||||
def serialize(self):
|
||||
if self.values.count(ZERO) == 256:
|
||||
return None
|
||||
else:
|
||||
return b'B' + b''.join(self.values)
|
||||
|
||||
def __repr__(self):
|
||||
if self.values.count(ZERO) == 256:
|
||||
return "BranchNode[]"
|
||||
o = "BranchNode["
|
||||
for i in range(256):
|
||||
if self.values[i] != ZERO:
|
||||
o += '{}: {}, '.format(chr(i), hash_to_display_form(self.values[i]))
|
||||
o += 'hash=' + hash_to_display_form(hash_node(self)) + ']'
|
||||
return o
|
||||
|
||||
|
||||
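# Nodes are stored in the DB keyed by their key prefix (the path), not by hash:
# b'L' + 32-byte key + value for a leaf, b'B' + 256 32-byte child hashes for a branch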
def deserialize(node):
|
||||
if node is None:
|
||||
return None
|
||||
elif node[0] == ord(b'L'):
|
||||
return LeafNode(node[1:33], node[33:])
|
||||
elif node[0] == ord(b'B'):
|
||||
assert len(node) == 1 + 32 * 256
|
||||
return BranchNode([node[32*i+1:32*i+33] for i in range(256)])
|
||||
else:
|
||||
raise Exception("Broken node in DB: {}".format(node))
|
||||
|
||||
|
||||
def hash_node(node):
|
||||
if isinstance(node, LeafNode):
|
||||
return hash(node.serialize())
|
||||
elif isinstance(node, BranchNode):
|
||||
# replace with Kate commitment
|
||||
serialized = node.serialize()
|
||||
return None if serialized is None else hash(serialized)
|
||||
elif node is None:
|
||||
return None
|
||||
else:
|
||||
raise Exception("Bad node type")
|
||||
|
||||
|
||||
def db_get(db, key):
|
||||
try:
|
||||
return db.Get(key)
|
||||
except:
|
||||
return None
|
||||
|
||||
|
||||
def db_put(db, key, value):
|
||||
print('putting', key, deserialize(value))
|
||||
if value is not None:
|
||||
db.Put(key, value)
|
||||
else:
|
||||
db.Delete(key)
|
||||
|
||||
|
||||
def propagate_deletions(db, batch, path):
|
||||
|
||||
for i in reversed(range(len(path))):
|
||||
current_node = deserialize(db_get(db, path[:i]))
|
||||
print(path[:i], current_node)
|
||||
assert isinstance(current_node, BranchNode)
|
||||
if current_node.values.count(ZERO) == 255:
|
||||
print('one nonzero; continuing')
|
||||
db_put(batch, path[:i], None)
|
||||
elif current_node.values.count(ZERO) == 254:
|
||||
print('two nonzeroes; replacing with leaf')
|
||||
# Find the surviving child (the one not on the deletion path) and pull it up
sister_index = min(j for j in range(256) if current_node.values[j] != ZERO and j != path[i])
sister_leaf = deserialize(db_get(db, path[:i] + bytes([sister_index])))
db_put(batch, path[:i], sister_leaf.serialize())
|
||||
propagate_along_path(db, batch, path[:i], hash_node(sister_leaf))
|
||||
return
|
||||
else:
|
||||
print('3+ nonzeroes; removing')
|
||||
current_node.values[path[i]] = ZERO
|
||||
db_put(batch, path[:i], current_node.serialize())
|
||||
propagate_along_path(db, batch, path[:i], hash_node(current_node))
|
||||
return
|
||||
|
||||
|
||||
def propagate_along_path(db, batch, path, new_node_hash):
|
||||
for i in reversed(range(len(path))):
|
||||
current_node = deserialize(db_get(db, path[:i]))
|
||||
if current_node is None or isinstance(current_node, LeafNode):
|
||||
current_node = BranchNode()
|
||||
current_node.values[path[i]] = new_node_hash or ZERO
|
||||
db_put(batch, path[:i], current_node.serialize())
|
||||
new_node_hash = hash_node(current_node)
|
||||
db.Write(batch)
|
||||
|
||||
|
||||
|
||||
def add(db, key, value):
|
||||
print('## ADDING {} {} ##'.format(key.rstrip(b'\x00'), value.rstrip(b'\x00')))
|
||||
assert len(key) == 32
|
||||
batch = leveldb.WriteBatch()
|
||||
for i, byte in enumerate(key):
|
||||
path = key[:i]
|
||||
node_at_path = deserialize(db_get(db, path))
|
||||
if node_at_path is None:
|
||||
new_leaf = LeafNode(key, value)
|
||||
db_put(batch, path, new_leaf.serialize())
|
||||
propagate_along_path(db, batch, path, hash_node(new_leaf))
|
||||
return
|
||||
if isinstance(node_at_path, LeafNode):
|
||||
new_leaf = LeafNode(key, value)
|
||||
if node_at_path.key == key:
|
||||
db_put(batch, path, new_leaf.serialize())
|
||||
propagate_along_path(db, batch, path, hash_node(new_leaf))
|
||||
else:
|
||||
propagation_path = get_common_prefix(key, node_at_path.key)
|
||||
common_prefix_length = len(propagation_path)
|
||||
db_put(batch, key[:common_prefix_length+1], new_leaf.serialize())
|
||||
db_put(batch, node_at_path.key[:common_prefix_length+1], node_at_path.serialize())
|
||||
new_branch_node = BranchNode({
|
||||
key[common_prefix_length]: hash_node(new_leaf),
|
||||
node_at_path.key[common_prefix_length]: hash_node(node_at_path)
|
||||
})
|
||||
db_put(batch, key[:common_prefix_length], new_branch_node.serialize())
|
||||
propagate_along_path(db, batch, propagation_path, hash_node(new_branch_node))
|
||||
return
|
||||
raise Exception("How did we get here?")
|
||||
|
||||
def delete(db, key):
|
||||
print('## DELETING {} ##'.format(key.rstrip(b'\x00')))
|
||||
assert len(key) == 32
|
||||
batch = leveldb.WriteBatch()
|
||||
node_at_path = None
|
||||
for i, byte in enumerate(key):
|
||||
path = key[:i]
|
||||
penultimate_node = node_at_path
|
||||
node_at_path = deserialize(db_get(db, path))
|
||||
if node_at_path is None:
|
||||
return
|
||||
if isinstance(node_at_path, LeafNode):
|
||||
if node_at_path.key == key:
|
||||
db_put(batch, path, None)
|
||||
penultimate_path = key[:i-1]
|
||||
# Trivial case: last key is being removed
|
||||
if penultimate_node is None:
|
||||
db.Write(batch)
|
||||
return
|
||||
# Invariant: the immediately prior branch node cannot have only one child
|
||||
assert isinstance(penultimate_node, BranchNode)
|
||||
assert penultimate_node.values.count(ZERO) <= 254
|
||||
# Easy case: > 2 children
|
||||
if penultimate_node.values.count(ZERO) < 254:
|
||||
penultimate_node.values[key[i-1]] = ZERO
|
||||
db_put(batch, penultimate_path, penultimate_node.serialize())
|
||||
propagate_along_path(db, batch, penultimate_path, hash_node(penultimate_node))
|
||||
return
|
||||
# Hard case: 2 children
|
||||
sister_leaf_index = min(
|
||||
j for j in range(256) if penultimate_node.values[j] != ZERO and j != key[i-1]
|
||||
)
|
||||
sister_leaf = deserialize(db_get(db, penultimate_path + bytes([sister_leaf_index])))
|
||||
db_put(batch, penultimate_path, None)
|
||||
for j in reversed(range(i-1)):
|
||||
path = key[:j]
|
||||
node_at_path = deserialize(db_get(db, path))
|
||||
assert isinstance(node_at_path, BranchNode)
|
||||
if node_at_path.values.count(ZERO) == 255:
|
||||
db_put(batch, path, None)
|
||||
else:
|
||||
node_at_path.values[key[j]] = hash_node(sister_leaf)
|
||||
db_put(batch, path + bytes([key[j]]), sister_leaf.serialize())
|
||||
db_put(batch, path, node_at_path.serialize())
|
||||
propagate_along_path(db, batch, path, hash_node(node_at_path))
|
||||
return
|
||||
db_put(batch, b'', sister_leaf.serialize())
|
||||
db.Write(batch)
|
||||
return
|
||||
else:
|
||||
return
|
||||
raise Exception("How did we get here?")
|
||||
|
||||
|
||||
def get(db, key):
|
||||
for i in range(len(key)):
|
||||
node = deserialize(db_get(db, key[:i]))
|
||||
if node is None:
|
||||
return None
|
||||
elif isinstance(node, LeafNode):
|
||||
return node.value
|
||||
elif isinstance(node, BranchNode):
|
||||
continue
|
||||
|
||||
|
||||
def zpad32(x):
|
||||
return x + b'\x00' * (32 - len(x))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
db = leveldb.LevelDB(sys.argv[1])
|
||||
values = {
|
||||
b'cow': b'bovine',
|
||||
b'dog': b'canine',
|
||||
b'hippopotamus': b'riverhorse',
|
||||
b'hippogriff': b'harry',
|
||||
}
|
||||
hashes = [None]
|
||||
for k, v in values.items():
|
||||
add(db, zpad32(k), zpad32(v))
|
||||
hashes.append(hash_node(deserialize(db_get(db, b''))))
|
||||
print([hash_to_display_form(x) for x in hashes])
|
||||
print("Created full tree, hash: {}".format("0x"+hashes.pop().hex()))
|
||||
for k, v in values.items():
|
||||
assert get(db, zpad32(k)) == zpad32(v)
|
||||
for k, v in reversed(values.items()):
|
||||
delete(db, zpad32(k))
|
||||
assert hashes.pop() == hash_node(deserialize(db_get(db, b'')))
|
||||
167
ghost/ghost.py
@@ -1,167 +0,0 @@
|
||||
import os, random, time, struct
|
||||
from binascii import hexlify
|
||||
|
||||
LATENCY_FACTOR = 0.5
|
||||
NODE_COUNT = 131072
|
||||
BLOCK_ONCE_EVERY = 1024
|
||||
SIM_LENGTH = 131072
|
||||
|
||||
balances = [1] * NODE_COUNT
|
||||
latest_message = [b'\x00' * 32] * NODE_COUNT
|
||||
blocks = {b'\x00' * 32: (0, None)}
|
||||
children = {}
|
||||
ancestors = [{b'\x00' * 32: b'\x00' * 32} for i in range(16)]
|
||||
max_known_height = [0]
|
||||
|
||||
logz = [0, 0]
|
||||
for i in range(2, 10000):
|
||||
logz.append(logz[i // 2] + 1)
|
||||
height_to_bytes = [i.to_bytes(4, 'big') for i in range(10000)]
|
||||
|
||||
def get_height(block):
|
||||
return blocks[block][0]
|
||||
|
||||
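# ancestors[i][b] is the nearest ancestor of b whose height is a multiple of
# 2**i; get_ancestor hops along these skip pointers, memoized in `cache`,
# instead of walking one parent at a time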
cache = {}
|
||||
def get_ancestor(block, at_height):
|
||||
h = blocks[block][0]
|
||||
if at_height >= h:
|
||||
if at_height > h:
|
||||
return None
|
||||
else:
|
||||
return block
|
||||
cachekey = block + height_to_bytes[at_height]
|
||||
if cachekey in cache:
|
||||
return cache[cachekey]
|
||||
# assert get_height(ancestors[logz[h - at_height - 1]][block]) >= at_height
|
||||
o = get_ancestor(ancestors[logz[h - at_height - 1]][block], at_height)
|
||||
# assert get_height(o) == at_height
|
||||
cache[cachekey] = o
|
||||
return o
|
||||
|
||||
def add_block(parent):
|
||||
new_block_hash = os.urandom(32)
|
||||
h = get_height(parent)
|
||||
blocks[new_block_hash] = (h+1, parent)
|
||||
if parent not in children:
|
||||
children[parent] = []
|
||||
children[parent].append(new_block_hash)
|
||||
for i in range(16):
|
||||
if h % 2**i == 0:
|
||||
ancestors[i][new_block_hash] = parent
|
||||
else:
|
||||
ancestors[i][new_block_hash] = ancestors[i][parent]
|
||||
max_known_height[0] = max(max_known_height[0], h+1)
|
||||
|
||||
def add_attestation(block, validator_index):
|
||||
latest_message[validator_index] = block
|
||||
|
||||
def get_clear_winner(latest_votes, h):
|
||||
at_height = {}
|
||||
total_vote_count = 0
|
||||
for k, v in latest_votes.items():
|
||||
anc = get_ancestor(k, h)
|
||||
at_height[anc] = at_height.get(anc, 0) + v
|
||||
if anc is not None:
|
||||
total_vote_count += v
|
||||
for k, v in at_height.items():
|
||||
if v >= total_vote_count // 2:
|
||||
return k
|
||||
return None
|
||||
|
||||
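# Resolve the winner by binary-searching over hash bits, high to low: keep
# whichever bit value (0 or 1) has more votes, returning early once only a
# single candidate matches the accumulated prefix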
def choose_best_child(votes):
|
||||
bitmask = 0
|
||||
for bit in range(255, -1, -1):
|
||||
zero_votes = 0
|
||||
one_votes = 0
|
||||
single_candidate = None
|
||||
for candidate in votes.keys():
|
||||
votes_for_candidate = votes[candidate]
|
||||
candidate_as_int = int.from_bytes(candidate, 'big')
|
||||
if candidate_as_int >> (bit+1) != bitmask:
|
||||
continue
|
||||
if (candidate_as_int >> bit) % 2 == 0:
|
||||
zero_votes += votes_for_candidate
|
||||
else:
|
||||
one_votes += votes_for_candidate
|
||||
if single_candidate is None:
|
||||
single_candidate = candidate
|
||||
else:
|
||||
single_candidate = False
|
||||
# print(bit, bitmask, zero_votes, one_votes)
|
||||
bitmask = (bitmask * 2) + (1 if one_votes > zero_votes else 0)
|
||||
if single_candidate:
|
||||
return single_candidate
|
||||
assert bit >= 1
|
||||
|
||||
def get_power_of_2_below(x):
|
||||
return 2**logz[x]
|
||||
|
||||
def ghost():
|
||||
# Get latest votes as key-value map
|
||||
latest_votes = {}
|
||||
for i in range(len(balances)):
|
||||
latest_votes[latest_message[i]] = latest_votes.get(latest_message[i], 0) + balances[i]
|
||||
|
||||
head = b'\x00' * 32
|
||||
height = 0
|
||||
while 1:
|
||||
# print('at', height, 'votes', sum(latest_votes.values()))
|
||||
c = children.get(head, [])
|
||||
if len(c) == 0:
|
||||
return head
|
||||
step = get_power_of_2_below(max_known_height[0] - height) // 2
|
||||
while step > 0:
|
||||
possible_clear_winner = get_clear_winner(latest_votes, height - (height % step) + step)
|
||||
if possible_clear_winner is not None:
|
||||
# print("Skipping from height %d to %d" % (get_height(head), get_height(possible_clear_winner)))
|
||||
head = possible_clear_winner
|
||||
break
|
||||
step //= 2
|
||||
if step > 0:
|
||||
pass
|
||||
elif len(c) == 1:
|
||||
# print("Only child fast path", latest_votes.get(head, 0))
|
||||
head = c[0]
|
||||
else:
|
||||
# print("Block %s at height %d with %d children!" %
|
||||
# (hexlify(head[:4]), height, len(c)), [hexlify(x[:4]) for x in c])
|
||||
child_votes = {x: 0.01 for x in c}
|
||||
for k, v in latest_votes.items():
|
||||
child = get_ancestor(k, height + 1)
|
||||
if child is not None:
|
||||
child_votes[child] = child_votes.get(child, 0) + v
|
||||
head = choose_best_child(child_votes)
|
||||
height = get_height(head)
|
||||
deletes = []
|
||||
for k, v in latest_votes.items():
|
||||
if get_ancestor(k, height) != head:
|
||||
deletes.append(k)
|
||||
for k in deletes:
|
||||
del latest_votes[k]
|
||||
|
||||
def get_perturbed_head(head):
|
||||
up_count = 0
|
||||
while get_height(head) > 0 and random.random() < LATENCY_FACTOR:
|
||||
head = blocks[head][1]
|
||||
up_count += 1
|
||||
for _ in range(random.randrange(up_count + 1)):
|
||||
if head in children:
|
||||
head = random.choice(children[head])
|
||||
return head
|
||||
|
||||
def simulate_chain():
|
||||
start_time = time.time()
|
||||
for i in range(0, SIM_LENGTH, BLOCK_ONCE_EVERY):
|
||||
head = ghost()
|
||||
for j in range(i, i + BLOCK_ONCE_EVERY):
|
||||
phead = get_perturbed_head(head)
|
||||
add_attestation(phead, i % NODE_COUNT)
|
||||
print("Adding new block on top of block %d %s. Time so far: %.3f" %
|
||||
(blocks[phead][0], hexlify(phead[:4]), time.time() - start_time))
|
||||
add_block(phead)
|
||||
# print([get_height(latest_message[i]) for i in range(NODE_COUNT)])
|
||||
|
||||
simulate_chain()
|
||||
print(len(str(cache)))
|
||||
print(len(str(ancestors)))
|
||||
print(len(str(blocks)))
|
||||
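A standalone sketch of the power-of-two skip table that get_ancestor() relies on, reduced to a linear chain of integer block numbers (the names N, LEVELS and anc are illustrative, not from the file): anc[i][b] points at the nearest ancestor of b whose height is a multiple of 2**i, so any lookup needs only logarithmically many hops.

import math

N = 2**10                                # chain length (illustrative)
LEVELS = 11
height = {0: 0}
anc = [{0: 0} for _ in range(LEVELS)]    # anc[i][b]: nearest ancestor at a height divisible by 2**i

for b in range(1, N + 1):
    parent = b - 1
    height[b] = height[parent] + 1
    for i in range(LEVELS):
        # same rule as add_block(): link straight to the parent on aligned
        # heights, otherwise inherit the parent's level-i pointer
        anc[i][b] = parent if height[parent] % 2**i == 0 else anc[i][parent]

def get_ancestor(b, at_height, hops=0):
    if at_height >= height[b]:
        return (b if at_height == height[b] else None), hops
    d = height[b] - at_height
    level = int(math.log2(d - 1)) if d > 1 else 0
    return get_ancestor(anc[level][b], at_height, hops + 1)

blk, hops = get_ancestor(N, 1)
print(blk, hops)                         # reaches height 1 in ~log2(N) hops, not N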
@@ -1,80 +0,0 @@
import random

VALIDATORS = 5000
EDGES = 255
FINALITY = 4000

assert EDGES % 2 == 1

neighbors = list(range(VALIDATORS))
edgelist = neighbors * EDGES
random.shuffle(edgelist)
edges = [edgelist[i*EDGES:i*EDGES+EDGES] for i in range(VALIDATORS)]

last_votes = '1' * FINALITY + '0' * (VALIDATORS - FINALITY)

while 1:
    new_zeroes = []
    for i in range(VALIDATORS):
        votes_for_0 = len([e for e in edges[i] if last_votes[e] == '0'])
        if votes_for_0 * 2 > EDGES:
            new_zeroes.append(i)
    new_last_votes = ''.join(['01'[last_votes[j]=='1' and j not in new_zeroes]
                              for j in range(VALIDATORS)])
    print(new_last_votes.count('0'))
    if new_last_votes == last_votes:
        break
    last_votes = new_last_votes

print(last_votes.count('0'))

print("Initiating repeat-pivotal strategy")

threshold = EDGES // 2
corrupted = 0

while last_votes != '0' * VALIDATORS:
    # Attempt strategy of finding the single pivotal validator that can
    # corrupt the most other validators
    pivotals = {}
    for i in range(VALIDATORS):
        votes_for_0 = len([e for e in edges[i] if last_votes[e] == '0'])
        assert last_votes[i] == '0' or votes_for_0 * 2 < EDGES
        if votes_for_0 == threshold:
            for e in edges[i]:
                if last_votes[e] == '1':
                    pivotals[e] = pivotals.get(e, 0) + 1
    if len(pivotals) > 0:
        corrupt = [max(zip(pivotals.values(), pivotals.keys()))[1]]
    # Attempt strategy of finding the smallest group of validators that
    # can be corrupted to turn 1 more
    else:
        corrupt = []
        max_votes = 0
        for i in range(VALIDATORS):
            if last_votes[i] == '0':
                continue
            votes_for_0 = len([e for e in edges[i] if last_votes[e] == '0'])
            if votes_for_0 > max_votes:
                amount_to_corrupt = EDGES // 2 - votes_for_0 + 1
                corrupt = [e for e in edges[i] if last_votes[e] == '1'][:amount_to_corrupt]
                max_votes = votes_for_0

    last_votes = ''.join(['01'[last_votes[j]=='1' and j not in corrupt]
                          for j in range(VALIDATORS)])
    corrupted += len(corrupt)
    while 1:
        new_zeroes = []
        for i in range(VALIDATORS):
            votes_for_0 = len([e for e in edges[i] if last_votes[e] == '0'])
            if votes_for_0 * 2 > EDGES:
                new_zeroes.append(i)
        new_last_votes = ''.join(['01'[last_votes[j]=='1' and j not in new_zeroes]
                                  for j in range(VALIDATORS)])
        if new_last_votes == last_votes:
            break
        last_votes = new_last_votes
    print("Corrupted %r (total %d), now on fork chain: %d" %
          (corrupt, corrupted, new_last_votes.count('0')))

print("Total corrupted: %d" % corrupted)
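The "pivotal" bookkeeping above reduces to simple threshold arithmetic. A toy check (plain Python, with a small EDGES; the file uses 255): a validator already seeing exactly EDGES // 2 zero-votes flips as soon as one more of its neighbours is corrupted, which is why the strategy ranks '1'-voters by how many such threshold validators they are adjacent to.

EDGES = 5                         # toy value for illustration
threshold = EDGES // 2            # 2: one corrupted neighbour away from flipping

votes_for_0 = 2                   # a validator sitting exactly at the threshold
assert votes_for_0 == threshold
votes_for_0 += 1                  # corrupt one of its '1' neighbours
assert votes_for_0 * 2 > EDGES    # 6 > 5: strict majority, flips on the next sweep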
21
iceage.py
21
iceage.py
@@ -1,21 +0,0 @@
import random
import datetime

diffs = [3005 * 10**12]
hashpower = diffs[0] / 14
times = [1541247118]


for i in range(6635692, 13000000):
    blocktime = random.expovariate(hashpower / diffs[-1])
    adjfac = max(1 - int(blocktime / 10), -99) / 2048.
    newdiff = diffs[-1] * (1 + adjfac)
    period = (i - 200000) // 100000 - 32
    if i > 200000:
        newdiff += 2 ** period
    diffs.append(newdiff)
    times.append(times[-1] + blocktime)
    if i % 10000 == 0:
        print('Block %d, approx ETH supply %d, time %r blocktime %.2f' % \
              (i, 60102216 * 1.199 + 5.3 * i, datetime.datetime.utcfromtimestamp(times[-1]).isoformat().replace('T', ' '), diffs[-1] / hashpower))
    # print(int(adjfac * 2048))
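Two spot checks of the update rule above (plain Python, same formulas as the loop): the adjustment factor steps by 1/2048 per 10 s of block-time deviation, and the exponential "ice age" term doubles every 100000 blocks, which is what eventually drives block times up.

diff = 3005 * 10**12
for bt in (5.0, 14.0, 30.0):
    adjfac = max(1 - int(bt / 10), -99) / 2048.
    print(bt, adjfac)              # 5 s -> +1/2048, 14 s -> 0, 30 s -> -2/2048
# the bomb doubles every 100000 blocks: period 36 at block ~7M, 96 at ~13M,
# and 2**96 ~ 7.9e28 dwarfs a ~3e15 difficulty
print((7000000 - 200000) // 100000 - 32, (13000000 - 200000) // 100000 - 32)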
@@ -1,70 +0,0 @@
macro calldatachar($x):
    div(calldataload($x), 2**248)

# sum([2**x for x in [0x31, 0x32, 0x33, 0x3a, 0x3b, 0x3c, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x54, 0x55, 0xf0, 0xff]])
mask = 57897811465722876096115075801844696845150819816717216876035649536196444422144

data approved_addrs[]

def submit(addr: address):
    # Copy external contract code
    extcode = string(~extcodesize(addr))
    ~extcodecopy(addr, extcode, 0, ~extcodesize(addr))
    ops = array(~extcodesize(addr))
    pushargs = array(~extcodesize(addr))
    # Loop through the code
    with i = 0:
        with op = 0:
            while i < len(extcode):
                with c = ~mod(~mload(extcode + i - 31), 256):
                    # Banned opcode
                    if ~and(2**c, mask):
                        ~invalid()
                    # PUSH
                    if 0x60 <= c and c <= 0x7f:
                        pushargs[op] = ~div(~mload(extcode + i + 1), 256 ** (0x7f - c))
                        i += c - 0x5e
                    # Call, callcode, delegatecall
                    elif c == 0xf1 or c == 0xf2 or c == 0xf4:
                        # Pattern-match five ways of setting the gas parameter:
                        #
                        # 1. PUSH<value>
                        # 2. sub(GAS, PUSH<value>)
                        # 3. GAS
                        # 4. SWAP1 <address> (<gas> ... )
                        # 5. DUP
                        if op >= 2 and ops[op - 1] >= 0x60 and ops[op - 1] <= 0x7f:
                            address_entry = op - 2
                        elif op >= 4 and ops[op - 1] == 0x03 and ops[op - 2] == 0x5a and ops[op - 3] >= 0x60 and ops[op - 3] <= 0x7f:
                            address_entry = op - 4
                        elif op >= 2 and ops[op - 1] == 0x5a:
                            address_entry = op - 2
                        elif op >= 2 and ops[op - 1] == 0x90:
                            address_entry = op - 2
                        elif op >= 2 and ops[op - 1] >= 0x80 and ops[op - 1] < 0x90:
                            address_entry = op - 2
                        else:
                            ~invalid()
                        # The operation before the gas parameter must satisfy one of three conditions:
                        #
                        # 1. It is a PUSH of an already approved address
                        # 2. It is the address itself, through the ADDRESS opcode (ie. self-calling is permitted)
                        # 3. It is a PUSH1, ie. less than 256 (ie. a present or future precompile)
                        success = 0  # reset: success may hold a stale 1 from an earlier call site
                        if self.approved_addrs[pushargs[address_entry]]:
                            success = 1
                        elif ops[address_entry] == 0x30:
                            success = 1
                        elif ops[address_entry] == 0x60:
                            success = 1
                        if not success:
                            ~invalid()
                        i += 1
                    else:
                        i += 1
                    ops[op] = c
                    op += 1
    self.approved_addrs[addr] = 1
    return(1: bool)

def const check(addr: address):
    return(self.approved_addrs[addr]:bool)
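The banned-opcode test in submit() is a single AND against a 256-bit bitmask. A quick Python check of the mask constant (a sketch, using exactly the opcode list from the comment at the top of the file; the opcode names are standard EVM mnemonics):

BANNED = [0x31, 0x32, 0x33,            # BALANCE, ORIGIN, CALLER
          0x3a, 0x3b, 0x3c,            # GASPRICE, EXTCODESIZE, EXTCODECOPY
          *range(0x40, 0x50),          # block-info opcodes (0x46..0x4f then unassigned)
          0x54, 0x55,                  # SLOAD, SSTORE
          0xf0, 0xff]                  # CREATE, SELFDESTRUCT
mask = sum(2**x for x in BANNED)
assert mask == 57897811465722876096115075801844696845150819816717216876035649536196444422144

def is_banned(c):
    return bool(mask & (1 << c))       # the contract's ~and(2**c, mask)

assert is_banned(0x55) and not is_banned(0x60)   # SSTORE banned, PUSH1 fine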
@@ -1,123 +0,0 @@
from ethereum import tester as t
from ethereum import utils
from ethereum import transactions
import rlp
import serpent

s = t.state()
c = s.abi_contract('check_for_impurity.se')

# from ethereum.slogging import LogRecorder, configure_logging, set_level
# config_string = ':info,eth.vm.log:trace,eth.vm.op:trace,eth.vm.stack:trace,eth.vm.exit:trace,eth.pb.msg:trace,eth.pb.tx:debug'
# configure_logging(config_string=config_string)

# Reads its own storage: impure, must be rejected
test1 = s.abi_contract("""
data horse

def foo():
    return self.horse
""")

try:
    c.submit(test1.address)
    success = True
except:
    success = False
assert not success

failedtest_addr = "0x" + utils.encode_hex(test1.address)

# Reads block data: impure, must be rejected
test2 = s.abi_contract("""
def foo():
    return block.number
""")

try:
    c.submit(test2.address)
    success = True
except:
    success = False
assert not success

# Pure arithmetic: must be accepted
test3 = s.abi_contract("""
def foo(x):
    return x * 2
""")

c.submit(test3.address)

# Pure recursive modexp via mulmod: must be accepted
test4 = s.abi_contract("""
def modexp(b: uint256, e: uint256, m: uint256):
    if e == 0:
        return 1
    elif e == 1:
        return b
    elif e % 2 == 0:
        return self.modexp(~mulmod(b, b, m), ~div(e, 2), m)
    elif e % 2 == 1:
        return ~mulmod(self.modexp(~mulmod(b, b, m), ~div(e, 2), m), b, m)
""")

c.submit(test4.address)
modexp_addr = "0x" + utils.encode_hex(test4.address)

# Calls the already-approved modexp contract via a PUSHed address: accepted
test5 = s.abi_contract("""
def modinv(b, m):
    inpdata = [0xa7d4bbe6, b, m-2, m]
    outdata = [0]
    ~call(100000, %s, 0, inpdata + 28, 100, outdata, 32)
    return outdata[0]
""" % modexp_addr)

c.submit(test5.address)

# Uses the ecrecover precompile (PUSH1 address): accepted
test6 = s.abi_contract("""
def phooey(h, v, r, s):
    return ecrecover(h, v, r, s)
""")

c.submit(test6.address)

# Calls the rejected test1 contract: must be rejected
test7 = s.abi_contract("""
def modinv(b, m):
    inpdata = [0xa7d4bbe6, b, m-2, m]
    outdata = [0]
    ~call(msg.gas - 10000, %s, 0, inpdata + 28, 100, outdata, 32)
    return outdata[0]
""" % failedtest_addr)

try:
    c.submit(test7.address)
    success = True
except:
    success = False
assert not success

print('All tests passed')

kode = serpent.compile('check_for_impurity.se')

# Create transaction (note: this deliberately shadows the tester module `t`,
# which is no longer needed past this point)
t = transactions.Transaction(0, 30 * 10**9, 2999999, '', 0, kode)
t.startgas = t.intrinsic_gas_used + 50000 + 200 * len(kode)
t.v = 27
t.r = 45
t.s = 79
print('Send %d wei to %s' % (t.startgas * t.gasprice,
                             '0x' + utils.encode_hex(t.sender)))

print('Contract address: 0x' + utils.encode_hex(utils.mk_contract_address(t.sender, 0)))
print('Code: 0x' + utils.encode_hex(rlp.encode(t)))
print('ABI declaration: ' + repr(serpent.mk_full_signature('check_for_impurity.se')))
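The hard-coded (v, r, s) at the end makes this a one-shot "keyless" deployment: the fixed signature determines some sender address whose private key nobody knows, so anyone can fund that address and broadcast the printed RLP blob, and the contract lands at the same deterministic address on any chain. A sketch of what the three printed values are for, reusing the objects defined above (assuming the same pyethereum modules):

funding = t.startgas * t.gasprice                     # wei someone must send to t.sender first
deploy_addr = utils.mk_contract_address(t.sender, 0)  # where the checker will be created
raw_tx = rlp.encode(t)                                # pre-signed blob anyone may broadcast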
@@ -1,19 +0,0 @@
### Interface generator

This is a quick-and-dirty draft of a medium-level interface-generating language targeting Ethereum ABI contracts. The goal is to let a user write an "interface file" that specifies some details of how the interface for a contract should be laid out, but in a highly restricted form, so that the interface cannot be misleading as long as the given contract code/ABI is "honest".

This is intended to reduce barriers to entry to using smart contracts by generating a "default interface" for any contract. The code should ideally be ported to Javascript and put into an extension (plus a traditional centralized website), so users can enter something like eg. `http://coolwebsiteurl.fancytld/0xde0b295669a9fd93d5f28d9ec85e40f4cb697bae/my_interface_ptr` and have it pop up an automatically generated trustable interface for the contract.

There have already been experiments in automated interface generators that look at contract code alone (eg. for the contract above, see https://etherscan.io/address/0xde0b295669a9fd93d5f28d9ec85e40f4cb697bae#readContract), but so far their use has not taken off, because contract code simply does not contain enough UI-critical information about which functions are more "important", what suggestions to make to users, etc. However, simply using Javascript to generate interfaces has the opposite problem: it is _too_ liberal, allowing a malicious interface designer to easily make a misleading interface. The goal here is a language in the middle: expressive enough to be somewhat usable in some circumstances, but not expressive enough to allow misleading users.

The author is allowed to create a list of "tabs", each tab corresponding to one function execution (the language could and should be extended to offer a "dashboard" tab that simply shows info without offering any functions to execute), with a textbox for each argument. The interface designer can suggest prefilled values for arguments, which can be either constant values or calls of a constant function of the same contract, and can also give information to the user in the form of calls of constant functions with preset arguments, repeated once per second. (A minimal example interface file is sketched after the TODO list below.)

See examples/foundation_out.html in this repo for a simple example using the Ethereum Foundation multisig wallet (only seven people in the world can use this to do anything useful; it is intended for illustrative purposes only). See examples/foundation_instructions.json for the interface file that produces this.

### TODOs

* Translate the interface generator from Python to Javascript so it can be run inside a web browser
* Allow interface files ("foundation_instructions.json" here) to be written in YAML rather than JSON
* If the contract source code has variable-specific docstrings, display those beside the variable name and the textbox
* Allow input types other than textboxes, eg. dropdowns, sliders, and specialized text boxes for ETH or token values so users do not have to multiply by 10\*\*18 manually; this could be done via a "specialInputFormats" list in the interface similar to the "prefills" list
* Allow the outputs of "shows" to be represented in multiple ways
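For concreteness, here is a hypothetical interface file in the same shape as the ones shipped in this repo (the function and argument names below are made up; see examples/foundation_instructions.json later in this diff for a real one):

[
    {
        "fun": "transfer",
        "prefills": [{"arg": "_amount", "value": "0"}],
        "shows": [{"fun": "balanceOf", "inputs": ["0x0000000000000000000000000000000000000000"]}]
    }
]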
@@ -1,19 +0,0 @@
import interface_generator as ig
import json
import sys

file2json = lambda x: json.load(open(x))

examples = file2json('examples/addresses.json')

if len(sys.argv) == 1:
    print("Available examples: " + ", ".join(examples.keys()))
elif sys.argv[1] in examples:
    address = examples[sys.argv[1]]
    abi = file2json('examples/%s_abi.json' % sys.argv[1])
    instructions = file2json('examples/%s_instructions.json' % sys.argv[1])
    interface = ig.generate_interface(address, abi, instructions)
    open('examples/%s_out.html' % sys.argv[1], 'w+').write(interface)
    print("Wrote file to examples/%s_out.html" % sys.argv[1])
else:
    print("Example %s not found" % sys.argv[1])
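Typical usage, assuming the script above is saved as run_example.py (its real filename is not shown in this diff):

    $ python run_example.py
    Available examples: foundation, uniswap
    $ python run_example.py foundation
    Wrote file to examples/foundation_out.html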
@@ -1,4 +0,0 @@
{
    "foundation": "0xde0b295669a9fd93d5f28d9ec85e40f4cb697bae",
    "uniswap": "0x09cabec1ead1c0ba254b09efb3ee13841712be14"
}
@@ -1,425 +0,0 @@
[
    {"constant": false, "inputs": [{"name": "_owner", "type": "address"}], "name": "removeOwner", "outputs": [], "type": "function"},
    {"constant": false, "inputs": [{"name": "_addr", "type": "address"}], "name": "isOwner", "outputs": [{"name": "", "type": "bool"}], "type": "function"},
    {"constant": true, "inputs": [], "name": "m_numOwners", "outputs": [{"name": "", "type": "uint256"}], "type": "function"},
    {"constant": false, "inputs": [], "name": "resetSpentToday", "outputs": [], "type": "function"},
    {"constant": false, "inputs": [{"name": "_owner", "type": "address"}], "name": "addOwner", "outputs": [], "type": "function"},
    {"constant": true, "inputs": [], "name": "m_required", "outputs": [{"name": "", "type": "uint256"}], "type": "function"},
    {"constant": false, "inputs": [{"name": "_h", "type": "bytes32"}], "name": "confirm", "outputs": [{"name": "", "type": "bool"}], "type": "function"},
    {"constant": false, "inputs": [{"name": "_newLimit", "type": "uint256"}], "name": "setDailyLimit", "outputs": [], "type": "function"},
    {"constant": false, "inputs": [{"name": "_to", "type": "address"}, {"name": "_value", "type": "uint256"}, {"name": "_data", "type": "bytes"}], "name": "execute", "outputs": [{"name": "_r", "type": "bytes32"}], "type": "function"},
    {"constant": false, "inputs": [{"name": "_operation", "type": "bytes32"}], "name": "revoke", "outputs": [], "type": "function"},
    {"constant": false, "inputs": [{"name": "_newRequired", "type": "uint256"}], "name": "changeRequirement", "outputs": [], "type": "function"},
    {"constant": true, "inputs": [{"name": "_operation", "type": "bytes32"}, {"name": "_owner", "type": "address"}], "name": "hasConfirmed", "outputs": [{"name": "", "type": "bool"}], "type": "function"},
    {"constant": false, "inputs": [{"name": "_to", "type": "address"}], "name": "kill", "outputs": [], "type": "function"},
    {"constant": false, "inputs": [{"name": "_from", "type": "address"}, {"name": "_to", "type": "address"}], "name": "changeOwner", "outputs": [], "type": "function"},
    {"constant": true, "inputs": [], "name": "m_dailyLimit", "outputs": [{"name": "", "type": "uint256"}], "type": "function"},
    {"inputs": [{"name": "_owners", "type": "address[]"}, {"name": "_required", "type": "uint256"}, {"name": "_daylimit", "type": "uint256"}], "type": "constructor"},
    {"anonymous": false, "inputs": [{"indexed": false, "name": "owner", "type": "address"}, {"indexed": false, "name": "operation", "type": "bytes32"}], "name": "Confirmation", "type": "event"},
    {"anonymous": false, "inputs": [{"indexed": false, "name": "owner", "type": "address"}, {"indexed": false, "name": "operation", "type": "bytes32"}], "name": "Revoke", "type": "event"},
    {"anonymous": false, "inputs": [{"indexed": false, "name": "oldOwner", "type": "address"}, {"indexed": false, "name": "newOwner", "type": "address"}], "name": "OwnerChanged", "type": "event"},
    {"anonymous": false, "inputs": [{"indexed": false, "name": "newOwner", "type": "address"}], "name": "OwnerAdded", "type": "event"},
    {"anonymous": false, "inputs": [{"indexed": false, "name": "oldOwner", "type": "address"}], "name": "OwnerRemoved", "type": "event"},
    {"anonymous": false, "inputs": [{"indexed": false, "name": "newRequirement", "type": "uint256"}], "name": "RequirementChanged", "type": "event"},
    {"anonymous": false, "inputs": [{"indexed": false, "name": "_from", "type": "address"}, {"indexed": false, "name": "value", "type": "uint256"}], "name": "Deposit", "type": "event"},
    {"anonymous": false, "inputs": [{"indexed": false, "name": "owner", "type": "address"}, {"indexed": false, "name": "value", "type": "uint256"}, {"indexed": false, "name": "to", "type": "address"}, {"indexed": false, "name": "data", "type": "bytes"}], "name": "SingleTransact", "type": "event"},
    {"anonymous": false, "inputs": [{"indexed": false, "name": "owner", "type": "address"}, {"indexed": false, "name": "operation", "type": "bytes32"}, {"indexed": false, "name": "value", "type": "uint256"}, {"indexed": false, "name": "to", "type": "address"}, {"indexed": false, "name": "data", "type": "bytes"}], "name": "MultiTransact", "type": "event"},
    {"anonymous": false, "inputs": [{"indexed": false, "name": "operation", "type": "bytes32"}, {"indexed": false, "name": "initiator", "type": "address"}, {"indexed": false, "name": "value", "type": "uint256"}, {"indexed": false, "name": "to", "type": "address"}, {"indexed": false, "name": "data", "type": "bytes"}], "name": "ConfirmationNeeded", "type": "event"}
]
@@ -1,8 +0,0 @@
[
    {
        "fun": "execute",
        "prefills": [{"arg": "_data", "value": "0x"},
                     {"arg": "_value", "fun": "m_dailyLimit", "inputs": []}],
        "shows": [{"fun": "m_dailyLimit", "inputs": []}]
    }
]
@@ -1,63 +0,0 @@

<script>
ethereum.enable()
c = web3.eth.contract([{"constant": false, "inputs": [{"name": "_owner", "type": "address"}], "name": "removeOwner", "outputs": [], "type": "function"}, {"constant": false, "inputs": [{"name": "_addr", "type": "address"}], "name": "isOwner", "outputs": [{"name": "", "type": "bool"}], "type": "function"}, {"constant": true, "inputs": [], "name": "m_numOwners", "outputs": [{"name": "", "type": "uint256"}], "type": "function"}, {"constant": false, "inputs": [], "name": "resetSpentToday", "outputs": [], "type": "function"}, {"constant": false, "inputs": [{"name": "_owner", "type": "address"}], "name": "addOwner", "outputs": [], "type": "function"}, {"constant": true, "inputs": [], "name": "m_required", "outputs": [{"name": "", "type": "uint256"}], "type": "function"}, {"constant": false, "inputs": [{"name": "_h", "type": "bytes32"}], "name": "confirm", "outputs": [{"name": "", "type": "bool"}], "type": "function"}, {"constant": false, "inputs": [{"name": "_newLimit", "type": "uint256"}], "name": "setDailyLimit", "outputs": [], "type": "function"}, {"constant": false, "inputs": [{"name": "_to", "type": "address"}, {"name": "_value", "type": "uint256"}, {"name": "_data", "type": "bytes"}], "name": "execute", "outputs": [{"name": "_r", "type": "bytes32"}], "type": "function"}, {"constant": false, "inputs": [{"name": "_operation", "type": "bytes32"}], "name": "revoke", "outputs": [], "type": "function"}, {"constant": false, "inputs": [{"name": "_newRequired", "type": "uint256"}], "name": "changeRequirement", "outputs": [], "type": "function"}, {"constant": true, "inputs": [{"name": "_operation", "type": "bytes32"}, {"name": "_owner", "type": "address"}], "name": "hasConfirmed", "outputs": [{"name": "", "type": "bool"}], "type": "function"}, {"constant": false, "inputs": [{"name": "_to", "type": "address"}], "name": "kill", "outputs": [], "type": "function"}, {"constant": false, "inputs": [{"name": "_from", "type": "address"}, {"name": "_to", "type": "address"}], "name": "changeOwner", "outputs": [], "type": "function"}, {"constant": true, "inputs": [], "name": "m_dailyLimit", "outputs": [{"name": "", "type": "uint256"}], "type": "function"}, {"inputs": [{"name": "_owners", "type": "address[]"}, {"name": "_required", "type": "uint256"}, {"name": "_daylimit", "type": "uint256"}], "type": "constructor"}, {"anonymous": false, "inputs": [{"indexed": false, "name": "owner", "type": "address"}, {"indexed": false, "name": "operation", "type": "bytes32"}], "name": "Confirmation", "type": "event"}, {"anonymous": false, "inputs": [{"indexed": false, "name": "owner", "type": "address"}, {"indexed": false, "name": "operation", "type": "bytes32"}], "name": "Revoke", "type": "event"}, {"anonymous": false, "inputs": [{"indexed": false, "name": "oldOwner", "type": "address"}, {"indexed": false, "name": "newOwner", "type": "address"}], "name": "OwnerChanged", "type": "event"}, {"anonymous": false, "inputs": [{"indexed": false, "name": "newOwner", "type": "address"}], "name": "OwnerAdded", "type": "event"}, {"anonymous": false, "inputs": [{"indexed": false, "name": "oldOwner", "type": "address"}], "name": "OwnerRemoved", "type": "event"}, {"anonymous": false, "inputs": [{"indexed": false, "name": "newRequirement", "type": "uint256"}], "name": "RequirementChanged", "type": "event"}, {"anonymous": false, "inputs": [{"indexed": false, "name": "_from", "type": "address"}, {"indexed": false, "name": "value", "type": "uint256"}], "name": "Deposit", "type": "event"}, {"anonymous": false, "inputs": [{"indexed": false, "name": "owner", "type": "address"}, {"indexed": false, "name": "value", "type": "uint256"}, {"indexed": false, "name": "to", "type": "address"}, {"indexed": false, "name": "data", "type": "bytes"}], "name": "SingleTransact", "type": "event"}, {"anonymous": false, "inputs": [{"indexed": false, "name": "owner", "type": "address"}, {"indexed": false, "name": "operation", "type": "bytes32"}, {"indexed": false, "name": "value", "type": "uint256"}, {"indexed": false, "name": "to", "type": "address"}, {"indexed": false, "name": "data", "type": "bytes"}], "name": "MultiTransact", "type": "event"}, {"anonymous": false, "inputs": [{"indexed": false, "name": "operation", "type": "bytes32"}, {"indexed": false, "name": "initiator", "type": "address"}, {"indexed": false, "name": "value", "type": "uint256"}, {"indexed": false, "name": "to", "type": "address"}, {"indexed": false, "name": "data", "type": "bytes"}], "name": "ConfirmationNeeded", "type": "event"}]).at("0xde0b295669a9fd93d5f28d9ec85e40f4cb697bae")
</script>

<b><span id="execute___start">execute</span></b>
<table>

<tr>
<td><span>_to</span></td>
<td><input id="execute____to"></input></td>
</tr>

<tr>
<td><span>_value</span></td>
<td><input id="execute____value"></input></td>
</tr>

<tr>
<td><span>_data</span></td>
<td><input id="execute____data"></input></td>
</tr>

<script>
document.getElementById("execute____data").value = "0x"
</script>

<script>
setTimeout(function() {
    c.m_dailyLimit( function(err, res) {
        document.getElementById("execute____value").value = res.toString ? res.toString("10") : res;
    });
}, 1000)
</script>

<script>
function ___execute() {
    c.execute(document.getElementById("execute____to").value, document.getElementById("execute____value").value, document.getElementById("execute____data").value, function(err, res) { document.getElementById("execute___result").innerText = err + res; });
}
</script>
<tr>
<td></td>
<td><input type="submit" onclick="___execute()" /></td>
</tr>

<tr>
<td><span>m_dailyLimit</span></td>
<td><span id="execute__show__0"></span></td>
</tr>
<script>
setInterval(function() {
    c.m_dailyLimit( function(err, res) {
        document.getElementById("execute__show__0").innerText = res.toString ? res.toString("10") : res;
    });
}, 1000)
</script>

<tr>
<td></td>
<td><span id="execute___result"></span></td>
</tr>
</table>
File diff suppressed because one or more lines are too long
@@ -1,7 +0,0 @@
[
    {
        "fun": "ethToTokenSwapInput",
        "prefills": [],
        "shows": [{"fun": "getEthToTokenInputPrice", "inputs": ["1"]}]
    }
]
File diff suppressed because one or more lines are too long
Some files were not shown because too many files have changed in this diff