diff --git a/.circleci/config.yml b/.circleci/config.yml index d4c672ca2..0e0fac0d4 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -35,13 +35,13 @@ commands: description: "Restore the cache with default keys" steps: - restore_cached_venv: - venv_name: v1-pyspec-05 + venv_name: v2-pyspec reqs_checksum: cache-{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "test_libs/pyspec/requirements-testing.txt" }}-{{ checksum "deposit_contract/requirements-testing.txt" }} save_default_cached_venv: description: Save a venv into a cache with default keys" steps: - save_cached_venv: - venv_name: v1-pyspec-05 + venv_name: v2-pyspec reqs_checksum: cache-{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "test_libs/pyspec/requirements-testing.txt" }}-{{ checksum "deposit_contract/requirements-testing.txt" }} venv_path: ./test_libs/pyspec/venv jobs: diff --git a/.gitignore b/.gitignore index 3dd86fc80..c6b39955f 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,4 @@ eth2.0-spec-tests/ # Dynamically built from Markdown spec test_libs/pyspec/eth2spec/phase0/spec.py +test_libs/pyspec/eth2spec/phase1/spec.py diff --git a/Makefile b/Makefile index 47be51e0a..66c5ba3ec 100644 --- a/Makefile +++ b/Makefile @@ -14,10 +14,15 @@ YAML_TEST_TARGETS = $(patsubst $(GENERATOR_DIR)/%, $(YAML_TEST_DIR)/%, $(GENERAT GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%venv, $(GENERATORS)) PY_SPEC_PHASE_0_TARGETS = $(PY_SPEC_DIR)/eth2spec/phase0/spec.py -PY_SPEC_ALL_TARGETS = $(PY_SPEC_PHASE_0_TARGETS) +PY_SPEC_PHASE_0_DEPS = $(SPEC_DIR)/core/0_*.md + +PY_SPEC_PHASE_1_TARGETS = $(PY_SPEC_DIR)/eth2spec/phase1/spec.py +PY_SPEC_PHASE_1_DEPS = $(SPEC_DIR)/core/1_*.md + +PY_SPEC_ALL_TARGETS = $(PY_SPEC_PHASE_0_TARGETS) $(PY_SPEC_PHASE_1_TARGETS) -.PHONY: clean all test citest gen_yaml_tests pyspec phase0 install_test +.PHONY: clean all test citest gen_yaml_tests pyspec phase0 phase1 install_test all: $(PY_SPEC_ALL_TARGETS) $(YAML_TEST_DIR) 
$(YAML_TEST_TARGETS) @@ -38,14 +43,12 @@ test: $(PY_SPEC_ALL_TARGETS) cd $(PY_SPEC_DIR); . venv/bin/activate; python -m pytest eth2spec citest: $(PY_SPEC_ALL_TARGETS) - cd $(PY_SPEC_DIR); mkdir -p test-reports/eth2spec; . venv/bin/activate; python -m pytest --junitxml=test-reports/eth2spec/test_results.xml . - -install_lint: - cd $(PY_SPEC_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install flake8==3.5.0 + cd $(PY_SPEC_DIR); mkdir -p test-reports/eth2spec; . venv/bin/activate; \ + python -m pytest --junitxml=test-reports/eth2spec/test_results_phase0.xml eth2spec lint: $(PY_SPEC_ALL_TARGETS) cd $(PY_SPEC_DIR); . venv/bin/activate; \ - flake8 --max-line-length=120 ./eth2spec; + flake8 --ignore=E252,W504,W503 --max-line-length=120 ./eth2spec; compile_deposit_contract: cd $(PY_SPEC_DIR); python3 -m venv venv; . venv/bin/activate; \ @@ -66,13 +69,11 @@ test_deposit_contract: $(PY_SPEC_ALL_TARGETS) # "make pyspec" to create the pyspec for all phases. pyspec: $(PY_SPEC_ALL_TARGETS) -# "make phase0" to create pyspec for phase0 -phase0: $(PY_SPEC_PHASE_0_TARGETS) - - -$(PY_SPEC_DIR)/eth2spec/phase0/spec.py: - python3 $(SCRIPT_DIR)/phase0/build_spec.py $(SPEC_DIR)/core/0_beacon-chain.md $@ +$(PY_SPEC_PHASE_0_TARGETS): $(PY_SPEC_PHASE_0_DEPS) + python3 $(SCRIPT_DIR)/build_spec.py -p0 $(SPEC_DIR)/core/0_beacon-chain.md $@ +$(PY_SPEC_DIR)/eth2spec/phase1/spec.py: $(PY_SPEC_PHASE_1_DEPS) + python3 $(SCRIPT_DIR)/build_spec.py -p1 $(SPEC_DIR)/core/0_beacon-chain.md $(SPEC_DIR)/core/1_custody-game.md $(SPEC_DIR)/core/1_shard-data-chains.md $@ CURRENT_DIR = ${CURDIR} diff --git a/configs/constant_presets/minimal.yaml b/configs/constant_presets/minimal.yaml index caae4623b..73448c3c6 100644 --- a/configs/constant_presets/minimal.yaml +++ b/configs/constant_presets/minimal.yaml @@ -74,6 +74,8 @@ PERSISTENT_COMMITTEE_PERIOD: 2048 MAX_EPOCHS_PER_CROSSLINK: 64 # 2**2 (= 4) epochs 25.6 minutes MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4 +# [customized] 2**12 (= 4,096) epochs 18 days 
+EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS: 4096 # State list lengths diff --git a/configs/fork_timelines/mainnet.yaml b/configs/fork_timelines/mainnet.yaml index 8d51d6582..0bb3c9db1 100644 --- a/configs/fork_timelines/mainnet.yaml +++ b/configs/fork_timelines/mainnet.yaml @@ -7,6 +7,6 @@ phase0: 67108864 # phase0_funny_fork_name: 67116000 # Example 2: -# Should be equal to PHASE_1_GENESIS_EPOCH +# Should be equal to PHASE_1_FORK_EPOCH # (placeholder in example value here) # phase1: 67163000 diff --git a/scripts/README.md b/scripts/README.md new file mode 100644 index 000000000..25b46decf --- /dev/null +++ b/scripts/README.md @@ -0,0 +1,32 @@ +# Building pyspecs from specs.md + +The benefit of the particular spec design is that the given markdown files can be converted to a `spec.py` file for the purposes of testing and linting. The result of this is that bugs are discovered and patched more quickly. + +Specs can be built from either a single markdown document or multiple files that must be combined in a given order. Given 2 spec objects, `build_spec.combine_spec_objects` will combine them into a single spec object which, subsequently, can be converted into a `specs.py`. + +## Usage + +For usage of the spec builder run `python3 -m build_spec --help`. + +## `@Labels` and inserts + +The functioning of the spec combiner is largely automatic in that given `spec0.md` and `spec1.md`, SSZ Objects will be extended and old functions will be overwritten. Extra functionality is provided for more granular control over how files are combined. In the event that only a small portion of code is to be added to an existing function, insert functionality is provided. This saves having to completely redefine the old function from `spec0.md` in `spec1.md`. This is done by marking where the change is to occur in the old file and marking which code is to be inserted in the new file. 
This is done as follows: + +* In the old file, a label is added as a python comment marking where the code is to be inserted. This would appear as follows in `spec0.md`: + +```python +def foo(x): + x << 1 + # @YourLabelHere + return x +``` + +* In spec1, the new code could then be inserted by having a code-block that looked as follows: + +```python +#begin insert @YourLabelHere + x += x +#end insert @YourLabelHere +``` + +**Note** that the code to be inserted has the **same level of indentation** as the surrounding code of its destination insert point. diff --git a/scripts/phase0/__init__.py b/scripts/__init__.py similarity index 100% rename from scripts/phase0/__init__.py rename to scripts/__init__.py diff --git a/scripts/build_spec.py b/scripts/build_spec.py new file mode 100644 index 000000000..7a51970e3 --- /dev/null +++ b/scripts/build_spec.py @@ -0,0 +1,277 @@ +import re +from function_puller import ( + get_spec, + SpecObject, +) +from argparse import ArgumentParser +from typing import ( + Dict, + List, + Optional, +) + + +PHASE0_IMPORTS = '''from typing import ( + Any, + Dict, + List, + NewType, + Tuple, +) + +from eth2spec.utils.ssz.ssz_impl import ( + hash_tree_root, + signing_root, +) +from eth2spec.utils.ssz.ssz_typing import ( + # unused: uint8, uint16, uint32, uint128, uint256, + uint64, Container, Vector, BytesN +) +from eth2spec.utils.bls import ( + bls_aggregate_pubkeys, + bls_verify, + bls_verify_multiple, +) +# Note: 'int' type defaults to being interpreted as a uint64 by SSZ implementation. 
+ +from eth2spec.utils.hash_function import hash +''' +PHASE1_IMPORTS = '''from typing import ( + Any, + Dict, + List, + NewType, + Tuple, +) + +from eth2spec.utils.ssz.ssz_impl import ( + hash_tree_root, + signing_root, + serialize, + is_empty, +) +from eth2spec.utils.ssz.ssz_typing import ( + # unused: uint8, uint16, uint32, uint128, uint256, + uint64, Container, Vector, BytesN +) +from eth2spec.utils.bls import ( + bls_aggregate_pubkeys, + bls_verify, + bls_verify_multiple, +) + +from eth2spec.utils.hash_function import hash +''' +NEW_TYPES = { + 'Slot': 'int', + 'Epoch': 'int', + 'Shard': 'int', + 'ValidatorIndex': 'int', + 'Gwei': 'int', +} +BYTE_TYPES = [4, 32, 48, 96] +SUNDRY_FUNCTIONS = ''' +def get_ssz_type_by_name(name: str) -> Container: + return globals()[name] + + +# Monkey patch validator compute committee code +_compute_committee = compute_committee +committee_cache = {} + + +def compute_committee(indices: List[ValidatorIndex], seed: Bytes32, index: int, count: int) -> List[ValidatorIndex]: + param_hash = (hash_tree_root(indices), seed, index, count) + + if param_hash in committee_cache: + return committee_cache[param_hash] + else: + ret = _compute_committee(indices, seed, index, count) + committee_cache[param_hash] = ret + return ret + + +# Monkey patch hash cache +_hash = hash +hash_cache = {} + + +def hash(x): + if x in hash_cache: + return hash_cache[x] + else: + ret = _hash(x) + hash_cache[x] = ret + return ret + + +# Access to overwrite spec constants based on configuration +def apply_constants_preset(preset: Dict[str, Any]): + global_vars = globals() + for k, v in preset.items(): + global_vars[k] = v + + # Deal with derived constants + global_vars['GENESIS_EPOCH'] = slot_to_epoch(GENESIS_SLOT) + + # Initialize SSZ types again, to account for changed lengths + init_SSZ_types() +''' + + +def objects_to_spec(functions: Dict[str, str], + constants: Dict[str, str], + ssz_objects: Dict[str, str], + inserts: Dict[str, str], + imports: Dict[str, str], 
+ new_types: Dict[str, str], + byte_types: List[int], + ) -> str: + """ + Given all the objects that constitute a spec, combine them into a single pyfile. + """ + new_type_definitions = \ + '\n'.join(['''%s = NewType('%s', %s)''' % (key, key, value) for key, value in new_types.items()]) + new_type_definitions += '\n' + '\n'.join(['Bytes%s = BytesN[%s]' % (n, n) for n in byte_types]) + functions_spec = '\n\n'.join(functions.values()) + constants_spec = '\n'.join(map(lambda x: '%s = %s' % (x, constants[x]), constants)) + ssz_objects_instantiation_spec = '\n\n'.join(ssz_objects.values()) + ssz_objects_reinitialization_spec = ( + 'def init_SSZ_types():\n global_vars = globals()\n\n ' + + '\n\n '.join([re.sub(r'(?!\n\n)\n', r'\n ', value[:-1]) for value in ssz_objects.values()]) + + '\n\n' + + '\n'.join(map(lambda x: ' global_vars[\'%s\'] = %s' % (x, x), ssz_objects.keys())) + ) + spec = ( + imports + + '\n' + new_type_definitions + + '\n\n' + constants_spec + + '\n\n\n' + ssz_objects_instantiation_spec + + '\n\n' + functions_spec + + '\n' + SUNDRY_FUNCTIONS + + '\n\n' + ssz_objects_reinitialization_spec + + '\n' + ) + # Handle @inserts + for key, value in inserts.items(): + spec = re.sub('[ ]*# %s\\n' % key, value, spec) + return spec + + +def combine_functions(old_functions: Dict[str, str], new_functions: Dict[str, str]) -> Dict[str, str]: + for key, value in new_functions.items(): + old_functions[key] = value + return old_functions + + +def combine_constants(old_constants: Dict[str, str], new_constants: Dict[str, str]) -> Dict[str, str]: + for key, value in new_constants.items(): + old_constants[key] = value + return old_constants + + +def dependency_order_ssz_objects(objects: Dict[str, str]) -> None: + """ + Determines which SSZ Object is depenedent on which other and orders them appropriately + """ + items = list(objects.items()) + for key, value in items: + dependencies = re.findall(r'(: [A-Z][\w[]*)', value) + dependencies = map(lambda x: 
re.sub(r'\W|Vector|List|Container|uint\d+|Bytes\d+|bytes', '', x), dependencies) + for dep in dependencies: + if dep in NEW_TYPES or len(dep) == 0: + continue + key_list = list(objects.keys()) + for item in [dep, key] + key_list[key_list.index(dep)+1:]: + objects[item] = objects.pop(item) + + +def combine_ssz_objects(old_objects: Dict[str, str], new_objects: Dict[str, str]) -> Dict[str, str]: + """ + Takes in old spec and new spec ssz objects, combines them, + and returns the newer versions of the objects in dependency order. + """ + for key, value in new_objects.items(): + if key in old_objects: + # remove trailing newline + old_objects[key] = old_objects[key] + # remove leading variable name + value = re.sub(r'^class [\w]*\(Container\):\n', '', value) + old_objects[key] = old_objects.get(key, '') + value + dependency_order_ssz_objects(old_objects) + return old_objects + + +# inserts are handled the same way as functions +combine_inserts = combine_functions + + +def combine_spec_objects(spec0: SpecObject, spec1: SpecObject) -> SpecObject: + """ + Takes in two spec variants (as tuples of their objects) and combines them using the appropriate combiner function.
+ """ + functions0, constants0, ssz_objects0, inserts0 = spec0 + functions1, constants1, ssz_objects1, inserts1 = spec1 + functions = combine_functions(functions0, functions1) + constants = combine_constants(constants0, constants1) + ssz_objects = combine_ssz_objects(ssz_objects0, ssz_objects1) + inserts = combine_inserts(inserts0, inserts1) + return functions, constants, ssz_objects, inserts + + +def build_phase0_spec(sourcefile: str, outfile: str=None) -> Optional[str]: + functions, constants, ssz_objects, inserts = get_spec(sourcefile) + spec = objects_to_spec(functions, constants, ssz_objects, inserts, PHASE0_IMPORTS, NEW_TYPES, BYTE_TYPES) + if outfile is not None: + with open(outfile, 'w') as out: + out.write(spec) + return spec + + +def build_phase1_spec(phase0_sourcefile: str, + phase1_custody_sourcefile: str, + phase1_shard_sourcefile: str, + outfile: str=None) -> Optional[str]: + phase0_spec = get_spec(phase0_sourcefile) + phase1_custody = get_spec(phase1_custody_sourcefile) + phase1_shard_data = get_spec(phase1_shard_sourcefile) + spec_objects = phase0_spec + for value in [phase1_custody, phase1_shard_data]: + spec_objects = combine_spec_objects(spec_objects, value) + spec = objects_to_spec(*spec_objects, PHASE1_IMPORTS, NEW_TYPES, BYTE_TYPES) + if outfile is not None: + with open(outfile, 'w') as out: + out.write(spec) + return spec + + +if __name__ == '__main__': + description = ''' +Build the specs from the md docs. 
+If building phase 0: + 1st argument is input spec.md + 2nd argument is output spec.py + +If building phase 1: + 1st argument is input spec_phase0.md + 2nd argument is input spec_phase1_custody.md + 3rd argument is input spec_phase1_shard_data.md + 4th argument is output spec.py +''' + parser = ArgumentParser(description=description) + parser.add_argument("-p", "--phase", dest="phase", type=int, default=0, help="Build for phase #") + parser.add_argument(dest="files", help="Input and output files", nargs="+") + + args = parser.parse_args() + if args.phase == 0: + if len(args.files) == 2: + build_phase0_spec(*args.files) + else: + print(" Phase 0 requires an output as well as an input file.") + elif args.phase == 1: + if len(args.files) == 4: + build_phase1_spec(*args.files) + else: + print(" Phase 1 requires an output as well as 3 input files (phase0.md and phase1.md, phase1.md)") + else: + print("Invalid phase: {0}".format(args.phase)) diff --git a/scripts/function_puller.py b/scripts/function_puller.py new file mode 100644 index 000000000..303d4ec2f --- /dev/null +++ b/scripts/function_puller.py @@ -0,0 +1,83 @@ +import re +from typing import Dict, Tuple, NewType + + +FUNCTION_REGEX = r'^def [\w_]*' +BEGIN_INSERT_REGEX = r'# begin insert ' +END_INSERT_REGEX = r'# end insert' + +SpecObject = NewType('SpecObjects', Tuple[Dict[str, str], Dict[str, str], Dict[str, str], Dict[str, str]]) + + +def get_spec(file_name: str) -> SpecObject: + """ + Takes in the file name of a spec.md file, opens it and returns the following objects: + functions = {function_name: function_code} + constants= {constant_name: constant_code} + ssz_objects= {object_name: object} + inserts= {insert_tag: code to be inserted} + + Note: This function makes heavy use of the inherent ordering of dicts, + if this is not supported by your python version, it will not work. 
+ """ + pulling_from = None # line number of start of latest object + current_name = None # most recent section title + insert_name = None # stores the label of the current insert object + functions = {} + constants = {} + ssz_objects = {} + inserts = {} + function_matcher = re.compile(FUNCTION_REGEX) + inserts_matcher = re.compile(BEGIN_INSERT_REGEX) + for linenum, line in enumerate(open(file_name).readlines()): + line = line.rstrip() + if pulling_from is None and len(line) > 0 and line[0] == '#' and line[-1] == '`': + current_name = line[line[:-1].rfind('`') + 1: -1] + if line[:9] == '```python': + assert pulling_from is None + pulling_from = linenum + 1 + elif line[:3] == '```': + pulling_from = None + elif inserts_matcher.match(line) is not None: + # Find @insert names + insert_name = re.search(r'@[\w]*', line).group(0) + elif insert_name is not None: + # In insert mode, either the next line is more code, or the end of the insert + if re.match(END_INSERT_REGEX, line) is not None: + insert_name = None + else: + inserts[insert_name] = inserts.get(insert_name, '') + line + '\n' + else: + # Handle function definitions & ssz_objects + if pulling_from is not None: + # SSZ Object + if len(line) > 18 and line[:6] == 'class ' and line[-12:] == '(Container):': + name = line[6:-12] + # Check consistency with markdown header + assert name == current_name + is_ssz = True + # function definition + elif function_matcher.match(line) is not None: + current_name = function_matcher.match(line).group(0) + is_ssz = False + if is_ssz: + ssz_objects[current_name] = ssz_objects.get(current_name, '') + line + '\n' + else: + functions[current_name] = functions.get(current_name, '') + line + '\n' + # Handle constant table entries + elif pulling_from is None and len(line) > 0 and line[0] == '|': + row = line[1:].split('|') + if len(row) >= 2: + for i in range(2): + row[i] = row[i].strip().strip('`') + if '`' in row[i]: + row[i] = row[i][:row[i].find('`')] + eligible = True + if row[0][0] 
not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_': + eligible = False + for c in row[0]: + if c not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789': + eligible = False + if eligible: + constants[row[0]] = row[1].replace('**TBD**', '0x1234567890123456789012345678901234567890') + return functions, constants, ssz_objects, inserts diff --git a/scripts/phase0/build_spec.py b/scripts/phase0/build_spec.py deleted file mode 100644 index 457a13da0..000000000 --- a/scripts/phase0/build_spec.py +++ /dev/null @@ -1,97 +0,0 @@ -import sys -import function_puller - - -def build_phase0_spec(sourcefile, outfile): - code_lines = [] - code_lines.append(""" - -from typing import ( - Any, - Dict, - List, - NewType, - Tuple, -) -from eth2spec.utils.minimal_ssz import ( - SSZType, - hash_tree_root, - signing_root, -) -from eth2spec.utils.hash_function import hash -from eth2spec.utils.bls import ( - bls_aggregate_pubkeys, - bls_verify, - bls_verify_multiple, -) - - -# stub, will get overwritten by real var -SLOTS_PER_EPOCH = 64 - -Slot = NewType('Slot', int) # uint64 -Epoch = NewType('Epoch', int) # uint64 -Shard = NewType('Shard', int) # uint64 -ValidatorIndex = NewType('ValidatorIndex', int) # uint64 -Gwei = NewType('Gwei', int) # uint64 -Bytes32 = NewType('Bytes32', bytes) # bytes32 -BLSPubkey = NewType('BLSPubkey', bytes) # bytes48 -BLSSignature = NewType('BLSSignature', bytes) # bytes96 -Store = None -""") - - code_lines += function_puller.get_spec(sourcefile) - - code_lines.append(""" -# Monkey patch validator compute committee code -_compute_committee = compute_committee -committee_cache = {} - - -def compute_committee(indices: List[ValidatorIndex], seed: Bytes32, index: int, count: int) -> List[ValidatorIndex]: - param_hash = (hash_tree_root(indices), seed, index, count) - - if param_hash in committee_cache: - return committee_cache[param_hash] - else: - ret = _compute_committee(indices, seed, index, count) - committee_cache[param_hash] = ret - return ret - - -# Monkey patch hash cache -_hash = 
hash -hash_cache = {} - - -def hash(x): - if x in hash_cache: - return hash_cache[x] - else: - ret = _hash(x) - hash_cache[x] = ret - return ret - - -# Access to overwrite spec constants based on configuration -def apply_constants_preset(preset: Dict[str, Any]): - global_vars = globals() - for k, v in preset.items(): - global_vars[k] = v - - # Deal with derived constants - global_vars['GENESIS_EPOCH'] = slot_to_epoch(GENESIS_SLOT) - - # Initialize SSZ types again, to account for changed lengths - init_SSZ_types() -""") - - with open(outfile, 'w') as out: - out.write("\n".join(code_lines)) - - -if __name__ == '__main__': - if len(sys.argv) < 3: - print("Usage: ") - build_phase0_spec(sys.argv[1], sys.argv[2]) - diff --git a/scripts/phase0/function_puller.py b/scripts/phase0/function_puller.py deleted file mode 100644 index e54df3ef0..000000000 --- a/scripts/phase0/function_puller.py +++ /dev/null @@ -1,75 +0,0 @@ -import sys -from typing import List - - -def get_spec(file_name: str) -> List[str]: - code_lines = [] - pulling_from = None - current_name = None - current_typedef = None - type_defs = [] - for linenum, line in enumerate(open(sys.argv[1]).readlines()): - line = line.rstrip() - if pulling_from is None and len(line) > 0 and line[0] == '#' and line[-1] == '`': - current_name = line[line[:-1].rfind('`') + 1: -1] - if line[:9] == '```python': - assert pulling_from is None - pulling_from = linenum + 1 - elif line[:3] == '```': - if pulling_from is None: - pulling_from = linenum - else: - if current_typedef is not None: - assert code_lines[-1] == '}' - code_lines[-1] = '})' - current_typedef[-1] = '})' - type_defs.append((current_name, current_typedef)) - pulling_from = None - current_typedef = None - else: - if pulling_from == linenum and line == '{': - code_lines.append('%s = SSZType({' % current_name) - current_typedef = ['global_vars["%s"] = SSZType({' % current_name] - elif pulling_from is not None: - # Add some whitespace between functions - if line[:3] == 
'def': - code_lines.append('') - code_lines.append('') - code_lines.append(line) - # Remember type def lines - if current_typedef is not None: - current_typedef.append(line) - elif pulling_from is None and len(line) > 0 and line[0] == '|': - row = line[1:].split('|') - if len(row) >= 2: - for i in range(2): - row[i] = row[i].strip().strip('`') - if '`' in row[i]: - row[i] = row[i][:row[i].find('`')] - eligible = True - if row[0][0] not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_': - eligible = False - for c in row[0]: - if c not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789': - eligible = False - if eligible: - code_lines.append(row[0] + ' = ' + (row[1].replace('**TBD**', '0x1234567890123456789012345678901234567890'))) - # Build type-def re-initialization - code_lines.append('\n') - code_lines.append('def init_SSZ_types():') - code_lines.append(' global_vars = globals()') - for ssz_type_name, ssz_type in type_defs: - code_lines.append('') - for type_line in ssz_type: - if len(type_line) > 0: - code_lines.append(' ' + type_line) - code_lines.append('\n') - code_lines.append('ssz_types = [\n') - for (ssz_type_name, _) in type_defs: - code_lines.append(f' "{ssz_type_name}",\n') - code_lines.append(']') - code_lines.append('\n') - code_lines.append('def get_ssz_type_by_name(name: str) -> SSZType:') - code_lines.append(' return globals()[name]') - code_lines.append('') - return code_lines diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index b5f208488..a6d9d23c5 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -23,16 +23,16 @@ - [Data structures](#data-structures) - [Misc dependencies](#misc-dependencies) - [`Fork`](#fork) + - [`Validator`](#validator) - [`Crosslink`](#crosslink) - - [`Eth1Data`](#eth1data) - [`AttestationData`](#attestationdata) - [`AttestationDataAndCustodyBit`](#attestationdataandcustodybit) - [`IndexedAttestation`](#indexedattestation) + - [`PendingAttestation`](#pendingattestation) + - [`Eth1Data`](#eth1data) 
+ - [`HistoricalBatch`](#historicalbatch) - [`DepositData`](#depositdata) - [`BeaconBlockHeader`](#beaconblockheader) - - [`Validator`](#validator) - - [`PendingAttestation`](#pendingattestation) - - [`HistoricalBatch`](#historicalbatch) - [Beacon operations](#beacon-operations) - [`ProposerSlashing`](#proposerslashing) - [`AttesterSlashing`](#attesterslashing) @@ -266,162 +266,151 @@ The types are defined topologically to aid in facilitating an executable version #### `Fork` ```python -{ +class Fork(Container): # Previous fork version - 'previous_version': 'bytes4', + previous_version: Bytes4 # Current fork version - 'current_version': 'bytes4', + current_version: Bytes4 # Fork epoch number - 'epoch': 'uint64', -} -``` - -#### `Crosslink` - -```python -{ - # Shard number - 'shard': 'uint64', - # Crosslinking data from epochs [start....end-1] - 'start_epoch': 'uint64', - 'end_epoch': 'uint64', - # Root of the previous crosslink - 'parent_root': 'bytes32', - # Root of the crosslinked shard data since the previous crosslink - 'data_root': 'bytes32', -} -``` - -#### `Eth1Data` - -```python -{ - # Root of the deposit tree - 'deposit_root': 'bytes32', - # Total number of deposits - 'deposit_count': 'uint64', - # Block hash - 'block_hash': 'bytes32', -} -``` - -#### `AttestationData` - -```python -{ - # LMD GHOST vote - 'beacon_block_root': 'bytes32', - - # FFG vote - 'source_epoch': 'uint64', - 'source_root': 'bytes32', - 'target_epoch': 'uint64', - 'target_root': 'bytes32', - - # Crosslink vote - 'crosslink': Crosslink, -} -``` - -#### `AttestationDataAndCustodyBit` - -```python -{ - # Attestation data - 'data': AttestationData, - # Custody bit - 'custody_bit': 'bool', -} -``` - -#### `IndexedAttestation` - -```python -{ - # Validator indices - 'custody_bit_0_indices': ['uint64'], - 'custody_bit_1_indices': ['uint64'], - # Attestation data - 'data': AttestationData, - # Aggregate signature - 'signature': 'bytes96', -} -``` - -#### `DepositData` - -```python -{ - # BLS 
pubkey - 'pubkey': 'bytes48', - # Withdrawal credentials - 'withdrawal_credentials': 'bytes32', - # Amount in Gwei - 'amount': 'uint64', - # Container self-signature - 'signature': 'bytes96', -} -``` - -#### `BeaconBlockHeader` - -```python -{ - 'slot': 'uint64', - 'parent_root': 'bytes32', - 'state_root': 'bytes32', - 'body_root': 'bytes32', - 'signature': 'bytes96', -} + epoch: uint64 ``` #### `Validator` ```python -{ +class Validator(Container): # BLS public key - 'pubkey': 'bytes48', + pubkey: Bytes48 # Withdrawal credentials - 'withdrawal_credentials': 'bytes32', + withdrawal_credentials: Bytes32 # Epoch when became eligible for activation - 'activation_eligibility_epoch': 'uint64', + activation_eligibility_epoch: uint64 # Epoch when validator activated - 'activation_epoch': 'uint64', + activation_epoch: uint64 # Epoch when validator exited - 'exit_epoch': 'uint64', + exit_epoch: uint64 # Epoch when validator is eligible to withdraw - 'withdrawable_epoch': 'uint64', + withdrawable_epoch: uint64 # Was the validator slashed - 'slashed': 'bool', + slashed: bool # Effective balance - 'effective_balance': 'uint64', -} + effective_balance: uint64 +``` + +#### `Crosslink` + +```python +class Crosslink(Container): + # Shard number + shard: uint64 + # Crosslinking data from epochs [start....end-1] + start_epoch: uint64 + end_epoch: uint64 + # Root of the previous crosslink + parent_root: Bytes32 + # Root of the crosslinked shard data since the previous crosslink + data_root: Bytes32 +``` + +#### `AttestationData` + +```python +class AttestationData(Container): + # LMD GHOST vote + beacon_block_root: Bytes32 + + # FFG vote + source_epoch: uint64 + source_root: Bytes32 + target_epoch: uint64 + target_root: Bytes32 + + # Crosslink vote + crosslink: Crosslink +``` + +#### `AttestationDataAndCustodyBit` + +```python +class AttestationDataAndCustodyBit(Container): + # Attestation data + data: AttestationData + # Custody bit + custody_bit: bool +``` + +#### 
`IndexedAttestation` + +```python +class IndexedAttestation(Container): + # Validator indices + custody_bit_0_indices: List[uint64] + custody_bit_1_indices: List[uint64] + # Attestation data + data: AttestationData + # Aggregate signature + signature: Bytes96 ``` #### `PendingAttestation` ```python -{ +class PendingAttestation(Container): # Attester aggregation bitfield - 'aggregation_bitfield': 'bytes', + aggregation_bitfield: bytes # Attestation data - 'data': AttestationData, + data: AttestationData # Inclusion delay - 'inclusion_delay': 'uint64', + inclusion_delay: uint64 # Proposer index - 'proposer_index': 'uint64', -} + proposer_index: uint64 +``` + +#### `Eth1Data` + +```python +class Eth1Data(Container): + # Root of the deposit tree + deposit_root: Bytes32 + # Total number of deposits + deposit_count: uint64 + # Block hash + block_hash: Bytes32 ``` #### `HistoricalBatch` ```python -{ +class HistoricalBatch(Container): # Block roots - 'block_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT], + block_roots: Vector[Bytes32, SLOTS_PER_HISTORICAL_ROOT] # State roots - 'state_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT], -} + state_roots: Vector[Bytes32, SLOTS_PER_HISTORICAL_ROOT] +``` + +#### `DepositData` + +```python +class DepositData(Container): + # BLS pubkey + pubkey: Bytes48 + # Withdrawal credentials + withdrawal_credentials: Bytes32 + # Amount in Gwei + amount: uint64 + # Container self-signature + signature: Bytes96 +``` + +#### `BeaconBlockHeader` + +```python +class BeaconBlockHeader(Container): + slot: uint64 + parent_root: Bytes32 + state_root: Bytes32 + body_root: Bytes32 + signature: Bytes96 ``` ### Beacon operations @@ -429,85 +418,79 @@ The types are defined topologically to aid in facilitating an executable version #### `ProposerSlashing` ```python -{ +class ProposerSlashing(Container): # Proposer index - 'proposer_index': 'uint64', + proposer_index: uint64 # First block header - 'header_1': BeaconBlockHeader, + header_1: BeaconBlockHeader # 
Second block header - 'header_2': BeaconBlockHeader, -} + header_2: BeaconBlockHeader ``` #### `AttesterSlashing` ```python -{ +class AttesterSlashing(Container): # First attestation - 'attestation_1': IndexedAttestation, + attestation_1: IndexedAttestation # Second attestation - 'attestation_2': IndexedAttestation, -} + attestation_2: IndexedAttestation ``` #### `Attestation` ```python -{ +class Attestation(Container): # Attester aggregation bitfield - 'aggregation_bitfield': 'bytes', + aggregation_bitfield: bytes # Attestation data - 'data': AttestationData, + data: AttestationData # Custody bitfield - 'custody_bitfield': 'bytes', + custody_bitfield: bytes # BLS aggregate signature - 'signature': 'bytes96', -} + signature: Bytes96 ``` #### `Deposit` ```python -{ +class Deposit(Container): # Branch in the deposit tree - 'proof': ['bytes32', DEPOSIT_CONTRACT_TREE_DEPTH], + proof: Vector[Bytes32, DEPOSIT_CONTRACT_TREE_DEPTH] # Data - 'data': DepositData, -} + data: DepositData ``` #### `VoluntaryExit` ```python -{ +class VoluntaryExit(Container): # Minimum epoch for processing exit - 'epoch': 'uint64', + epoch: uint64 # Index of the exiting validator - 'validator_index': 'uint64', + validator_index: uint64 # Validator signature - 'signature': 'bytes96', -} + signature: Bytes96 ``` #### `Transfer` ```python -{ +class Transfer(Container): # Sender index - 'sender': 'uint64', + sender: uint64 # Recipient index - 'recipient': 'uint64', + recipient: uint64 # Amount in Gwei - 'amount': 'uint64', + amount: uint64 # Fee in Gwei for block proposer - 'fee': 'uint64', + fee: uint64 # Inclusion slot - 'slot': 'uint64', + slot: uint64 # Sender withdrawal pubkey - 'pubkey': 'bytes48', + pubkey: Bytes48 # Sender signature - 'signature': 'bytes96', -} + signature: Bytes96 ``` ### Beacon blocks @@ -515,30 +498,28 @@ The types are defined topologically to aid in facilitating an executable version #### `BeaconBlockBody` ```python -{ - 'randao_reveal': 'bytes96', - 'eth1_data': 
Eth1Data, - 'graffiti': 'bytes32', - 'proposer_slashings': [ProposerSlashing], - 'attester_slashings': [AttesterSlashing], - 'attestations': [Attestation], - 'deposits': [Deposit], - 'voluntary_exits': [VoluntaryExit], - 'transfers': [Transfer], -} +class BeaconBlockBody(Container): + randao_reveal: Bytes96 + eth1_data: Eth1Data + graffiti: Bytes32 + proposer_slashings: List[ProposerSlashing] + attester_slashings: List[AttesterSlashing] + attestations: List[Attestation] + deposits: List[Deposit] + voluntary_exits: List[VoluntaryExit] + transfers: List[Transfer] ``` #### `BeaconBlock` ```python -{ +class BeaconBlock(Container): # Header - 'slot': 'uint64', - 'parent_root': 'bytes32', - 'state_root': 'bytes32', - 'body': BeaconBlockBody, - 'signature': 'bytes96', -} + slot: uint64 + parent_root: Bytes32 + state_root: Bytes32 + body: BeaconBlockBody + signature: Bytes96 ``` ### Beacon state @@ -546,46 +527,40 @@ The types are defined topologically to aid in facilitating an executable version #### `BeaconState` ```python -{ +class BeaconState(Container): # Misc - 'slot': 'uint64', - 'genesis_time': 'uint64', - 'fork': Fork, # For versioning hard forks - + slot: uint64 + genesis_time: uint64 + fork: Fork # For versioning hard forks # Validator registry - 'validator_registry': [Validator], - 'balances': ['uint64'], - + validator_registry: List[Validator] + balances: List[uint64] # Randomness and committees - 'latest_randao_mixes': ['bytes32', LATEST_RANDAO_MIXES_LENGTH], - 'latest_start_shard': 'uint64', - + latest_randao_mixes: Vector[Bytes32, LATEST_RANDAO_MIXES_LENGTH] + latest_start_shard: uint64 # Finality - 'previous_epoch_attestations': [PendingAttestation], - 'current_epoch_attestations': [PendingAttestation], - 'previous_justified_epoch': 'uint64', - 'current_justified_epoch': 'uint64', - 'previous_justified_root': 'bytes32', - 'current_justified_root': 'bytes32', - 'justification_bitfield': 'uint64', - 'finalized_epoch': 'uint64', - 'finalized_root': 'bytes32', 
- + previous_epoch_attestations: List[PendingAttestation] + current_epoch_attestations: List[PendingAttestation] + previous_justified_epoch: uint64 + current_justified_epoch: uint64 + previous_justified_root: Bytes32 + current_justified_root: Bytes32 + justification_bitfield: uint64 + finalized_epoch: uint64 + finalized_root: Bytes32 # Recent state - 'current_crosslinks': [Crosslink, SHARD_COUNT], - 'previous_crosslinks': [Crosslink, SHARD_COUNT], - 'latest_block_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT], - 'latest_state_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT], - 'latest_active_index_roots': ['bytes32', LATEST_ACTIVE_INDEX_ROOTS_LENGTH], - 'latest_slashed_balances': ['uint64', LATEST_SLASHED_EXIT_LENGTH], - 'latest_block_header': BeaconBlockHeader, - 'historical_roots': ['bytes32'], - + current_crosslinks: Vector[Crosslink, SHARD_COUNT] + previous_crosslinks: Vector[Crosslink, SHARD_COUNT] + latest_block_roots: Vector[Bytes32, SLOTS_PER_HISTORICAL_ROOT] + latest_state_roots: Vector[Bytes32, SLOTS_PER_HISTORICAL_ROOT] + latest_active_index_roots: Vector[Bytes32, LATEST_ACTIVE_INDEX_ROOTS_LENGTH] + latest_slashed_balances: Vector[uint64, LATEST_SLASHED_EXIT_LENGTH] + latest_block_header: BeaconBlockHeader + historical_roots: List[Bytes32] # Ethereum 1.0 chain data - 'latest_eth1_data': Eth1Data, - 'eth1_data_votes': [Eth1Data], - 'deposit_index': 'uint64', -} + latest_eth1_data: Eth1Data + eth1_data_votes: List[Eth1Data] + deposit_index: uint64 ``` ## Custom types @@ -599,9 +574,8 @@ We define the following Python custom types for type hinting and readability: | `Shard` | `uint64` | a shard number | | `ValidatorIndex` | `uint64` | a validator registry index | | `Gwei` | `uint64` | an amount in Gwei | -| `Bytes32` | `bytes32` | 32 bytes of binary data | -| `BLSPubkey` | `bytes48` | a BLS12-381 public key | -| `BLSSignature` | `bytes96` | a BLS12-381 signature | +| `BLSPubkey` | `Bytes48` | a BLS12-381 public key | +| `BLSSignature` | `Bytes96` | a 
BLS12-381 signature | ## Helper functions @@ -611,7 +585,7 @@ We define the following Python custom types for type hinting and readability: ```python def xor(bytes1: Bytes32, bytes2: Bytes32) -> Bytes32: - return bytes(a ^ b for a, b in zip(bytes1, bytes2)) + return Bytes32(a ^ b for a, b in zip(bytes1, bytes2)) ``` ### `hash` @@ -626,7 +600,7 @@ The `hash` function is SHA256. ### `signing_root` -`def signing_root(object: SSZContainer) -> Bytes32` is a function defined in the [SimpleSerialize spec](../simple-serialize.md#self-signed-containers) to compute signing messages. +`def signing_root(object: Container) -> Bytes32` is a function defined in the [SimpleSerialize spec](../simple-serialize.md#self-signed-containers) to compute signing messages. ### `bls_domain` @@ -1272,14 +1246,19 @@ def process_slot(state: BeaconState) -> None: ### Epoch processing +Note: the `# @LabelHere` lines below are placeholders to show that code will be inserted here in a future phase. + ```python def process_epoch(state: BeaconState) -> None: process_justification_and_finalization(state) process_crosslinks(state) process_rewards_and_penalties(state) process_registry_updates(state) + # @process_reveal_deadlines + # @process_challenge_deadlines process_slashings(state) process_final_updates(state) + # @after_process_final_updates ``` #### Helper functions @@ -1433,7 +1412,7 @@ def get_attestation_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: matching_head_attestations = get_matching_head_attestations(state, previous_epoch) for attestations in (matching_source_attestations, matching_target_attestations, matching_head_attestations): unslashed_attesting_indices = get_unslashed_attesting_indices(state, attestations) - attesting_balance = get_attesting_balance(state, attestations) + attesting_balance = get_total_balance(state, unslashed_attesting_indices) for index in eligible_validator_indices: if index in unslashed_attesting_indices: rewards[index] += get_base_reward(state, 
index) * attesting_balance // total_balance diff --git a/specs/core/1_custody-game.md b/specs/core/1_custody-game.md index 568879b17..6c89ef853 100644 --- a/specs/core/1_custody-game.md +++ b/specs/core/1_custody-game.md @@ -31,8 +31,7 @@ - [`BeaconState`](#beaconstate) - [`BeaconBlockBody`](#beaconblockbody) - [Helpers](#helpers) - - [`typeof`](#typeof) - - [`empty`](#empty) + - [`ceillog2`](#ceillog2) - [`get_crosslink_chunk_count`](#get_crosslink_chunk_count) - [`get_custody_chunk_bit`](#get_custody_chunk_bit) - [`get_chunk_bits_root`](#get_chunk_bits_root) @@ -121,66 +120,61 @@ This document details the beacon chain additions and changes in Phase 1 of Ether #### `CustodyChunkChallenge` ```python -{ - 'responder_index': ValidatorIndex, - 'attestation': Attestation, - 'chunk_index': 'uint64', -} +class CustodyChunkChallenge(Container): + responder_index: ValidatorIndex + attestation: Attestation + chunk_index: uint64 ``` #### `CustodyBitChallenge` ```python -{ - 'responder_index': ValidatorIndex, - 'attestation': Attestation, - 'challenger_index': ValidatorIndex, - 'responder_key': BLSSignature, - 'chunk_bits': Bitfield, - 'signature': BLSSignature, -} +class CustodyBitChallenge(Container): + responder_index: ValidatorIndex + attestation: Attestation + challenger_index: ValidatorIndex + responder_key: Bytes96 + chunk_bits: bytes + signature: Bytes96 ``` #### `CustodyChunkChallengeRecord` ```python -{ - 'challenge_index': 'uint64', - 'challenger_index': ValidatorIndex, - 'responder_index': ValidatorIndex, - 'inclusion_epoch': Epoch, - 'data_root': Hash, - 'depth': 'uint64', - 'chunk_index': 'uint64', -} +class CustodyChunkChallengeRecord(Container): + challenge_index: uint64 + challenger_index: ValidatorIndex + responder_index: ValidatorIndex + inclusion_epoch: Epoch + data_root: Bytes32 + depth: uint64 + chunk_index: uint64 ``` #### `CustodyBitChallengeRecord` ```python -{ - 'challenge_index': 'uint64', - 'challenger_index': ValidatorIndex, - 'responder_index': 
ValidatorIndex, - 'inclusion_epoch': Epoch, - 'data_root': Hash, - 'chunk_count': 'uint64', - 'chunk_bits_merkle_root': Hash, - 'responder_key': BLSSignature, -} +class CustodyBitChallengeRecord(Container): + challenge_index: uint64 + challenger_index: ValidatorIndex + responder_index: ValidatorIndex + inclusion_epoch: Epoch + data_root: Bytes32 + chunk_count: uint64 + chunk_bits_merkle_root: Bytes32 + responder_key: Bytes96 ``` #### `CustodyResponse` ```python -{ - 'challenge_index': 'uint64', - 'chunk_index': 'uint64', - 'chunk': ['byte', BYTES_PER_CUSTODY_CHUNK], - 'data_branch': [Hash], - 'chunk_bits_branch': [Hash], - 'chunk_bits_leaf': Hash, -} +class CustodyResponse(Container): + challenge_index: uint64 + chunk_index: uint64 + chunk: Vector[bytes, BYTES_PER_CUSTODY_CHUNK] + data_branch: List[Bytes32] + chunk_bits_branch: List[Bytes32] + chunk_bits_leaf: Bytes32 ``` ### New beacon operations @@ -188,12 +182,11 @@ This document details the beacon chain additions and changes in Phase 1 of Ether #### `CustodyKeyReveal` ```python -{ +class CustodyKeyReveal(Container): # Index of the validator whose key is being revealed - 'revealer_index': 'uint64', + revealer_index: uint64 # Reveal (masked signature) - 'reveal': 'bytes96', -} + reveal: Bytes96 ``` #### `EarlyDerivedSecretReveal` @@ -201,18 +194,17 @@ This document details the beacon chain additions and changes in Phase 1 of Ether Represents an early (punishable) reveal of one of the derived secrets, where derived secrets are RANDAO reveals and custody reveals (both are part of the same domain). 
```python -{ +class EarlyDerivedSecretReveal(Container): # Index of the validator whose key is being revealed - 'revealed_index': 'uint64', + revealed_index: uint64 # RANDAO epoch of the key that is being revealed - 'epoch': 'uint64', + epoch: uint64 # Reveal (masked signature) - 'reveal': 'bytes96', + reveal: Bytes96 # Index of the validator who revealed (whistleblower) - 'masker_index': 'uint64', + masker_index: uint64 # Mask used to hide the actual reveal signature (prevent reveal from being stolen) - 'mask': 'bytes32', -} + mask: Bytes32 ``` ### Phase 0 container updates @@ -222,44 +214,46 @@ Add the following fields to the end of the specified container objects. Fields w #### `Validator` ```python - # next_custody_reveal_period is initialized to the custody period +class Validator(Container): + # next_custody_reveal_period is initialised to the custody period # (of the particular validator) in which the validator is activated # = get_validators_custody_reveal_period(...) - 'next_custody_reveal_period': 'uint64', - 'max_reveal_lateness': 'uint64', + next_custody_reveal_period: uint64 + max_reveal_lateness: uint64 ``` #### `BeaconState` ```python - 'custody_chunk_challenge_records': [CustodyChunkChallengeRecord], - 'custody_bit_challenge_records': [CustodyBitChallengeRecord], - 'custody_challenge_index': 'uint64', +class BeaconState(Container): + custody_chunk_challenge_records: List[CustodyChunkChallengeRecord] + custody_bit_challenge_records: List[CustodyBitChallengeRecord] + custody_challenge_index: uint64 # Future derived secrets already exposed; contains the indices of the exposed validator # at RANDAO reveal period % EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS - 'exposed_derived_secrets': [['uint64'], EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS], + exposed_derived_secrets: Vector[List[uint64], EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS] ``` #### `BeaconBlockBody` ```python - 'custody_chunk_challenges': [CustodyChunkChallenge], - 
'custody_bit_challenges': [CustodyBitChallenge], - 'custody_responses': [CustodyResponse], - 'custody_key_reveals': [CustodyKeyReveal], - 'early_derived_secret_reveals': [EarlyDerivedSecretReveal], +class BeaconBlockBody(Container): + custody_chunk_challenges: List[CustodyChunkChallenge] + custody_bit_challenges: List[CustodyBitChallenge] + custody_responses: List[CustodyResponse] + custody_key_reveals: List[CustodyKeyReveal] + early_derived_secret_reveals: List[EarlyDerivedSecretReveal] ``` ## Helpers -### `typeof` +### `ceillog2` -The `typeof` function accepts and SSZ object as a single input and returns the corresponding SSZ type. - -### `empty` - -The `empty` function accepts and SSZ type as input and returns an object of that type with all fields initialized to default values. +```python +def ceillog2(x): + return x.bit_length() +``` ### `get_crosslink_chunk_count` @@ -273,19 +267,19 @@ def get_custody_chunk_count(crosslink: Crosslink) -> int: ### `get_custody_chunk_bit` ```python -def get_custody_chunk_bit(key: BLSSignature, chunk: bytes) -> bool: +def get_custody_chunk_bit(key: Bytes96, chunk: bytes) -> bool: # TODO: Replace with something MPC-friendly, e.g. 
the Legendre symbol - return get_bitfield_bit(hash(challenge.responder_key + chunk), 0) + return get_bitfield_bit(hash(key + chunk), 0) ``` ### `get_chunk_bits_root` ```python -def get_chunk_bits_root(chunk_bitfield: Bitfield) -> Bytes32: +def get_chunk_bits_root(chunk_bitfield: bytes) -> Bytes32: aggregated_bits = bytearray([0] * 32) for i in range(0, len(chunk_bitfield), 32): for j in range(32): - aggregated_bits[j] ^= chunk_bitfield[i+j] + aggregated_bits[j] ^= chunk_bitfield[i + j] return hash(aggregated_bits) ``` @@ -315,11 +309,10 @@ def get_validators_custody_reveal_period(state: BeaconState, ### `replace_empty_or_append` - ```python def replace_empty_or_append(list: List[Any], new_element: Any) -> int: for i in range(len(list)): - if list[i] == empty(typeof(new_element)): + if is_empty(list[i]): list[i] = new_element return i list.append(new_element) @@ -340,8 +333,7 @@ For each `reveal` in `block.body.custody_key_reveals`, run the following functio ```python def process_custody_key_reveal(state: BeaconState, - reveal: CustodyKeyReveal) -> None: - + reveal: CustodyKeyReveal) -> None: """ Process ``CustodyKeyReveal`` operation. Note that this function mutates ``state``. 
@@ -369,15 +361,18 @@ def process_custody_key_reveal(state: BeaconState, # Decrement max reveal lateness if response is timely if revealer.next_custody_reveal_period == get_validators_custody_reveal_period(state, reveal.revealer_index) - 2: - revealer.max_reveal_lateness -= MAX_REVEAL_LATENESS_DECREMENT - revealer.max_reveal_lateness = max(revealed_validator.max_reveal_lateness, get_validators_custody_reveal_period(state, reveal.revealed_index) - revealer.next_custody_reveal_period) + revealer.max_reveal_lateness -= MAX_REVEAL_LATENESS_DECREMENT + revealer.max_reveal_lateness = max( + revealer.max_reveal_lateness, + get_validators_custody_reveal_period(state, reveal.revealed_index) - revealer.next_custody_reveal_period + ) # Process reveal revealer.next_custody_reveal_period += 1 # Reward Block Preposer proposer_index = get_beacon_proposer_index(state) - increase_balance(state, proposer_index, base_reward(state, index) // MINOR_REWARD_QUOTIENT) + increase_balance(state, proposer_index, get_base_reward(state, reveal.revealer_index) // MINOR_REWARD_QUOTIENT) ``` #### Early derived secret reveals @@ -388,7 +383,7 @@ For each `reveal` in `block.body.early_derived_secret_reveals`, run the followin ```python def process_early_derived_secret_reveal(state: BeaconState, - reveal: EarlyDerivedSecretReveal) -> None: + reveal: EarlyDerivedSecretReveal) -> None: """ Process ``EarlyDerivedSecretReveal`` operation. Note that this function mutates ``state``. 
@@ -396,11 +391,12 @@ def process_early_derived_secret_reveal(state: BeaconState, revealed_validator = state.validator_registry[reveal.revealed_index] masker = state.validator_registry[reveal.masker_index] + derived_secret_location = reveal.epoch % EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS assert reveal.epoch >= get_current_epoch(state) + RANDAO_PENALTY_EPOCHS assert reveal.epoch < get_current_epoch(state) + EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS assert revealed_validator.slashed is False - assert reveal.revealed_index not in state.exposed_derived_secrets[reveal.epoch % EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS] + assert reveal.revealed_index not in state.exposed_derived_secrets[derived_secret_location] # Verify signature correctness masker = state.validator_registry[reveal.masker_index] @@ -432,12 +428,16 @@ def process_early_derived_secret_reveal(state: BeaconState, # Calculate penalty max_proposer_slot_reward = ( - get_base_reward(state, reveal.revealed_index) * - SLOTS_PER_EPOCH // - len(get_active_validator_indices(state, get_current_epoch(state))) // - PROPOSER_REWARD_QUOTIENT + get_base_reward(state, reveal.revealed_index) + * SLOTS_PER_EPOCH + // len(get_active_validator_indices(state, get_current_epoch(state))) + // PROPOSER_REWARD_QUOTIENT + ) + penalty = ( + max_proposer_slot_reward + * EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE + * (len(state.exposed_derived_secrets[derived_secret_location]) + 1) ) - penalty = max_proposer_slot_reward * EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE * (len(state.exposed_derived_secrets[reveal.epoch % EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS]) + 1) # Apply penalty proposer_index = get_beacon_proposer_index(state) @@ -449,8 +449,7 @@ def process_early_derived_secret_reveal(state: BeaconState, decrease_balance(state, reveal.revealed_index, penalty) # Mark this derived secret as exposed so validator cannot be punished repeatedly - state.exposed_derived_secrets[reveal.epoch % 
EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS].append(reveal.revealed_index) - + state.exposed_derived_secrets[derived_secret_location].append(reveal.revealed_index) ``` #### Chunk challenges @@ -463,13 +462,13 @@ For each `challenge` in `block.body.custody_chunk_challenges`, run the following def process_chunk_challenge(state: BeaconState, challenge: CustodyChunkChallenge) -> None: # Verify the attestation - assert verify_indexed_attestation(state, convert_to_indexed(state, challenge.attestation)) + validate_indexed_attestation(state, convert_to_indexed(state, challenge.attestation)) # Verify it is not too late to challenge assert slot_to_epoch(challenge.attestation.data.slot) >= get_current_epoch(state) - MAX_CHUNK_CHALLENGE_DELAY responder = state.validator_registry[challenge.responder_index] assert responder.exit_epoch >= get_current_epoch(state) - MAX_CHUNK_CHALLENGE_DELAY # Verify the responder participated in the attestation - attesters = get_attesting_indices(state, attestation.data, attestation.aggregation_bitfield) + attesters = get_attesting_indices(state, challenge.attestation.data, challenge.attestation.aggregation_bitfield) assert challenge.responder_index in attesters # Verify the challenge is not a duplicate for record in state.custody_chunk_challenge_records: @@ -478,13 +477,13 @@ def process_chunk_challenge(state: BeaconState, record.chunk_index != challenge.chunk_index ) # Verify depth - depth = math.log2(next_power_of_two(get_custody_chunk_count(challenge.attestation.data.crosslink))) + depth = ceillog2(get_custody_chunk_count(challenge.attestation.data.crosslink)) assert challenge.chunk_index < 2**depth # Add new chunk challenge record new_record = CustodyChunkChallengeRecord( challenge_index=state.custody_challenge_index, challenger_index=get_beacon_proposer_index(state), - responder_index=challenge.responder_index + responder_index=challenge.responder_index, inclusion_epoch=get_current_epoch(state), 
data_root=challenge.attestation.data.crosslink.data_root, depth=depth, @@ -518,10 +517,11 @@ def process_bit_challenge(state: BeaconState, assert is_slashable_validator(challenger, get_current_epoch(state)) # Verify the attestation - assert verify_indexed_attestation(state, convert_to_indexed(state, challenge.attestation)) + attestation = challenge.attestation + validate_indexed_attestation(state, convert_to_indexed(state, attestation)) # Verify the attestation is eligible for challenging responder = state.validator_registry[challenge.responder_index] - assert (slot_to_epoch(challenge.attestation.data.slot) + responder.max_reveal_lateness <= + assert (slot_to_epoch(attestation.data.slot) + responder.max_reveal_lateness <= get_validators_custody_reveal_period(state, challenge.responder_index)) # Verify the responder participated in the attestation @@ -537,7 +537,7 @@ def process_bit_challenge(state: BeaconState, get_validators_custody_reveal_period( state=state, index=challenge.responder_index, - epoch=slot_to_epoch(attestation.data.slot), + epoch=slot_to_epoch(attestation.data.slot)), challenge.responder_index ) assert bls_verify( @@ -552,10 +552,10 @@ def process_bit_challenge(state: BeaconState, ) # Verify the chunk count - chunk_count = get_custody_chunk_count(challenge.attestation.data.crosslink) + chunk_count = get_custody_chunk_count(attestation.data.crosslink) assert verify_bitfield(challenge.chunk_bits, chunk_count) # Verify the first bit of the hash of the chunk bits does not equal the custody bit - custody_bit = get_bitfield_bit(attestation.custody_bitfield, attesters.index(responder_index)) + custody_bit = get_bitfield_bit(attestation.custody_bitfield, attesters.index(challenge.responder_index)) assert custody_bit != get_bitfield_bit(get_chunk_bits_root(challenge.chunk_bits), 0) # Add new bit challenge record new_record = CustodyBitChallengeRecord( @@ -563,9 +563,9 @@ def process_bit_challenge(state: BeaconState, 
challenger_index=challenge.challenger_index, responder_index=challenge.responder_index, inclusion_epoch=get_current_epoch(state), - data_root=challenge.attestation.data.crosslink.data_root, + data_root=attestation.data.crosslink.data_root, chunk_count=chunk_count, - chunk_bits_merkle_root=merkle_root(pad_to_power_of_2((challenge.chunk_bits))), + chunk_bits_merkle_root=hash_tree_root(challenge.chunk_bits), responder_key=challenge.responder_key, ) replace_empty_or_append(state.custody_bit_challenge_records, new_record) @@ -584,11 +584,13 @@ For each `response` in `block.body.custody_responses`, run the following functio ```python def process_custody_response(state: BeaconState, response: CustodyResponse) -> None: - chunk_challenge = next(record for record in state.custody_chunk_challenge_records if record.challenge_index == response.challenge_index, None) + chunk_challenge = next((record for record in state.custody_chunk_challenge_records + if record.challenge_index == response.challenge_index), None) if chunk_challenge is not None: return process_chunk_challenge_response(state, response, chunk_challenge) - bit_challenge = next(record for record in state.custody_bit_challenge_records if record.challenge_index == response.challenge_index, None) + bit_challenge = next((record for record in state.custody_bit_challenge_records + if record.challenge_index == response.challenge_index), None) if bit_challenge is not None: return process_bit_challenge_response(state, response, bit_challenge) @@ -618,7 +620,7 @@ def process_chunk_challenge_response(state: BeaconState, records[records.index(challenge)] = CustodyChunkChallengeRecord() # Reward the proposer proposer_index = get_beacon_proposer_index(state) - increase_balance(state, proposer_index, base_reward(state, index) // MINOR_REWARD_QUOTIENT) + increase_balance(state, proposer_index, get_base_reward(state, proposer_index) // MINOR_REWARD_QUOTIENT) ``` ```python @@ -634,7 +636,7 @@ def process_bit_challenge_response(state: 
BeaconState, assert verify_merkle_branch( leaf=hash_tree_root(response.chunk), branch=response.data_branch, - depth=math.log2(next_power_of_two(challenge.chunk_count)), + depth=ceillog2(challenge.chunk_count), index=response.chunk_index, root=challenge.data_root, ) @@ -642,12 +644,13 @@ def process_bit_challenge_response(state: BeaconState, assert verify_merkle_branch( leaf=response.chunk_bits_leaf, branch=response.chunk_bits_branch, - depth=math.log2(next_power_of_two(challenge.chunk_count) // 256), + depth=ceillog2(challenge.chunk_count) >> 8, index=response.chunk_index // 256, root=challenge.chunk_bits_merkle_root ) # Verify the chunk bit does not match the challenge chunk bit - assert get_custody_chunk_bit(challenge.responder_key, response.chunk) != get_bitfield_bit(challenge.chunk_bits_leaf, response.chunk_index % 256) + assert (get_custody_chunk_bit(challenge.responder_key, response.chunk) + != get_bitfield_bit(challenge.chunk_bits_leaf, response.chunk_index % 256)) # Clear the challenge records = state.custody_bit_challenge_records records[records.index(challenge)] = CustodyBitChallengeRecord() @@ -659,20 +662,25 @@ def process_bit_challenge_response(state: BeaconState, ### Handling of custody-related deadlines - Run `process_reveal_deadlines(state)` immediately after `process_ejections(state)`: +Run `process_reveal_deadlines(state)` immediately after `process_registry_updates(state)`: - ```python +```python +# begin insert @process_reveal_deadlines + process_reveal_deadlines(state) +# end insert @process_reveal_deadlines def process_reveal_deadlines(state: BeaconState) -> None: for index, validator in enumerate(state.validator_registry): - if (validator.latest_custody_reveal_period + - (CUSTODY_RESPONSE_DEADLINE // EPOCHS_PER_CUSTODY_PERIOD) < - get_validators_custody_reveal_period(state, index)): - slash_validator(state, index) + deadline = validator.next_custody_reveal_period + (CUSTODY_RESPONSE_DEADLINE // EPOCHS_PER_CUSTODY_PERIOD) + if 
get_validators_custody_reveal_period(state, index) > deadline: + slash_validator(state, index) ``` Run `process_challenge_deadlines(state)` immediately after `process_reveal_deadlines(state)`: ```python +# begin insert @process_challenge_deadlines + process_challenge_deadlines(state) +# end insert @process_challenge_deadlines def process_challenge_deadlines(state: BeaconState) -> None: for challenge in state.custody_chunk_challenge_records: if get_current_epoch(state) > challenge.inclusion_epoch + CUSTODY_RESPONSE_DEADLINE: @@ -690,10 +698,15 @@ def process_challenge_deadlines(state: BeaconState) -> None: Append this to `process_final_updates(state)`: ```python +# begin insert @after_process_final_updates + after_process_final_updates(state) +# end insert @after_process_final_updates +def after_process_final_updates(state: BeaconState) -> None: + current_epoch = get_current_epoch(state) # Clean up exposed RANDAO key reveals state.exposed_derived_secrets[current_epoch % EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS] = [] # Reset withdrawable epochs if challenge records are empty - records = state.custody_chunk_challenge_records + state.bit_challenge_records + records = state.custody_chunk_challenge_records + state.custody_bit_challenge_records validator_indices_in_records = set( [record.challenger_index for record in records] + [record.responder_index for record in records] ) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 1d1186247..21e08e7c9 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -15,9 +15,9 @@ - [Signature domains](#signature-domains) - [Data structures](#data-structures) - [`ShardBlockBody`](#shardblockbody) + - [`ShardAttestation`](#shardattestation) - [`ShardBlock`](#shardblock) - [`ShardBlockHeader`](#shardblockheader) - - [`ShardAttestation`](#shardattestation) - [Helper functions](#helper-functions) - [`get_period_committee`](#get_period_committee) - 
[`get_switchover_epoch`](#get_switchover_epoch) @@ -46,8 +46,9 @@ This document describes the shard data layer and the shard fork choice rule in P | - | - | | `BYTES_PER_SHARD_BLOCK_BODY` | `2**14` (= 16,384) | | `MAX_SHARD_ATTESTIONS` | `2**4` (= 16) | -| `PHASE_1_GENESIS_EPOCH` | **TBD** | -| `PHASE_1_GENESIS_SLOT` | get_epoch_start_slot(PHASE_1_GENESIS_EPOCH) | +| `PHASE_1_FORK_EPOCH` | **TBD** | +| `PHASE_1_FORK_SLOT` | **TBD** | +| `GENESIS_SHARD_SLOT` | 0 | ### Time parameters @@ -55,6 +56,7 @@ This document describes the shard data layer and the shard fork choice rule in P | - | - | :-: | :-: | | `CROSSLINK_LOOKBACK` | `2**0` (= 1) | epochs | 6.2 minutes | | `PERSISTENT_COMMITTEE_PERIOD` | `2**11` (= 2,048) | epochs | ~9 days | +| `SECONDS_PER_SLOT` | `2**1 * 3**1` (= 6) | seconds | 6 seconds | ### Signature domains @@ -68,51 +70,48 @@ This document describes the shard data layer and the shard fork choice rule in P ### `ShardBlockBody` ```python -['byte', BYTES_PER_SHARD_BLOCK_BODY] -``` - -### `ShardBlock` - -```python -{ - 'slot': Slot, - 'shard': Shard, - 'beacon_chain_root': Hash, - 'parent_root': Hash, - 'data': ShardBlockBody, - 'state_root': Hash, - 'attestations': [ShardAttestation], - 'signature': BLSSignature, -} -``` - -### `ShardBlockHeader` - -```python -{ - 'slot': Slot, - 'shard': Shard, - 'beacon_chain_root': Hash, - 'parent_root': Hash, - 'body_root': Hash, - 'state_root': Hash, - 'attestations': [ShardAttestation], - 'signature': BLSSignature, -} +class ShardBlockBody(Container): + data: Vector[bytes, BYTES_PER_SHARD_BLOCK_BODY] ``` ### `ShardAttestation` ```python -{ - 'data': { - 'slot': Slot, - 'shard': Shard, - 'shard_block_root': Hash, - }, - 'aggregation_bitfield': Bitfield, - 'aggregate_signature': BLSSignature, -} +class ShardAttestation(Container): + class data(Container): + slot: uint64 + shard: uint64 + shard_block_root: Bytes32 + aggregation_bitfield: bytes + aggregate_signature: Bytes96 +``` + +### `ShardBlock` + +```python +class
ShardBlock(Container): + slot: uint64 + shard: uint64 + beacon_chain_root: Bytes32 + parent_root: Bytes32 + data: ShardBlockBody + state_root: Bytes32 + attestations: List[ShardAttestation] + signature: Bytes96 +``` + +### `ShardBlockHeader` + +```python +class ShardBlockHeader(Container): + slot: uint64 + shard: uint64 + beacon_chain_root: Bytes32 + parent_root: Bytes32 + body_root: Bytes32 + state_root: Bytes32 + attestations: List[ShardAttestation] + signature: Bytes96 ``` ## Helper functions @@ -120,7 +119,11 @@ This document describes the shard data layer and the shard fork choice rule in P ### `get_period_committee` ```python -def get_period_committee(state: BeaconState, epoch: Epoch, shard: Shard, index: int, count: int) -> List[ValidatorIndex]: +def get_period_committee(state: BeaconState, + epoch: Epoch, + shard: Shard, + index: int, + count: int) -> List[ValidatorIndex]: """ Return committee for a period. Used to construct persistent committees. """ @@ -137,7 +140,8 @@ def get_period_committee(state: BeaconState, epoch: Epoch, shard: Shard, index: ```python def get_switchover_epoch(state: BeaconState, epoch: Epoch, index: ValidatorIndex): earlier_start_epoch = epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD * 2 - return bytes_to_int(hash(generate_seed(state, earlier_start_epoch) + bytes3(index))[0:8]) % PERSISTENT_COMMITTEE_PERIOD + return (bytes_to_int(hash(generate_seed(state, earlier_start_epoch) + int_to_bytes(index, length=3)[0:8])) + % PERSISTENT_COMMITTEE_PERIOD) ``` ### `get_persistent_committee` @@ -198,14 +202,14 @@ def get_shard_proposer_index(state: BeaconState, ```python def get_shard_header(block: ShardBlock) -> ShardBlockHeader: return ShardBlockHeader( - slot: block.slot, - shard: block.shard, - beacon_chain_root: block.beacon_chain_root, - parent_root: block.parent_root, - body_root: hash_tree_root(block.body), - state_root: block.state_root, - attestations: block.attestations, - signature: block.signature, + 
slot=block.slot, + shard=block.shard, + beacon_chain_root=block.beacon_chain_root, + parent_root=block.parent_root, + body_root=hash_tree_root(block.body), + state_root=block.state_root, + attestations=block.attestations, + signature=block.signature, ) ``` @@ -219,7 +223,7 @@ def verify_shard_attestation_signature(state: BeaconState, assert verify_bitfield(attestation.aggregation_bitfield, len(persistent_committee)) pubkeys = [] for i, index in enumerate(persistent_committee): - if get_bitfield_bit(attestation.aggregation_bitfield, i) == 0b1 + if get_bitfield_bit(attestation.aggregation_bitfield, i) == 0b1: validator = state.validator_registry[index] assert is_active_validator(validator, get_current_epoch(state)) pubkeys.append(validator.pubkey) @@ -234,7 +238,7 @@ def verify_shard_attestation_signature(state: BeaconState, ### `compute_crosslink_data_root` ```python -def compute_crosslink_data_root(blocks: List[ShardBlock]) -> Hash: +def compute_crosslink_data_root(blocks: List[ShardBlock]) -> Bytes32: def is_power_of_two(value: int) -> bool: return (value > 0) and (value & (value - 1) == 0) @@ -243,15 +247,20 @@ def compute_crosslink_data_root(blocks: List[ShardBlock]) -> Hash: values += [b'\x00' * BYTES_PER_SHARD_BLOCK_BODY] return values - def merkle_root_of_bytes(data: bytes) -> bytes: - return merkle_root([data[i:i + 32] for i in range(0, len(data), 32)]) + def hash_tree_root_of_bytes(data: bytes) -> bytes: + return hash_tree_root([data[i:i + 32] for i in range(0, len(data), 32)]) + + def zpad(data: bytes, length: int) -> bytes: + return data + b'\x00' * (length - len(data)) return hash( - merkle_root(pad_to_power_of_2([ - merkle_root_of_bytes(zpad(serialize(get_shard_header(block)), BYTES_PER_SHARD_BLOCK_BODY)) for block in blocks - ])) + - merkle_root(pad_to_power_of_2([ - merkle_root_of_bytes(block.body) for block in blocks + hash_tree_root(pad_to_power_of_2([ + hash_tree_root_of_bytes( + zpad(serialize(get_shard_header(block)), BYTES_PER_SHARD_BLOCK_BODY) 
+ ) for block in blocks + ])) + + hash_tree_root(pad_to_power_of_2([ + hash_tree_root_of_bytes(block.body) for block in blocks ])) ) ``` @@ -265,23 +274,20 @@ Let: * `beacon_blocks` be the `BeaconBlock` list such that `beacon_blocks[slot]` is the canonical `BeaconBlock` at slot `slot` * `beacon_state` be the canonical `BeaconState` after processing `beacon_blocks[-1]` * `valid_shard_blocks` be the list of valid `ShardBlock`, recursively defined -* `unix_time` be the current unix time * `candidate` be a candidate `ShardBlock` for which validity is to be determined by running `is_valid_shard_block` ```python def is_valid_shard_block(beacon_blocks: List[BeaconBlock], beacon_state: BeaconState, valid_shard_blocks: List[ShardBlock], - unix_time: uint64, - candidate: ShardBlock) -> bool + candidate: ShardBlock) -> bool: # Check if block is already determined valid for _, block in enumerate(valid_shard_blocks): if candidate == block: return True # Check slot number - assert candidate.slot >= PHASE_1_GENESIS_SLOT - assert unix_time >= beacon_state.genesis_time + (block.slot - GENESIS_SLOT) * SECONDS_PER_SLOT + assert candidate.slot >= PHASE_1_FORK_SLOT # Check shard number assert candidate.shard <= SHARD_COUNT @@ -289,20 +295,20 @@ def is_valid_shard_block(beacon_blocks: List[BeaconBlock], # Check beacon block beacon_block = beacon_blocks[candidate.slot] assert candidate.beacon_block_root == signing_root(beacon_block) - assert beacon_block.slot <= candidate.slot: + assert beacon_block.slot <= candidate.slot # Check state root assert candidate.state_root == ZERO_HASH # [to be removed in phase 2] # Check parent block - if candidate.slot == PHASE_1_GENESIS_SLOT: + if candidate.slot == PHASE_1_FORK_SLOT: assert candidate.parent_root == ZERO_HASH else: parent_block = next( - block for block in valid_shard_blocks if - signing_root(block) == candidate.parent_root - , None) - assert parent_block != None + (block for block in valid_shard_blocks if signing_root(block) == 
candidate.parent_root), + None + ) + assert parent_block is not None assert parent_block.shard == candidate.shard assert parent_block.slot < candidate.slot assert signing_root(beacon_blocks[parent_block.slot]) == parent_block.beacon_chain_root @@ -319,10 +325,10 @@ def is_valid_shard_block(beacon_blocks: List[BeaconBlock], proposer_index = get_shard_proposer_index(beacon_state, candidate.shard, candidate.slot) assert proposer_index is not None assert bls_verify( - pubkey=validators[proposer_index].pubkey, + pubkey=beacon_state.validator_registry[proposer_index].pubkey, message_hash=signing_root(block), signature=candidate.signature, - domain=get_domain(beacon_state, slot_to_epoch(candidate.slot), DOMAIN_SHARD_PROPOSER) + domain=get_domain(beacon_state, slot_to_epoch(candidate.slot), DOMAIN_SHARD_PROPOSER), ) return True @@ -342,10 +348,10 @@ def is_valid_shard_attestation(valid_shard_blocks: List[ShardBlock], candidate: ShardAttestation) -> bool: # Check shard block shard_block = next( - block for block in valid_shard_blocks if - signing_root(block) == candidate.data.shard_block_root - , None) - assert shard_block != None + (block for block in valid_shard_blocks if signing_root(block) == candidate.data.shard_block_root), + None, + ) + assert shard_block is not None assert shard_block.slot == candidate.data.slot assert shard_block.shard == candidate.data.shard @@ -377,18 +383,19 @@ def is_valid_beacon_attestation(shard: Shard, return True # Check previous attestation - if candidate.data.previous_crosslink.epoch <= PHASE_1_GENESIS_EPOCH: + if candidate.data.previous_crosslink.epoch <= PHASE_1_FORK_EPOCH: assert candidate.data.previous_crosslink.data_root == ZERO_HASH else: previous_attestation = next( - attestation for attestation in valid_attestations if - attestation.data.crosslink.data_root == candidate.data.previous_crosslink.data_root - , None) - assert previous_attestation != None + (attestation for attestation in valid_attestations if + 
attestation.data.crosslink.data_root == candidate.data.previous_crosslink.data_root), + None, + ) + assert previous_attestation is not None assert candidate.data.previous_attestation.epoch < slot_to_epoch(candidate.data.slot) # Check crosslink data root - start_epoch = state.latest_crosslinks[shard].epoch + start_epoch = beacon_state.latest_crosslinks[shard].epoch end_epoch = min(slot_to_epoch(candidate.data.slot) - CROSSLINK_LOOKBACK, start_epoch + MAX_EPOCHS_PER_CROSSLINK) blocks = [] for slot in range(start_epoch * SLOTS_PER_EPOCH, end_epoch * SLOTS_PER_EPOCH): diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 8633c7ed1..2adff2388 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -13,9 +13,11 @@ - [Composite types](#composite-types) - [Aliases](#aliases) - [Default values](#default-values) + - [Illegal types](#illegal-types) - [Serialization](#serialization) - [`"uintN"`](#uintn) - [`"bool"`](#bool) + - [`"null`](#null) - [Vectors, containers, lists, unions](#vectors-containers-lists-unions) - [Deserialization](#deserialization) - [Merkleization](#merkleization) @@ -66,6 +68,10 @@ For convenience we alias: The default value of a type upon initialization is recursively defined using `0` for `"uintN"`, `False` for `"bool"`, and `[]` for lists. Unions default to the first type in the union (with type index zero), which is `"null"` if present in the union. +#### `is_empty` + +An SSZ object is called empty (and thus `is_empty(object)` returns true) if it is equal to the default value for that type. + ### Illegal types Empty vector types (i.e. `[subtype, 0]` for some `subtype`) are not legal. The `"null"` type is only legal as the first type in a union subtype (i.e., with type index zero). @@ -161,7 +167,7 @@ Let `value` be a self-signed container object. 
The convention is that the signat | Rust | Shasper | ParityTech | [https://github.com/paritytech/shasper/tree/master/util/ssz](https://github.com/paritytech/shasper/tree/master/util/ssz) | | TypeScript | Lodestar | ChainSafe Systems | [https://github.com/ChainSafe/ssz-js](https://github.com/ChainSafe/ssz-js) | | Java | Cava | ConsenSys | [https://www.github.com/ConsenSys/cava/tree/master/ssz](https://www.github.com/ConsenSys/cava/tree/master/ssz) | -| Go | Prysm | Prysmatic Labs | [https://github.com/prysmaticlabs/prysm/tree/master/shared/ssz](https://github.com/prysmaticlabs/prysm/tree/master/shared/ssz) | +| Go | Prysm | Prysmatic Labs | [https://github.com/prysmaticlabs/go-ssz](https://github.com/prysmaticlabs/go-ssz) | | Swift | Yeeth | Dean Eigenmann | [https://github.com/yeeth/SimpleSerialize.swift](https://github.com/yeeth/SimpleSerialize.swift) | | C# | | Jordan Andrews | [https://github.com/codingupastorm/csharp-ssz](https://github.com/codingupastorm/csharp-ssz) | | C++ | | Jiyun Kim | [https://github.com/NAKsir-melody/cpp_ssz](https://github.com/NAKsir-melody/cpp_ssz) | diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 1bb89f8e7..2f5aa4264 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -115,12 +115,11 @@ Once a validator has been processed and added to the beacon state's `validator_r In normal operation, the validator is quickly activated at which point the validator is added to the shuffling and begins validation after an additional `ACTIVATION_EXIT_DELAY` epochs (25.6 minutes). -The function [`is_active_validator`](../core/0_beacon-chain.md#is_active_validator) can be used to check if a validator is active during a given shuffling epoch. Note that the `BeaconState` contains a field `current_shuffling_epoch` which dictates from which epoch the current active validators are taken. 
Usage is as follows: +The function [`is_active_validator`](../core/0_beacon-chain.md#is_active_validator) can be used to check if a validator is active during a given epoch. Usage is as follows: ```python -shuffling_epoch = state.current_shuffling_epoch validator = state.validator_registry[validator_index] -is_active = is_active_validator(validator, shuffling_epoch) +is_active = is_active_validator(validator, get_current_epoch(state)) ``` Once a validator is activated, the validator is assigned [responsibilities](#beacon-chain-responsibilities) until exited. @@ -278,7 +277,7 @@ Up to `MAX_VOLUNTARY_EXITS` [`VoluntaryExit`](../core/0_beacon-chain.md#voluntar ### Attestations -A validator is expected to create, sign, and broadcast an attestation during each epoch. The committee, assigned shard, and assigned slot for which the validator performs this role during an epoch is defined by `get_committee_assignment(state, epoch, validator_index)`. +A validator is expected to create, sign, and broadcast an attestation during each epoch. The `committee`, assigned `shard`, and assigned `slot` for which the validator performs this role during an epoch is defined by `get_committee_assignment(state, epoch, validator_index)`. A validator should create and broadcast the attestation halfway through the `slot` during which the validator is assigned ― that is, `SECONDS_PER_SLOT * 0.5` seconds after the start of `slot`. 
diff --git a/test_generators/epoch_processing/main.py b/test_generators/epoch_processing/main.py index 8f067e4a3..2ce895fc5 100644 --- a/test_generators/epoch_processing/main.py +++ b/test_generators/epoch_processing/main.py @@ -1,6 +1,7 @@ from typing import Callable, Iterable -from eth2spec.phase0 import spec +from eth2spec.phase0 import spec as spec_phase0 +from eth2spec.phase1 import spec as spec_phase1 from eth2spec.test.epoch_processing import ( test_process_crosslinks, test_process_registry_updates @@ -14,7 +15,8 @@ def create_suite(transition_name: str, config_name: str, get_cases: Callable[[], -> Callable[[str], gen_typing.TestSuiteOutput]: def suite_definition(configs_path: str) -> gen_typing.TestSuiteOutput: presets = loader.load_presets(configs_path, config_name) - spec.apply_constants_preset(presets) + spec_phase0.apply_constants_preset(presets) + spec_phase1.apply_constants_preset(presets) return ("%s_%s" % (transition_name, config_name), transition_name, gen_suite.render_suite( title="%s epoch processing" % transition_name, diff --git a/test_generators/operations/main.py b/test_generators/operations/main.py index 96c639d12..82e05b307 100644 --- a/test_generators/operations/main.py +++ b/test_generators/operations/main.py @@ -13,14 +13,16 @@ from eth2spec.test.block_processing import ( from gen_base import gen_runner, gen_suite, gen_typing from gen_from_tests.gen import generate_from_tests from preset_loader import loader -from eth2spec.phase0 import spec +from eth2spec.phase0 import spec as spec_phase0 +from eth2spec.phase1 import spec as spec_phase1 def create_suite(operation_name: str, config_name: str, get_cases: Callable[[], Iterable[gen_typing.TestCase]]) \ -> Callable[[str], gen_typing.TestSuiteOutput]: def suite_definition(configs_path: str) -> gen_typing.TestSuiteOutput: presets = loader.load_presets(configs_path, config_name) - spec.apply_constants_preset(presets) + spec_phase0.apply_constants_preset(presets) + 
spec_phase1.apply_constants_preset(presets) return ("%s_%s" % (operation_name, config_name), operation_name, gen_suite.render_suite( title="%s operation" % operation_name, @@ -42,8 +44,8 @@ if __name__ == "__main__": create_suite('attester_slashing', 'mainnet', lambda: generate_from_tests(test_process_attester_slashing)), create_suite('block_header', 'minimal', lambda: generate_from_tests(test_process_block_header)), create_suite('block_header', 'mainnet', lambda: generate_from_tests(test_process_block_header)), - create_suite('deposit', 'minimal', lambda: generate_from_tests(test_process_deposit)), - create_suite('deposit', 'mainnet', lambda: generate_from_tests(test_process_deposit)), + create_suite('deposit', 'minimal', lambda: generate_from_tests(test_process_deposit)), + create_suite('deposit', 'mainnet', lambda: generate_from_tests(test_process_deposit)), create_suite('proposer_slashing', 'minimal', lambda: generate_from_tests(test_process_proposer_slashing)), create_suite('proposer_slashing', 'mainnet', lambda: generate_from_tests(test_process_proposer_slashing)), create_suite('transfer', 'minimal', lambda: generate_from_tests(test_process_transfer)), diff --git a/test_generators/sanity/main.py b/test_generators/sanity/main.py index bba6ed03d..a9c0fe160 100644 --- a/test_generators/sanity/main.py +++ b/test_generators/sanity/main.py @@ -5,14 +5,16 @@ from eth2spec.test.sanity import test_blocks, test_slots from gen_base import gen_runner, gen_suite, gen_typing from gen_from_tests.gen import generate_from_tests from preset_loader import loader -from eth2spec.phase0 import spec +from eth2spec.phase0 import spec as spec_phase0 +from eth2spec.phase1 import spec as spec_phase1 def create_suite(handler_name: str, config_name: str, get_cases: Callable[[], Iterable[gen_typing.TestCase]]) \ -> Callable[[str], gen_typing.TestSuiteOutput]: def suite_definition(configs_path: str) -> gen_typing.TestSuiteOutput: presets = loader.load_presets(configs_path, config_name) - 
spec.apply_constants_preset(presets) + spec_phase0.apply_constants_preset(presets) + spec_phase1.apply_constants_preset(presets) return ("%sanity_s_%s" % (handler_name, config_name), handler_name, gen_suite.render_suite( title="sanity testing", diff --git a/test_generators/shuffling/main.py b/test_generators/shuffling/main.py index 711597189..862c4d910 100644 --- a/test_generators/shuffling/main.py +++ b/test_generators/shuffling/main.py @@ -1,4 +1,5 @@ -from eth2spec.phase0 import spec +from eth2spec.phase0 import spec as spec_phase0 +from eth2spec.phase1 import spec as spec_phase1 from eth_utils import ( to_dict, to_tuple ) @@ -22,7 +23,8 @@ def shuffling_test_cases(): def mini_shuffling_suite(configs_path: str) -> gen_typing.TestSuiteOutput: presets = loader.load_presets(configs_path, 'minimal') - spec.apply_constants_preset(presets) + spec_phase0.apply_constants_preset(presets) + spec_phase1.apply_constants_preset(presets) return ("shuffling_minimal", "core", gen_suite.render_suite( title="Swap-or-Not Shuffling tests with minimal config", @@ -37,7 +39,8 @@ def mini_shuffling_suite(configs_path: str) -> gen_typing.TestSuiteOutput: def full_shuffling_suite(configs_path: str) -> gen_typing.TestSuiteOutput: presets = loader.load_presets(configs_path, 'mainnet') - spec.apply_constants_preset(presets) + spec_phase0.apply_constants_preset(presets) + spec_phase1.apply_constants_preset(presets) return ("shuffling_full", "core", gen_suite.render_suite( title="Swap-or-Not Shuffling tests with mainnet config", diff --git a/test_generators/ssz_static/main.py b/test_generators/ssz_static/main.py index e8995b918..7de5237d1 100644 --- a/test_generators/ssz_static/main.py +++ b/test_generators/ssz_static/main.py @@ -2,7 +2,7 @@ from random import Random from eth2spec.debug import random_value, encode from eth2spec.phase0 import spec -from eth2spec.utils.minimal_ssz import ( +from eth2spec.utils.ssz.ssz_impl import ( hash_tree_root, signing_root, serialize, diff --git 
a/test_libs/pyspec/eth2spec/test/block_processing/__init__.py b/test_libs/pyspec/__init__.py similarity index 100% rename from test_libs/pyspec/eth2spec/test/block_processing/__init__.py rename to test_libs/pyspec/__init__.py diff --git a/test_libs/pyspec/eth2spec/debug/decode.py b/test_libs/pyspec/eth2spec/debug/decode.py index e9aa8bc2b..5ce116025 100644 --- a/test_libs/pyspec/eth2spec/debug/decode.py +++ b/test_libs/pyspec/eth2spec/debug/decode.py @@ -1,28 +1,39 @@ -from eth2spec.utils.minimal_ssz import hash_tree_root +from eth2spec.utils.ssz.ssz_impl import hash_tree_root +from eth2spec.utils.ssz.ssz_typing import ( + is_uint_type, is_bool_type, is_list_type, + is_vector_type, is_bytes_type, is_bytesn_type, is_container_type, + read_vector_elem_type, read_list_elem_type, + Vector, BytesN +) -def decode(json, typ): - if isinstance(typ, str) and typ[:4] == 'uint': - return json - elif typ == 'bool': - assert json in (True, False) - return json - elif isinstance(typ, list): - return [decode(element, typ[0]) for element in json] - elif isinstance(typ, str) and typ[:4] == 'byte': - return bytes.fromhex(json[2:]) - elif hasattr(typ, 'fields'): +def decode(data, typ): + if is_uint_type(typ): + return data + elif is_bool_type(typ): + assert data in (True, False) + return data + elif is_list_type(typ): + elem_typ = read_list_elem_type(typ) + return [decode(element, elem_typ) for element in data] + elif is_vector_type(typ): + elem_typ = read_vector_elem_type(typ) + return Vector(decode(element, elem_typ) for element in data) + elif is_bytes_type(typ): + return bytes.fromhex(data[2:]) + elif is_bytesn_type(typ): + return BytesN(bytes.fromhex(data[2:])) + elif is_container_type(typ): temp = {} - for field, subtype in typ.fields.items(): - temp[field] = decode(json[field], subtype) - if field + "_hash_tree_root" in json: - assert(json[field + "_hash_tree_root"][2:] == + for field, subtype in typ.get_fields(): + temp[field] = decode(data[field], subtype) + if field + 
"_hash_tree_root" in data: + assert(data[field + "_hash_tree_root"][2:] == hash_tree_root(temp[field], subtype).hex()) ret = typ(**temp) - if "hash_tree_root" in json: - assert(json["hash_tree_root"][2:] == + if "hash_tree_root" in data: + assert(data["hash_tree_root"][2:] == hash_tree_root(ret, typ).hex()) return ret else: - print(json, typ) - raise Exception("Type not recognized") + raise Exception(f"Type not recognized: data={data}, typ={typ}") diff --git a/test_libs/pyspec/eth2spec/debug/encode.py b/test_libs/pyspec/eth2spec/debug/encode.py index b38e5fe98..61dd87928 100644 --- a/test_libs/pyspec/eth2spec/debug/encode.py +++ b/test_libs/pyspec/eth2spec/debug/encode.py @@ -1,27 +1,36 @@ -from eth2spec.utils.minimal_ssz import hash_tree_root +from eth2spec.utils.ssz.ssz_impl import hash_tree_root +from eth2spec.utils.ssz.ssz_typing import ( + is_uint_type, is_bool_type, is_list_type, is_vector_type, is_container_type, + read_elem_type, + uint +) def encode(value, typ, include_hash_tree_roots=False): - if isinstance(typ, str) and typ[:4] == 'uint': - if typ[4:] == '128' or typ[4:] == '256': + if is_uint_type(typ): + if hasattr(typ, '__supertype__'): + typ = typ.__supertype__ + # Larger uints are boxed and the class declares their byte length + if issubclass(typ, uint) and typ.byte_len > 8: return str(value) return value - elif typ == 'bool': + elif is_bool_type(typ): assert value in (True, False) return value - elif isinstance(typ, list): - return [encode(element, typ[0], include_hash_tree_roots) for element in value] - elif isinstance(typ, str) and typ[:4] == 'byte': + elif is_list_type(typ) or is_vector_type(typ): + elem_typ = read_elem_type(typ) + return [encode(element, elem_typ, include_hash_tree_roots) for element in value] + elif isinstance(typ, type) and issubclass(typ, bytes): # both bytes and BytesN return '0x' + value.hex() - elif hasattr(typ, 'fields'): + elif is_container_type(typ): ret = {} - for field, subtype in typ.fields.items(): - ret[field] = 
encode(getattr(value, field), subtype, include_hash_tree_roots) + for field, subtype in typ.get_fields(): + field_value = getattr(value, field) + ret[field] = encode(field_value, subtype, include_hash_tree_roots) if include_hash_tree_roots: - ret[field + "_hash_tree_root"] = '0x' + hash_tree_root(getattr(value, field), subtype).hex() + ret[field + "_hash_tree_root"] = '0x' + hash_tree_root(field_value, subtype).hex() if include_hash_tree_roots: ret["hash_tree_root"] = '0x' + hash_tree_root(value, typ).hex() return ret else: - print(value, typ) - raise Exception("Type not recognized") + raise Exception(f"Type not recognized: value={value}, typ={typ}") diff --git a/test_libs/pyspec/eth2spec/debug/random_value.py b/test_libs/pyspec/eth2spec/debug/random_value.py index f28181943..3edcc8808 100644 --- a/test_libs/pyspec/eth2spec/debug/random_value.py +++ b/test_libs/pyspec/eth2spec/debug/random_value.py @@ -2,12 +2,19 @@ from random import Random from typing import Any from enum import Enum +from eth2spec.utils.ssz.ssz_impl import is_basic_type -UINT_SIZES = [8, 16, 32, 64, 128, 256] +from eth2spec.utils.ssz.ssz_typing import ( + is_uint_type, is_bool_type, is_list_type, + is_vector_type, is_bytes_type, is_bytesn_type, is_container_type, + read_vector_elem_type, read_list_elem_type, + uint_byte_size +) -basic_types = ["uint%d" % v for v in UINT_SIZES] + ['bool', 'byte'] +# in bytes +UINT_SIZES = (1, 2, 4, 8, 16, 32) -random_mode_names = ["random", "zero", "max", "nil", "one", "lengthy"] +random_mode_names = ("random", "zero", "max", "nil", "one", "lengthy") class RandomizationMode(Enum): @@ -49,104 +56,103 @@ def get_random_ssz_object(rng: Random, """ if chaos: mode = rng.choice(list(RandomizationMode)) - if isinstance(typ, str): + if is_bytes_type(typ): # Bytes array - if typ == 'bytes': - if mode == RandomizationMode.mode_nil_count: - return b'' - if mode == RandomizationMode.mode_max_count: - return get_random_bytes_list(rng, max_bytes_length) - if mode == 
RandomizationMode.mode_one_count: - return get_random_bytes_list(rng, 1) - if mode == RandomizationMode.mode_zero: - return b'\x00' - if mode == RandomizationMode.mode_max: - return b'\xff' - return get_random_bytes_list(rng, rng.randint(0, max_bytes_length)) - elif typ[:5] == 'bytes' and len(typ) > 5: - length = int(typ[5:]) - # Sanity, don't generate absurdly big random values - # If a client is aiming to performance-test, they should create a benchmark suite. - assert length <= max_bytes_length - if mode == RandomizationMode.mode_zero: - return b'\x00' * length - if mode == RandomizationMode.mode_max: - return b'\xff' * length - return get_random_bytes_list(rng, length) - # Basic types + if mode == RandomizationMode.mode_nil_count: + return b'' + elif mode == RandomizationMode.mode_max_count: + return get_random_bytes_list(rng, max_bytes_length) + elif mode == RandomizationMode.mode_one_count: + return get_random_bytes_list(rng, 1) + elif mode == RandomizationMode.mode_zero: + return b'\x00' + elif mode == RandomizationMode.mode_max: + return b'\xff' + else: + return get_random_bytes_list(rng, rng.randint(0, max_bytes_length)) + elif is_bytesn_type(typ): + # BytesN + length = typ.length + # Sanity, don't generate absurdly big random values + # If a client is aiming to performance-test, they should create a benchmark suite. 
+ assert length <= max_bytes_length + if mode == RandomizationMode.mode_zero: + return b'\x00' * length + elif mode == RandomizationMode.mode_max: + return b'\xff' * length + else: + return get_random_bytes_list(rng, length) + elif is_basic_type(typ): + # Basic types + if mode == RandomizationMode.mode_zero: + return get_min_basic_value(typ) + elif mode == RandomizationMode.mode_max: + return get_max_basic_value(typ) else: - if mode == RandomizationMode.mode_zero: - return get_min_basic_value(typ) - if mode == RandomizationMode.mode_max: - return get_max_basic_value(typ) return get_random_basic_value(rng, typ) - # Vector: - elif isinstance(typ, list) and len(typ) == 2: + elif is_vector_type(typ): + # Vector + elem_typ = read_vector_elem_type(typ) return [ - get_random_ssz_object(rng, typ[0], max_bytes_length, max_list_length, mode, chaos) - for _ in range(typ[1]) + get_random_ssz_object(rng, elem_typ, max_bytes_length, max_list_length, mode, chaos) + for _ in range(typ.length) ] - # List: - elif isinstance(typ, list) and len(typ) == 1: + elif is_list_type(typ): + # List + elem_typ = read_list_elem_type(typ) length = rng.randint(0, max_list_length) if mode == RandomizationMode.mode_one_count: length = 1 - if mode == RandomizationMode.mode_max_count: + elif mode == RandomizationMode.mode_max_count: length = max_list_length + return [ - get_random_ssz_object(rng, typ[0], max_bytes_length, max_list_length, mode, chaos) + get_random_ssz_object(rng, elem_typ, max_bytes_length, max_list_length, mode, chaos) for _ in range(length) ] - # Container: - elif hasattr(typ, 'fields'): + elif is_container_type(typ): + # Container return typ(**{ field: get_random_ssz_object(rng, subtype, max_bytes_length, max_list_length, mode, chaos) - for field, subtype in typ.fields.items() + for field, subtype in typ.get_fields() }) else: - print(typ) - raise Exception("Type not recognized") + raise Exception(f"Type not recognized: typ={typ}") def get_random_bytes_list(rng: Random, length: int) 
-> bytes: return bytes(rng.getrandbits(8) for _ in range(length)) -def get_random_basic_value(rng: Random, typ: str) -> Any: - if typ == 'bool': +def get_random_basic_value(rng: Random, typ) -> Any: + if is_bool_type(typ): return rng.choice((True, False)) - if typ[:4] == 'uint': - size = int(typ[4:]) + elif is_uint_type(typ): + size = uint_byte_size(typ) assert size in UINT_SIZES - return rng.randint(0, 2**size - 1) - if typ == 'byte': - return rng.randint(0, 8) + return rng.randint(0, 256**size - 1) else: - raise ValueError("Not a basic type") + raise ValueError(f"Not a basic type: typ={typ}") -def get_min_basic_value(typ: str) -> Any: - if typ == 'bool': +def get_min_basic_value(typ) -> Any: + if is_bool_type(typ): return False - if typ[:4] == 'uint': - size = int(typ[4:]) + elif is_uint_type(typ): + size = uint_byte_size(typ) assert size in UINT_SIZES return 0 - if typ == 'byte': - return 0x00 else: - raise ValueError("Not a basic type") + raise ValueError(f"Not a basic type: typ={typ}") -def get_max_basic_value(typ: str) -> Any: - if typ == 'bool': +def get_max_basic_value(typ) -> Any: + if is_bool_type(typ): return True - if typ[:4] == 'uint': - size = int(typ[4:]) + elif is_uint_type(typ): + size = uint_byte_size(typ) assert size in UINT_SIZES - return 2**size - 1 - if typ == 'byte': - return 0xff + return 256**size - 1 else: - raise ValueError("Not a basic type") + raise ValueError(f"Not a basic type: typ={typ}") diff --git a/test_libs/pyspec/eth2spec/test/epoch_processing/__init__.py b/test_libs/pyspec/eth2spec/phase1/__init__.py similarity index 100% rename from test_libs/pyspec/eth2spec/test/epoch_processing/__init__.py rename to test_libs/pyspec/eth2spec/phase1/__init__.py diff --git a/test_libs/pyspec/eth2spec/test/block_processing/test_process_attestation.py b/test_libs/pyspec/eth2spec/test/block_processing/test_process_attestation.py deleted file mode 100644 index 700d68b53..000000000 --- 
a/test_libs/pyspec/eth2spec/test/block_processing/test_process_attestation.py +++ /dev/null @@ -1,301 +0,0 @@ -from copy import deepcopy - -import eth2spec.phase0.spec as spec -from eth2spec.phase0.spec import ( - get_current_epoch, - process_attestation, - process_slots, -) -from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls -from eth2spec.test.helpers.attestations import ( - get_valid_attestation, - sign_attestation, -) -from eth2spec.test.helpers.state import ( - next_epoch, - next_slot, -) -from eth2spec.test.helpers.block import apply_empty_block - - -def run_attestation_processing(state, attestation, valid=True): - """ - Run ``process_attestation``, yielding: - - pre-state ('pre') - - attestation ('attestation') - - post-state ('post'). - If ``valid == False``, run expecting ``AssertionError`` - """ - # yield pre-state - yield 'pre', state - - yield 'attestation', attestation - - # If the attestation is invalid, processing is aborted, and there is no post-state. 
- if not valid: - expect_assertion_error(lambda: process_attestation(state, attestation)) - yield 'post', None - return - - current_epoch_count = len(state.current_epoch_attestations) - previous_epoch_count = len(state.previous_epoch_attestations) - - # process attestation - process_attestation(state, attestation) - - # Make sure the attestation has been processed - if attestation.data.target_epoch == get_current_epoch(state): - assert len(state.current_epoch_attestations) == current_epoch_count + 1 - else: - assert len(state.previous_epoch_attestations) == previous_epoch_count + 1 - - # yield post-state - yield 'post', state - - -@spec_state_test -def test_success(state): - attestation = get_valid_attestation(state, signed=True) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - - yield from run_attestation_processing(state, attestation) - - -@spec_state_test -def test_success_previous_epoch(state): - attestation = get_valid_attestation(state, signed=True) - next_epoch(state) - apply_empty_block(state) - - yield from run_attestation_processing(state, attestation) - - -@spec_state_test -def test_success_since_max_epochs_per_crosslink(state): - for _ in range(spec.MAX_EPOCHS_PER_CROSSLINK + 2): - next_epoch(state) - apply_empty_block(state) - - attestation = get_valid_attestation(state, signed=True) - data = attestation.data - # test logic sanity check: make sure the attestation only includes MAX_EPOCHS_PER_CROSSLINK epochs - assert data.crosslink.end_epoch - data.crosslink.start_epoch == spec.MAX_EPOCHS_PER_CROSSLINK - - for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY): - next_slot(state) - apply_empty_block(state) - - yield from run_attestation_processing(state, attestation) - - -@always_bls -@spec_state_test -def test_invalid_attestation_signature(state): - attestation = get_valid_attestation(state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - - yield from run_attestation_processing(state, attestation, False) - - -@spec_state_test -def 
test_before_inclusion_delay(state): - attestation = get_valid_attestation(state, signed=True) - # do not increment slot to allow for inclusion delay - - yield from run_attestation_processing(state, attestation, False) - - -@spec_state_test -def test_after_epoch_slots(state): - attestation = get_valid_attestation(state, signed=True) - # increment past latest inclusion slot - process_slots(state, state.slot + spec.SLOTS_PER_EPOCH + 1) - apply_empty_block(state) - - yield from run_attestation_processing(state, attestation, False) - - -@spec_state_test -def test_old_source_epoch(state): - state.slot = spec.SLOTS_PER_EPOCH * 5 - state.finalized_epoch = 2 - state.previous_justified_epoch = 3 - state.current_justified_epoch = 4 - attestation = get_valid_attestation(state, slot=(spec.SLOTS_PER_EPOCH * 3) + 1) - - # test logic sanity check: make sure the attestation is pointing to oldest known source epoch - assert attestation.data.source_epoch == state.previous_justified_epoch - - # Now go beyond that, it will be invalid - attestation.data.source_epoch -= 1 - - sign_attestation(state, attestation) - - yield from run_attestation_processing(state, attestation, False) - - -@spec_state_test -def test_wrong_shard(state): - attestation = get_valid_attestation(state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - - attestation.data.crosslink.shard += 1 - - sign_attestation(state, attestation) - - yield from run_attestation_processing(state, attestation, False) - - -@spec_state_test -def test_new_source_epoch(state): - attestation = get_valid_attestation(state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - - attestation.data.source_epoch += 1 - - sign_attestation(state, attestation) - - yield from run_attestation_processing(state, attestation, False) - - -@spec_state_test -def test_source_root_is_target_root(state): - attestation = get_valid_attestation(state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - - attestation.data.source_root = 
attestation.data.target_root - - sign_attestation(state, attestation) - - yield from run_attestation_processing(state, attestation, False) - - -@spec_state_test -def test_invalid_current_source_root(state): - state.slot = spec.SLOTS_PER_EPOCH * 5 - state.finalized_epoch = 2 - - state.previous_justified_epoch = 3 - state.previous_justified_root = b'\x01' * 32 - - state.current_justified_epoch = 4 - state.current_justified_root = b'\xff' * 32 - - attestation = get_valid_attestation(state, slot=(spec.SLOTS_PER_EPOCH * 3) + 1) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - - # Test logic sanity checks: - assert state.current_justified_root != state.previous_justified_root - assert attestation.data.source_root == state.previous_justified_root - - # Make attestation source root invalid: should be previous justified, not current one - attestation.data.source_root = state.current_justified_root - - sign_attestation(state, attestation) - - yield from run_attestation_processing(state, attestation, False) - - -@spec_state_test -def test_bad_source_root(state): - attestation = get_valid_attestation(state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - - attestation.data.source_root = b'\x42' * 32 - - sign_attestation(state, attestation) - - yield from run_attestation_processing(state, attestation, False) - - -@spec_state_test -def test_non_zero_crosslink_data_root(state): - attestation = get_valid_attestation(state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - - attestation.data.crosslink.data_root = b'\x42' * 32 - - sign_attestation(state, attestation) - - yield from run_attestation_processing(state, attestation, False) - - -@spec_state_test -def test_bad_parent_crosslink(state): - next_epoch(state) - apply_empty_block(state) - - attestation = get_valid_attestation(state, signed=True) - for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY): - next_slot(state) - apply_empty_block(state) - - attestation.data.crosslink.parent_root = b'\x27' * 32 - - 
yield from run_attestation_processing(state, attestation, False) - - -@spec_state_test -def test_bad_crosslink_start_epoch(state): - next_epoch(state) - apply_empty_block(state) - - attestation = get_valid_attestation(state, signed=True) - for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY): - next_slot(state) - apply_empty_block(state) - - attestation.data.crosslink.start_epoch += 1 - - yield from run_attestation_processing(state, attestation, False) - - -@spec_state_test -def test_bad_crosslink_end_epoch(state): - next_epoch(state) - apply_empty_block(state) - - attestation = get_valid_attestation(state, signed=True) - for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY): - next_slot(state) - apply_empty_block(state) - - attestation.data.crosslink.end_epoch += 1 - - yield from run_attestation_processing(state, attestation, False) - - -@spec_state_test -def test_inconsistent_bitfields(state): - attestation = get_valid_attestation(state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - - attestation.custody_bitfield = deepcopy(attestation.aggregation_bitfield) + b'\x00' - - sign_attestation(state, attestation) - - yield from run_attestation_processing(state, attestation, False) - - -@spec_state_test -def test_non_empty_custody_bitfield(state): - attestation = get_valid_attestation(state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - - attestation.custody_bitfield = deepcopy(attestation.aggregation_bitfield) - - sign_attestation(state, attestation) - - yield from run_attestation_processing(state, attestation, False) - - -@spec_state_test -def test_empty_aggregation_bitfield(state): - attestation = get_valid_attestation(state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - - attestation.aggregation_bitfield = b'\x00' * len(attestation.aggregation_bitfield) - - sign_attestation(state, attestation) - - yield from run_attestation_processing(state, attestation) diff --git 
a/test_libs/pyspec/eth2spec/test/block_processing/test_process_attester_slashing.py b/test_libs/pyspec/eth2spec/test/block_processing/test_process_attester_slashing.py deleted file mode 100644 index 28e232277..000000000 --- a/test_libs/pyspec/eth2spec/test/block_processing/test_process_attester_slashing.py +++ /dev/null @@ -1,149 +0,0 @@ -import eth2spec.phase0.spec as spec -from eth2spec.phase0.spec import ( - get_beacon_proposer_index, - process_attester_slashing, -) -from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls -from eth2spec.test.helpers.attestations import sign_indexed_attestation -from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing -from eth2spec.test.helpers.block import apply_empty_block -from eth2spec.test.helpers.state import ( - get_balance, - next_epoch, -) - - -def run_attester_slashing_processing(state, attester_slashing, valid=True): - """ - Run ``process_attester_slashing``, yielding: - - pre-state ('pre') - - attester_slashing ('attester_slashing') - - post-state ('post'). 
- If ``valid == False``, run expecting ``AssertionError`` - """ - - yield 'pre', state - yield 'attester_slashing', attester_slashing - - if not valid: - expect_assertion_error(lambda: process_attester_slashing(state, attester_slashing)) - yield 'post', None - return - - slashed_index = attester_slashing.attestation_1.custody_bit_0_indices[0] - pre_slashed_balance = get_balance(state, slashed_index) - - proposer_index = get_beacon_proposer_index(state) - pre_proposer_balance = get_balance(state, proposer_index) - - # Process slashing - process_attester_slashing(state, attester_slashing) - - slashed_validator = state.validator_registry[slashed_index] - - # Check slashing - assert slashed_validator.slashed - assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH - assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH - - if slashed_index != proposer_index: - # lost whistleblower reward - assert get_balance(state, slashed_index) < pre_slashed_balance - # gained whistleblower reward - assert get_balance(state, proposer_index) > pre_proposer_balance - else: - # gained rewards for all slashings, which may include others. And only lost that of themselves. - # Netto at least 0, if more people where slashed, a balance increase. 
- assert get_balance(state, slashed_index) >= pre_slashed_balance - - yield 'post', state - - -@spec_state_test -def test_success_double(state): - attester_slashing = get_valid_attester_slashing(state, signed_1=True, signed_2=True) - - yield from run_attester_slashing_processing(state, attester_slashing) - - -@spec_state_test -def test_success_surround(state): - next_epoch(state) - apply_empty_block(state) - - state.current_justified_epoch += 1 - attester_slashing = get_valid_attester_slashing(state, signed_1=False, signed_2=True) - - # set attestion1 to surround attestation 2 - attester_slashing.attestation_1.data.source_epoch = attester_slashing.attestation_2.data.source_epoch - 1 - attester_slashing.attestation_1.data.target_epoch = attester_slashing.attestation_2.data.target_epoch + 1 - - sign_indexed_attestation(state, attester_slashing.attestation_1) - - yield from run_attester_slashing_processing(state, attester_slashing) - - -@always_bls -@spec_state_test -def test_invalid_sig_1(state): - attester_slashing = get_valid_attester_slashing(state, signed_1=False, signed_2=True) - yield from run_attester_slashing_processing(state, attester_slashing, False) - - -@always_bls -@spec_state_test -def test_invalid_sig_2(state): - attester_slashing = get_valid_attester_slashing(state, signed_1=True, signed_2=False) - yield from run_attester_slashing_processing(state, attester_slashing, False) - - -@always_bls -@spec_state_test -def test_invalid_sig_1_and_2(state): - attester_slashing = get_valid_attester_slashing(state, signed_1=False, signed_2=False) - yield from run_attester_slashing_processing(state, attester_slashing, False) - - -@spec_state_test -def test_same_data(state): - attester_slashing = get_valid_attester_slashing(state, signed_1=False, signed_2=True) - - attester_slashing.attestation_1.data = attester_slashing.attestation_2.data - sign_indexed_attestation(state, attester_slashing.attestation_1) - - yield from run_attester_slashing_processing(state, 
attester_slashing, False) - - -@spec_state_test -def test_no_double_or_surround(state): - attester_slashing = get_valid_attester_slashing(state, signed_1=False, signed_2=True) - - attester_slashing.attestation_1.data.target_epoch += 1 - sign_indexed_attestation(state, attester_slashing.attestation_1) - - yield from run_attester_slashing_processing(state, attester_slashing, False) - - -@spec_state_test -def test_participants_already_slashed(state): - attester_slashing = get_valid_attester_slashing(state, signed_1=True, signed_2=True) - - # set all indices to slashed - attestation_1 = attester_slashing.attestation_1 - validator_indices = attestation_1.custody_bit_0_indices + attestation_1.custody_bit_1_indices - for index in validator_indices: - state.validator_registry[index].slashed = True - - yield from run_attester_slashing_processing(state, attester_slashing, False) - - -@spec_state_test -def test_custody_bit_0_and_1(state): - attester_slashing = get_valid_attester_slashing(state, signed_1=False, signed_2=True) - - attester_slashing.attestation_1.custody_bit_1_indices = ( - attester_slashing.attestation_1.custody_bit_0_indices - ) - sign_indexed_attestation(state, attester_slashing.attestation_1) - - yield from run_attester_slashing_processing(state, attester_slashing, False) diff --git a/test_libs/pyspec/eth2spec/test/block_processing/test_process_block_header.py b/test_libs/pyspec/eth2spec/test/block_processing/test_process_block_header.py deleted file mode 100644 index 8a67be741..000000000 --- a/test_libs/pyspec/eth2spec/test/block_processing/test_process_block_header.py +++ /dev/null @@ -1,85 +0,0 @@ -from copy import deepcopy - -from eth2spec.phase0.spec import ( - get_beacon_proposer_index, - process_slots, - process_block_header, -) -from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls -from eth2spec.test.helpers.block import ( - build_empty_block_for_next_slot, - sign_block -) -from eth2spec.test.helpers.state import 
next_slot - - -def prepare_state_for_header_processing(state): - process_slots(state, state.slot + 1) - - -def run_block_header_processing(state, block, valid=True): - """ - Run ``process_block_header``, yielding: - - pre-state ('pre') - - block ('block') - - post-state ('post'). - If ``valid == False``, run expecting ``AssertionError`` - """ - prepare_state_for_header_processing(state) - - yield 'pre', state - yield 'block', block - - if not valid: - expect_assertion_error(lambda: process_block_header(state, block)) - yield 'post', None - return - - process_block_header(state, block) - yield 'post', state - - -@spec_state_test -def test_success_block_header(state): - block = build_empty_block_for_next_slot(state, signed=True) - yield from run_block_header_processing(state, block) - - -@always_bls -@spec_state_test -def test_invalid_sig_block_header(state): - block = build_empty_block_for_next_slot(state) - yield from run_block_header_processing(state, block, valid=False) - - -@spec_state_test -def test_invalid_slot_block_header(state): - block = build_empty_block_for_next_slot(state) - block.slot = state.slot + 2 # invalid slot - sign_block(state, block) - - yield from run_block_header_processing(state, block, valid=False) - - -@spec_state_test -def test_invalid_parent_root(state): - block = build_empty_block_for_next_slot(state) - block.parent_root = b'\12' * 32 # invalid prev root - sign_block(state, block) - - yield from run_block_header_processing(state, block, valid=False) - - -@spec_state_test -def test_proposer_slashed(state): - # use stub state to get proposer index of next slot - stub_state = deepcopy(state) - next_slot(stub_state) - proposer_index = get_beacon_proposer_index(stub_state) - - # set proposer to slashed - state.validator_registry[proposer_index].slashed = True - - block = build_empty_block_for_next_slot(state, signed=True) - - yield from run_block_header_processing(state, block, valid=False) diff --git 
a/test_libs/pyspec/eth2spec/test/block_processing/test_process_proposer_slashing.py b/test_libs/pyspec/eth2spec/test/block_processing/test_process_proposer_slashing.py deleted file mode 100644 index 07ccc25f1..000000000 --- a/test_libs/pyspec/eth2spec/test/block_processing/test_process_proposer_slashing.py +++ /dev/null @@ -1,137 +0,0 @@ -import eth2spec.phase0.spec as spec -from eth2spec.phase0.spec import ( - get_current_epoch, - process_proposer_slashing, -) -from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls -from eth2spec.test.helpers.block_header import sign_block_header -from eth2spec.test.helpers.keys import privkeys -from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing -from eth2spec.test.helpers.state import get_balance - - -def run_proposer_slashing_processing(state, proposer_slashing, valid=True): - """ - Run ``process_proposer_slashing``, yielding: - - pre-state ('pre') - - proposer_slashing ('proposer_slashing') - - post-state ('post'). 
- If ``valid == False``, run expecting ``AssertionError`` - """ - - yield 'pre', state - yield 'proposer_slashing', proposer_slashing - - if not valid: - expect_assertion_error(lambda: process_proposer_slashing(state, proposer_slashing)) - yield 'post', None - return - - pre_proposer_balance = get_balance(state, proposer_slashing.proposer_index) - - process_proposer_slashing(state, proposer_slashing) - yield 'post', state - - # check if slashed - slashed_validator = state.validator_registry[proposer_slashing.proposer_index] - assert slashed_validator.slashed - assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH - assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH - - # lost whistleblower reward - assert ( - get_balance(state, proposer_slashing.proposer_index) < - pre_proposer_balance - ) - - -@spec_state_test -def test_success(state): - proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=True) - - yield from run_proposer_slashing_processing(state, proposer_slashing) - - -@always_bls -@spec_state_test -def test_invalid_sig_1(state): - proposer_slashing = get_valid_proposer_slashing(state, signed_1=False, signed_2=True) - yield from run_proposer_slashing_processing(state, proposer_slashing, False) - - -@always_bls -@spec_state_test -def test_invalid_sig_2(state): - proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=False) - yield from run_proposer_slashing_processing(state, proposer_slashing, False) - - -@always_bls -@spec_state_test -def test_invalid_sig_1_and_2(state): - proposer_slashing = get_valid_proposer_slashing(state, signed_1=False, signed_2=False) - yield from run_proposer_slashing_processing(state, proposer_slashing, False) - - -@spec_state_test -def test_invalid_proposer_index(state): - proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=True) - # Index just too high (by 1) - proposer_slashing.proposer_index = len(state.validator_registry) - - yield 
from run_proposer_slashing_processing(state, proposer_slashing, False) - - -@spec_state_test -def test_epochs_are_different(state): - proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=False) - - # set slots to be in different epochs - proposer_slashing.header_2.slot += spec.SLOTS_PER_EPOCH - sign_block_header(state, proposer_slashing.header_2, privkeys[proposer_slashing.proposer_index]) - - yield from run_proposer_slashing_processing(state, proposer_slashing, False) - - -@spec_state_test -def test_headers_are_same(state): - proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=False) - - # set headers to be the same - proposer_slashing.header_2 = proposer_slashing.header_1 - - yield from run_proposer_slashing_processing(state, proposer_slashing, False) - - -@spec_state_test -def test_proposer_is_not_activated(state): - proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=True) - - # set proposer to be not active yet - state.validator_registry[proposer_slashing.proposer_index].activation_epoch = get_current_epoch(state) + 1 - - yield from run_proposer_slashing_processing(state, proposer_slashing, False) - - -@spec_state_test -def test_proposer_is_slashed(state): - proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=True) - - # set proposer to slashed - state.validator_registry[proposer_slashing.proposer_index].slashed = True - - yield from run_proposer_slashing_processing(state, proposer_slashing, False) - - -@spec_state_test -def test_proposer_is_withdrawn(state): - proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=True) - - # move 1 epoch into future, to allow for past withdrawable epoch - state.slot += spec.SLOTS_PER_EPOCH - # set proposer withdrawable_epoch in past - current_epoch = get_current_epoch(state) - proposer_index = proposer_slashing.proposer_index - state.validator_registry[proposer_index].withdrawable_epoch = 
current_epoch - 1 - - yield from run_proposer_slashing_processing(state, proposer_slashing, False) diff --git a/test_libs/pyspec/eth2spec/test/block_processing/test_process_transfer.py b/test_libs/pyspec/eth2spec/test/block_processing/test_process_transfer.py deleted file mode 100644 index bd435d67a..000000000 --- a/test_libs/pyspec/eth2spec/test/block_processing/test_process_transfer.py +++ /dev/null @@ -1,178 +0,0 @@ -import eth2spec.phase0.spec as spec -from eth2spec.phase0.spec import ( - get_active_validator_indices, - get_beacon_proposer_index, - get_current_epoch, - process_transfer, -) -from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls -from eth2spec.test.helpers.state import next_epoch -from eth2spec.test.helpers.block import apply_empty_block -from eth2spec.test.helpers.transfers import get_valid_transfer - - -def run_transfer_processing(state, transfer, valid=True): - """ - Run ``process_transfer``, yielding: - - pre-state ('pre') - - transfer ('transfer') - - post-state ('post'). 
- If ``valid == False``, run expecting ``AssertionError`` - """ - - proposer_index = get_beacon_proposer_index(state) - pre_transfer_sender_balance = state.balances[transfer.sender] - pre_transfer_recipient_balance = state.balances[transfer.recipient] - pre_transfer_proposer_balance = state.balances[proposer_index] - - yield 'pre', state - yield 'transfer', transfer - - if not valid: - expect_assertion_error(lambda: process_transfer(state, transfer)) - yield 'post', None - return - - process_transfer(state, transfer) - yield 'post', state - - sender_balance = state.balances[transfer.sender] - recipient_balance = state.balances[transfer.recipient] - assert sender_balance == pre_transfer_sender_balance - transfer.amount - transfer.fee - assert recipient_balance == pre_transfer_recipient_balance + transfer.amount - assert state.balances[proposer_index] == pre_transfer_proposer_balance + transfer.fee - - -@spec_state_test -def test_success_non_activated(state): - transfer = get_valid_transfer(state, signed=True) - # un-activate so validator can transfer - state.validator_registry[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(state, transfer) - - -@spec_state_test -def test_success_withdrawable(state): - next_epoch(state) - apply_empty_block(state) - - transfer = get_valid_transfer(state, signed=True) - - # withdrawable_epoch in past so can transfer - state.validator_registry[transfer.sender].withdrawable_epoch = get_current_epoch(state) - 1 - - yield from run_transfer_processing(state, transfer) - - -@spec_state_test -def test_success_active_above_max_effective(state): - sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] - state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + 1 - transfer = get_valid_transfer(state, sender_index=sender_index, amount=1, fee=0, signed=True) - - yield from run_transfer_processing(state, transfer) - - -@spec_state_test -def 
test_success_active_above_max_effective_fee(state): - sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] - state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + 1 - transfer = get_valid_transfer(state, sender_index=sender_index, amount=0, fee=1, signed=True) - - yield from run_transfer_processing(state, transfer) - - -@always_bls -@spec_state_test -def test_invalid_signature(state): - transfer = get_valid_transfer(state) - # un-activate so validator can transfer - state.validator_registry[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(state, transfer, False) - - -@spec_state_test -def test_active_but_transfer_past_effective_balance(state): - sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] - amount = spec.MAX_EFFECTIVE_BALANCE // 32 - state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE - transfer = get_valid_transfer(state, sender_index=sender_index, amount=amount, fee=0, signed=True) - - yield from run_transfer_processing(state, transfer, False) - - -@spec_state_test -def test_incorrect_slot(state): - transfer = get_valid_transfer(state, slot=state.slot + 1, signed=True) - # un-activate so validator can transfer - state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(state, transfer, False) - - -@spec_state_test -def test_insufficient_balance_for_fee(state): - sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] - state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE - transfer = get_valid_transfer(state, sender_index=sender_index, amount=0, fee=1, signed=True) - - # un-activate so validator can transfer - state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(state, transfer, False) - - -@spec_state_test -def test_insufficient_balance(state): - sender_index = 
get_active_validator_indices(state, get_current_epoch(state))[-1] - state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE - transfer = get_valid_transfer(state, sender_index=sender_index, amount=1, fee=0, signed=True) - - # un-activate so validator can transfer - state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(state, transfer, False) - - -@spec_state_test -def test_no_dust_sender(state): - sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] - balance = state.balances[sender_index] - transfer = get_valid_transfer( - state, - sender_index=sender_index, - amount=balance - spec.MIN_DEPOSIT_AMOUNT + 1, - fee=0, - signed=True, - ) - - # un-activate so validator can transfer - state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(state, transfer, False) - - -@spec_state_test -def test_no_dust_recipient(state): - sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] - state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + 1 - transfer = get_valid_transfer(state, sender_index=sender_index, amount=1, fee=0, signed=True) - state.balances[transfer.recipient] = 0 - - # un-activate so validator can transfer - state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(state, transfer, False) - - -@spec_state_test -def test_invalid_pubkey(state): - transfer = get_valid_transfer(state, signed=True) - state.validator_registry[transfer.sender].withdrawal_credentials = spec.ZERO_HASH - - # un-activate so validator can transfer - state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(state, transfer, False) diff --git a/test_libs/pyspec/eth2spec/test/conftest.py b/test_libs/pyspec/eth2spec/test/conftest.py index 5e8ec708a..5713c3470 100644 --- 
a/test_libs/pyspec/eth2spec/test/conftest.py +++ b/test_libs/pyspec/eth2spec/test/conftest.py @@ -1,4 +1,5 @@ -from eth2spec.phase0 import spec +from eth2spec.phase0 import spec as spec_phase0 +from eth2spec.phase1 import spec as spec_phase1 # We import pytest only when it's present, i.e. when we are running tests. # The test-cases themselves can be generated without installing pytest. @@ -34,4 +35,5 @@ def config(request): config_name = request.config.getoption("--config") from preset_loader import loader presets = loader.load_presets('../../configs/', config_name) - spec.apply_constants_preset(presets) + spec_phase0.apply_constants_preset(presets) + spec_phase1.apply_constants_preset(presets) diff --git a/test_libs/pyspec/eth2spec/test/context.py b/test_libs/pyspec/eth2spec/test/context.py index 2be9322de..cbc594cd8 100644 --- a/test_libs/pyspec/eth2spec/test/context.py +++ b/test_libs/pyspec/eth2spec/test/context.py @@ -1,12 +1,20 @@ -from eth2spec.phase0 import spec +from eth2spec.phase0 import spec as spec_phase0 +from eth2spec.phase1 import spec as spec_phase1 from eth2spec.utils import bls from .helpers.genesis import create_genesis_state -from .utils import spectest, with_args, with_tags +from .utils import spectest, with_tags -# Provides a genesis state as first argument to the function decorated with this -with_state = with_args(lambda: [create_genesis_state(spec.SLOTS_PER_EPOCH * 8)]) + +def with_state(fn): + def entry(*args, **kw): + try: + kw['state'] = create_genesis_state(spec=kw['spec'], num_validators=spec_phase0.SLOTS_PER_EPOCH * 8) + except KeyError: + raise TypeError('Spec decorator must come before state decorator to inject spec into state.') + return fn(*args, **kw) + return entry # BLS is turned off by default *for performance purposes during TESTING*. 
@@ -80,3 +88,40 @@ def bls_switch(fn): bls.bls_active = old_state return out return entry + + +all_phases = ['phase0', 'phase1'] + + +def with_all_phases(fn): + """ + A decorator for running a test wil every phase + """ + return with_phases(all_phases)(fn) + + +def with_all_phases_except(exclusion_phases): + """ + A decorator factory for running a tests with every phase except the ones listed + """ + def decorator(fn): + return with_phases([phase for phase in all_phases if phase not in exclusion_phases])(fn) + return decorator + + +def with_phases(phases): + """ + Decorator factory that returns a decorator that runs a test for the appropriate phases + """ + def decorator(fn): + def run_with_spec_version(spec, *args, **kw): + kw['spec'] = spec + fn(*args, **kw) + + def wrapper(*args, **kw): + if 'phase0' in phases: + run_with_spec_version(spec_phase0, *args, **kw) + if 'phase1' in phases: + run_with_spec_version(spec_phase1, *args, **kw) + return wrapper + return decorator diff --git a/test_libs/pyspec/eth2spec/test/helpers/attestations.py b/test_libs/pyspec/eth2spec/test/helpers/attestations.py index 6ac0b994e..4c8b5c7eb 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/attestations.py +++ b/test_libs/pyspec/eth2spec/test/helpers/attestations.py @@ -1,39 +1,27 @@ from typing import List -# Access constants from spec pkg reference. 
-import eth2spec.phase0.spec as spec -from eth2spec.phase0.spec import ( - Attestation, - AttestationData, - AttestationDataAndCustodyBit, - Crosslink, - get_epoch_start_slot, get_block_root, get_current_epoch, get_previous_epoch, slot_to_epoch, - get_crosslink_committee, get_domain, IndexedAttestation, get_attesting_indices, BeaconState, get_block_root_at_slot, - get_epoch_start_shard, get_epoch_committee_count, - state_transition, process_slots, -) from eth2spec.test.helpers.bitfields import set_bitfield_bit from eth2spec.test.helpers.block import build_empty_block_for_next_slot, sign_block from eth2spec.test.helpers.keys import privkeys from eth2spec.utils.bls import bls_sign, bls_aggregate_signatures -from eth2spec.utils.minimal_ssz import hash_tree_root +from eth2spec.utils.ssz.ssz_impl import hash_tree_root -def build_attestation_data(state, slot, shard): +def build_attestation_data(spec, state, slot, shard): assert state.slot >= slot if slot == state.slot: - block_root = build_empty_block_for_next_slot(state).parent_root + block_root = build_empty_block_for_next_slot(spec, state).parent_root else: - block_root = get_block_root_at_slot(state, slot) + block_root = spec.get_block_root_at_slot(state, slot) - current_epoch_start_slot = get_epoch_start_slot(get_current_epoch(state)) + current_epoch_start_slot = spec.get_epoch_start_slot(spec.get_current_epoch(state)) if slot < current_epoch_start_slot: - epoch_boundary_root = get_block_root(state, get_previous_epoch(state)) + epoch_boundary_root = spec.get_block_root(state, spec.get_previous_epoch(state)) elif slot == current_epoch_start_slot: epoch_boundary_root = block_root else: - epoch_boundary_root = get_block_root(state, get_current_epoch(state)) + epoch_boundary_root = spec.get_block_root(state, spec.get_current_epoch(state)) if slot < current_epoch_start_slot: justified_epoch = state.previous_justified_epoch @@ -42,39 +30,39 @@ def build_attestation_data(state, slot, shard): justified_epoch = 
state.current_justified_epoch justified_block_root = state.current_justified_root - if slot_to_epoch(slot) == get_current_epoch(state): + if spec.slot_to_epoch(slot) == spec.get_current_epoch(state): parent_crosslink = state.current_crosslinks[shard] else: parent_crosslink = state.previous_crosslinks[shard] - return AttestationData( + return spec.AttestationData( beacon_block_root=block_root, source_epoch=justified_epoch, source_root=justified_block_root, - target_epoch=slot_to_epoch(slot), + target_epoch=spec.slot_to_epoch(slot), target_root=epoch_boundary_root, - crosslink=Crosslink( + crosslink=spec.Crosslink( shard=shard, start_epoch=parent_crosslink.end_epoch, - end_epoch=min(slot_to_epoch(slot), parent_crosslink.end_epoch + spec.MAX_EPOCHS_PER_CROSSLINK), + end_epoch=min(spec.slot_to_epoch(slot), parent_crosslink.end_epoch + spec.MAX_EPOCHS_PER_CROSSLINK), data_root=spec.ZERO_HASH, parent_root=hash_tree_root(parent_crosslink), ), ) -def get_valid_attestation(state, slot=None, signed=False): +def get_valid_attestation(spec, state, slot=None, signed=False): if slot is None: slot = state.slot - epoch = slot_to_epoch(slot) - epoch_start_shard = get_epoch_start_shard(state, epoch) - committees_per_slot = get_epoch_committee_count(state, epoch) // spec.SLOTS_PER_EPOCH + epoch = spec.slot_to_epoch(slot) + epoch_start_shard = spec.get_epoch_start_shard(state, epoch) + committees_per_slot = spec.get_epoch_committee_count(state, epoch) // spec.SLOTS_PER_EPOCH shard = (epoch_start_shard + committees_per_slot * (slot % spec.SLOTS_PER_EPOCH)) % spec.SHARD_COUNT - attestation_data = build_attestation_data(state, slot, shard) + attestation_data = build_attestation_data(spec, state, slot, shard) - crosslink_committee = get_crosslink_committee( + crosslink_committee = spec.get_crosslink_committee( state, attestation_data.target_epoch, attestation_data.crosslink.shard @@ -84,25 +72,26 @@ def get_valid_attestation(state, slot=None, signed=False): bitfield_length = 
(committee_size + 7) // 8 aggregation_bitfield = b'\x00' * bitfield_length custody_bitfield = b'\x00' * bitfield_length - attestation = Attestation( + attestation = spec.Attestation( aggregation_bitfield=aggregation_bitfield, data=attestation_data, custody_bitfield=custody_bitfield, ) - fill_aggregate_attestation(state, attestation) + fill_aggregate_attestation(spec, state, attestation) if signed: - sign_attestation(state, attestation) + sign_attestation(spec, state, attestation) return attestation -def sign_aggregate_attestation(state: BeaconState, data: AttestationData, participants: List[int]): +def sign_aggregate_attestation(spec, state, attestation_data, participants: List[int]): signatures = [] for validator_index in participants: privkey = privkeys[validator_index] signatures.append( get_attestation_signature( + spec, state, - data, + attestation_data, privkey ) ) @@ -110,23 +99,23 @@ def sign_aggregate_attestation(state: BeaconState, data: AttestationData, partic return bls_aggregate_signatures(signatures) -def sign_indexed_attestation(state, indexed_attestation: IndexedAttestation): +def sign_indexed_attestation(spec, state, indexed_attestation): participants = indexed_attestation.custody_bit_0_indices + indexed_attestation.custody_bit_1_indices - indexed_attestation.signature = sign_aggregate_attestation(state, indexed_attestation.data, participants) + indexed_attestation.signature = sign_aggregate_attestation(spec, state, indexed_attestation.data, participants) -def sign_attestation(state, attestation: Attestation): - participants = get_attesting_indices( +def sign_attestation(spec, state, attestation): + participants = spec.get_attesting_indices( state, attestation.data, attestation.aggregation_bitfield, ) - attestation.signature = sign_aggregate_attestation(state, attestation.data, participants) + attestation.signature = sign_aggregate_attestation(spec, state, attestation.data, participants) -def get_attestation_signature(state, attestation_data, 
privkey, custody_bit=0b0): - message_hash = AttestationDataAndCustodyBit( +def get_attestation_signature(spec, state, attestation_data, privkey, custody_bit=0b0): + message_hash = spec.AttestationDataAndCustodyBit( data=attestation_data, custody_bit=custody_bit, ).hash_tree_root() @@ -134,7 +123,7 @@ def get_attestation_signature(state, attestation_data, privkey, custody_bit=0b0) return bls_sign( message_hash=message_hash, privkey=privkey, - domain=get_domain( + domain=spec.get_domain( state=state, domain_type=spec.DOMAIN_ATTESTATION, message_epoch=attestation_data.target_epoch, @@ -142,8 +131,8 @@ def get_attestation_signature(state, attestation_data, privkey, custody_bit=0b0) ) -def fill_aggregate_attestation(state, attestation): - crosslink_committee = get_crosslink_committee( +def fill_aggregate_attestation(spec, state, attestation): + crosslink_committee = spec.get_crosslink_committee( state, attestation.data.target_epoch, attestation.data.crosslink.shard, @@ -152,10 +141,10 @@ def fill_aggregate_attestation(state, attestation): attestation.aggregation_bitfield = set_bitfield_bit(attestation.aggregation_bitfield, i) -def add_attestation_to_state(state, attestation, slot): - block = build_empty_block_for_next_slot(state) +def add_attestation_to_state(spec, state, attestation, slot): + block = build_empty_block_for_next_slot(spec, state) block.slot = slot block.body.attestations.append(attestation) - process_slots(state, block.slot) - sign_block(state, block) - state_transition(state, block) + spec.process_slots(state, block.slot) + sign_block(spec, state, block) + spec.state_transition(state, block) diff --git a/test_libs/pyspec/eth2spec/test/helpers/attester_slashings.py b/test_libs/pyspec/eth2spec/test/helpers/attester_slashings.py index d19b41dfe..9fd34520c 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/attester_slashings.py +++ b/test_libs/pyspec/eth2spec/test/helpers/attester_slashings.py @@ -1,19 +1,18 @@ from copy import deepcopy -from 
eth2spec.phase0.spec import AttesterSlashing, convert_to_indexed from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation -def get_valid_attester_slashing(state, signed_1=False, signed_2=False): - attestation_1 = get_valid_attestation(state, signed=signed_1) +def get_valid_attester_slashing(spec, state, signed_1=False, signed_2=False): + attestation_1 = get_valid_attestation(spec, state, signed=signed_1) attestation_2 = deepcopy(attestation_1) attestation_2.data.target_root = b'\x01' * 32 if signed_2: - sign_attestation(state, attestation_2) + sign_attestation(spec, state, attestation_2) - return AttesterSlashing( - attestation_1=convert_to_indexed(state, attestation_1), - attestation_2=convert_to_indexed(state, attestation_2), + return spec.AttesterSlashing( + attestation_1=spec.convert_to_indexed(state, attestation_1), + attestation_2=spec.convert_to_indexed(state, attestation_2), ) diff --git a/test_libs/pyspec/eth2spec/test/helpers/bitfields.py b/test_libs/pyspec/eth2spec/test/helpers/bitfields.py index 7c25d073a..50e5b6cba 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/bitfields.py +++ b/test_libs/pyspec/eth2spec/test/helpers/bitfields.py @@ -5,7 +5,7 @@ def set_bitfield_bit(bitfield, i): byte_index = i // 8 bit_index = i % 8 return ( - bitfield[:byte_index] + - bytes([bitfield[byte_index] | (1 << bit_index)]) + - bitfield[byte_index + 1:] + bitfield[:byte_index] + + bytes([bitfield[byte_index] | (1 << bit_index)]) + + bitfield[byte_index + 1:] ) diff --git a/test_libs/pyspec/eth2spec/test/helpers/block.py b/test_libs/pyspec/eth2spec/test/helpers/block.py index 715cf82db..5c7cb02a0 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/block.py +++ b/test_libs/pyspec/eth2spec/test/helpers/block.py @@ -1,67 +1,61 @@ from copy import deepcopy -from eth2spec.phase0 import spec -from eth2spec.phase0.spec import ( - BeaconBlock, - get_beacon_proposer_index, slot_to_epoch, get_domain, - process_slots, state_transition, -) from 
eth2spec.test.helpers.keys import privkeys from eth2spec.utils.bls import bls_sign, only_with_bls -from eth2spec.utils.minimal_ssz import signing_root, hash_tree_root +from eth2spec.utils.ssz.ssz_impl import signing_root, hash_tree_root # Fully ignore the function if BLS is off, beacon-proposer index calculation is slow. @only_with_bls() -def sign_block(state, block, proposer_index=None): +def sign_block(spec, state, block, proposer_index=None): assert state.slot <= block.slot if proposer_index is None: if block.slot == state.slot: - proposer_index = get_beacon_proposer_index(state) + proposer_index = spec.get_beacon_proposer_index(state) else: - if slot_to_epoch(state.slot) + 1 > slot_to_epoch(block.slot): + if spec.slot_to_epoch(state.slot) + 1 > spec.slot_to_epoch(block.slot): print("warning: block slot far away, and no proposer index manually given." " Signing block is slow due to transition for proposer index calculation.") # use stub state to get proposer index of future slot stub_state = deepcopy(state) - process_slots(stub_state, block.slot) - proposer_index = get_beacon_proposer_index(stub_state) + spec.process_slots(stub_state, block.slot) + proposer_index = spec.get_beacon_proposer_index(stub_state) privkey = privkeys[proposer_index] block.body.randao_reveal = bls_sign( privkey=privkey, - message_hash=hash_tree_root(slot_to_epoch(block.slot)), - domain=get_domain( + message_hash=hash_tree_root(spec.slot_to_epoch(block.slot)), + domain=spec.get_domain( state, - message_epoch=slot_to_epoch(block.slot), + message_epoch=spec.slot_to_epoch(block.slot), domain_type=spec.DOMAIN_RANDAO, ) ) block.signature = bls_sign( message_hash=signing_root(block), privkey=privkey, - domain=get_domain( + domain=spec.get_domain( state, spec.DOMAIN_BEACON_PROPOSER, - slot_to_epoch(block.slot))) + spec.slot_to_epoch(block.slot))) -def apply_empty_block(state): +def apply_empty_block(spec, state): """ Transition via an empty block (on current slot, assuming no block has been 
applied yet). :return: the empty block that triggered the transition. """ - block = build_empty_block(state, signed=True) - state_transition(state, block) + block = build_empty_block(spec, state, signed=True) + spec.state_transition(state, block) return block -def build_empty_block(state, slot=None, signed=False): +def build_empty_block(spec, state, slot=None, signed=False): if slot is None: slot = state.slot - empty_block = BeaconBlock() + empty_block = spec.BeaconBlock() empty_block.slot = slot empty_block.body.eth1_data.deposit_count = state.deposit_index previous_block_header = deepcopy(state.latest_block_header) @@ -70,10 +64,10 @@ def build_empty_block(state, slot=None, signed=False): empty_block.parent_root = signing_root(previous_block_header) if signed: - sign_block(state, empty_block) + sign_block(spec, state, empty_block) return empty_block -def build_empty_block_for_next_slot(state, signed=False): - return build_empty_block(state, state.slot + 1, signed=signed) +def build_empty_block_for_next_slot(spec, state, signed=False): + return build_empty_block(spec, state, state.slot + 1, signed=signed) diff --git a/test_libs/pyspec/eth2spec/test/helpers/block_header.py b/test_libs/pyspec/eth2spec/test/helpers/block_header.py index 9aba62d37..456414112 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/block_header.py +++ b/test_libs/pyspec/eth2spec/test/helpers/block_header.py @@ -1,13 +1,9 @@ -# Access constants from spec pkg reference. 
-import eth2spec.phase0.spec as spec - -from eth2spec.phase0.spec import get_domain from eth2spec.utils.bls import bls_sign -from eth2spec.utils.minimal_ssz import signing_root +from eth2spec.utils.ssz.ssz_impl import signing_root -def sign_block_header(state, header, privkey): - domain = get_domain( +def sign_block_header(spec, state, header, privkey): + domain = spec.get_domain( state=state, domain_type=spec.DOMAIN_BEACON_PROPOSER, ) diff --git a/test_libs/pyspec/eth2spec/test/helpers/custody.py b/test_libs/pyspec/eth2spec/test/helpers/custody.py new file mode 100644 index 000000000..67df12fcd --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/helpers/custody.py @@ -0,0 +1,38 @@ +from eth2spec.test.helpers.keys import privkeys +from eth2spec.utils.bls import bls_sign + + +def get_valid_early_derived_secret_reveal(spec, state, epoch=None): + current_epoch = spec.get_current_epoch(state) + revealed_index = spec.get_active_validator_indices(state, current_epoch)[-1] + masker_index = spec.get_active_validator_indices(state, current_epoch)[0] + + if epoch is None: + epoch = current_epoch + spec.CUSTODY_PERIOD_TO_RANDAO_PADDING + + reveal = bls_sign( + message_hash=spec.hash_tree_root(epoch), + privkey=privkeys[revealed_index], + domain=spec.get_domain( + state=state, + domain_type=spec.DOMAIN_RANDAO, + message_epoch=epoch, + ), + ) + mask = bls_sign( + message_hash=spec.hash_tree_root(epoch), + privkey=privkeys[masker_index], + domain=spec.get_domain( + state=state, + domain_type=spec.DOMAIN_RANDAO, + message_epoch=epoch, + ), + ) + + return spec.EarlyDerivedSecretReveal( + revealed_index=revealed_index, + epoch=epoch, + reveal=reveal, + masker_index=masker_index, + mask=mask, + ) diff --git a/test_libs/pyspec/eth2spec/test/helpers/deposits.py b/test_libs/pyspec/eth2spec/test/helpers/deposits.py index 2db3ae03c..c85d265eb 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/deposits.py +++ b/test_libs/pyspec/eth2spec/test/helpers/deposits.py @@ -1,29 +1,25 @@ -# Access 
constants from spec pkg reference. -import eth2spec.phase0.spec as spec - -from eth2spec.phase0.spec import get_domain, DepositData, verify_merkle_branch, Deposit, ZERO_HASH from eth2spec.test.helpers.keys import pubkeys, privkeys from eth2spec.utils.bls import bls_sign from eth2spec.utils.merkle_minimal import calc_merkle_tree_from_leaves, get_merkle_root, get_merkle_proof -from eth2spec.utils.minimal_ssz import signing_root +from eth2spec.utils.ssz.ssz_impl import signing_root -def build_deposit_data(state, pubkey, privkey, amount, withdrawal_credentials, signed=False): - deposit_data = DepositData( +def build_deposit_data(spec, state, pubkey, privkey, amount, withdrawal_credentials, signed=False): + deposit_data = spec.DepositData( pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount, ) if signed: - sign_deposit_data(state, deposit_data, privkey) + sign_deposit_data(spec, state, deposit_data, privkey) return deposit_data -def sign_deposit_data(state, deposit_data, privkey): +def sign_deposit_data(spec, state, deposit_data, privkey): signature = bls_sign( message_hash=signing_root(deposit_data), privkey=privkey, - domain=get_domain( + domain=spec.get_domain( state, spec.DOMAIN_DEPOSIT, ) @@ -31,14 +27,15 @@ def sign_deposit_data(state, deposit_data, privkey): deposit_data.signature = signature -def build_deposit(state, +def build_deposit(spec, + state, deposit_data_leaves, pubkey, privkey, amount, withdrawal_credentials, signed): - deposit_data = build_deposit_data(state, pubkey, privkey, amount, withdrawal_credentials, signed) + deposit_data = build_deposit_data(spec, state, pubkey, privkey, amount, withdrawal_credentials, signed) item = deposit_data.hash_tree_root() index = len(deposit_data_leaves) @@ -46,9 +43,9 @@ def build_deposit(state, tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves)) root = get_merkle_root((tuple(deposit_data_leaves))) proof = list(get_merkle_proof(tree, item_index=index)) - assert 
verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, index, root) + assert spec.verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, index, root) - deposit = Deposit( + deposit = spec.Deposit( proof=list(proof), index=index, data=deposit_data, @@ -57,13 +54,13 @@ def build_deposit(state, return deposit, root, deposit_data_leaves -def prepare_state_and_deposit(state, validator_index, amount, withdrawal_credentials=None, signed=False): +def prepare_state_and_deposit(spec, state, validator_index, amount, withdrawal_credentials=None, signed=False): """ Prepare the state for the deposit, and create a deposit for the given validator, depositing the given amount. """ pre_validator_count = len(state.validator_registry) # fill previous deposits with zero-hash - deposit_data_leaves = [ZERO_HASH] * pre_validator_count + deposit_data_leaves = [spec.ZERO_HASH] * pre_validator_count pubkey = pubkeys[validator_index] privkey = privkeys[validator_index] @@ -73,6 +70,7 @@ def prepare_state_and_deposit(state, validator_index, amount, withdrawal_credent withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX_BYTE + spec.hash(pubkey)[1:] deposit, root, deposit_data_leaves = build_deposit( + spec, state, deposit_data_leaves, pubkey, diff --git a/test_libs/pyspec/eth2spec/test/helpers/genesis.py b/test_libs/pyspec/eth2spec/test/helpers/genesis.py index 01011cacd..83af56621 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/genesis.py +++ b/test_libs/pyspec/eth2spec/test/helpers/genesis.py @@ -1,12 +1,8 @@ -# Access constants from spec pkg reference. 
-import eth2spec.phase0.spec as spec - -from eth2spec.phase0.spec import Eth1Data, ZERO_HASH, get_active_validator_indices from eth2spec.test.helpers.keys import pubkeys -from eth2spec.utils.minimal_ssz import hash_tree_root +from eth2spec.utils.ssz.ssz_impl import hash_tree_root -def build_mock_validator(i: int, balance: int): +def build_mock_validator(spec, i: int, balance: int): pubkey = pubkeys[i] # insecurely use pubkey as withdrawal key as well withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX_BYTE + spec.hash(pubkey)[1:] @@ -21,22 +17,22 @@ def build_mock_validator(i: int, balance: int): ) -def create_genesis_state(num_validators): +def create_genesis_state(spec, num_validators): deposit_root = b'\x42' * 32 state = spec.BeaconState( genesis_time=0, deposit_index=num_validators, - latest_eth1_data=Eth1Data( + latest_eth1_data=spec.Eth1Data( deposit_root=deposit_root, deposit_count=num_validators, - block_hash=ZERO_HASH, + block_hash=spec.ZERO_HASH, )) # We "hack" in the initial validators, # as it is much faster than creating and processing genesis deposits for every single test case. 
state.balances = [spec.MAX_EFFECTIVE_BALANCE] * num_validators - state.validator_registry = [build_mock_validator(i, state.balances[i]) for i in range(num_validators)] + state.validator_registry = [build_mock_validator(spec, i, state.balances[i]) for i in range(num_validators)] # Process genesis activations for validator in state.validator_registry: @@ -44,7 +40,7 @@ def create_genesis_state(num_validators): validator.activation_eligibility_epoch = spec.GENESIS_EPOCH validator.activation_epoch = spec.GENESIS_EPOCH - genesis_active_index_root = hash_tree_root(get_active_validator_indices(state, spec.GENESIS_EPOCH)) + genesis_active_index_root = hash_tree_root(spec.get_active_validator_indices(state, spec.GENESIS_EPOCH)) for index in range(spec.LATEST_ACTIVE_INDEX_ROOTS_LENGTH): state.latest_active_index_roots[index] = genesis_active_index_root diff --git a/test_libs/pyspec/eth2spec/test/helpers/proposer_slashings.py b/test_libs/pyspec/eth2spec/test/helpers/proposer_slashings.py index 02629f7da..86c6acf47 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/proposer_slashings.py +++ b/test_libs/pyspec/eth2spec/test/helpers/proposer_slashings.py @@ -1,19 +1,16 @@ from copy import deepcopy -from eth2spec.phase0.spec import ( - get_current_epoch, get_active_validator_indices, BeaconBlockHeader, ProposerSlashing -) from eth2spec.test.helpers.block_header import sign_block_header from eth2spec.test.helpers.keys import pubkey_to_privkey -def get_valid_proposer_slashing(state, signed_1=False, signed_2=False): - current_epoch = get_current_epoch(state) - validator_index = get_active_validator_indices(state, current_epoch)[-1] +def get_valid_proposer_slashing(spec, state, signed_1=False, signed_2=False): + current_epoch = spec.get_current_epoch(state) + validator_index = spec.get_active_validator_indices(state, current_epoch)[-1] privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] slot = state.slot - header_1 = BeaconBlockHeader( + header_1 = 
spec.BeaconBlockHeader( slot=slot, parent_root=b'\x33' * 32, state_root=b'\x44' * 32, @@ -24,11 +21,11 @@ def get_valid_proposer_slashing(state, signed_1=False, signed_2=False): header_2.slot = slot + 1 if signed_1: - sign_block_header(state, header_1, privkey) + sign_block_header(spec, state, header_1, privkey) if signed_2: - sign_block_header(state, header_2, privkey) + sign_block_header(spec, state, header_2, privkey) - return ProposerSlashing( + return spec.ProposerSlashing( proposer_index=validator_index, header_1=header_1, header_2=header_2, diff --git a/test_libs/pyspec/eth2spec/test/helpers/state.py b/test_libs/pyspec/eth2spec/test/helpers/state.py index 1137561f1..63aa27d70 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/state.py +++ b/test_libs/pyspec/eth2spec/test/helpers/state.py @@ -1,29 +1,23 @@ -# Access constants from spec pkg reference. -import eth2spec.phase0.spec as spec - -from eth2spec.phase0.spec import process_slots - - def get_balance(state, index): return state.balances[index] -def next_slot(state): +def next_slot(spec, state): """ Transition to the next slot. """ - process_slots(state, state.slot + 1) + spec.process_slots(state, state.slot + 1) -def next_epoch(state): +def next_epoch(spec, state): """ Transition to the start slot of the next epoch """ slot = state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) - process_slots(state, slot) + spec.process_slots(state, slot) -def get_state_root(state, slot) -> bytes: +def get_state_root(spec, state, slot) -> bytes: """ Return the state root at a recent ``slot``. """ diff --git a/test_libs/pyspec/eth2spec/test/helpers/transfers.py b/test_libs/pyspec/eth2spec/test/helpers/transfers.py index 2045f48ad..e619c5569 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/transfers.py +++ b/test_libs/pyspec/eth2spec/test/helpers/transfers.py @@ -1,20 +1,16 @@ -# Access constants from spec pkg reference. 
-import eth2spec.phase0.spec as spec - -from eth2spec.phase0.spec import get_current_epoch, get_active_validator_indices, Transfer, get_domain from eth2spec.test.helpers.keys import pubkeys, privkeys from eth2spec.test.helpers.state import get_balance from eth2spec.utils.bls import bls_sign -from eth2spec.utils.minimal_ssz import signing_root +from eth2spec.utils.ssz.ssz_impl import signing_root -def get_valid_transfer(state, slot=None, sender_index=None, amount=None, fee=None, signed=False): +def get_valid_transfer(spec, state, slot=None, sender_index=None, amount=None, fee=None, signed=False): if slot is None: slot = state.slot - current_epoch = get_current_epoch(state) + current_epoch = spec.get_current_epoch(state) if sender_index is None: - sender_index = get_active_validator_indices(state, current_epoch)[-1] - recipient_index = get_active_validator_indices(state, current_epoch)[0] + sender_index = spec.get_active_validator_indices(state, current_epoch)[-1] + recipient_index = spec.get_active_validator_indices(state, current_epoch)[0] transfer_pubkey = pubkeys[-1] transfer_privkey = privkeys[-1] @@ -23,7 +19,7 @@ def get_valid_transfer(state, slot=None, sender_index=None, amount=None, fee=Non if amount is None: amount = get_balance(state, sender_index) - fee - transfer = Transfer( + transfer = spec.Transfer( sender=sender_index, recipient=recipient_index, amount=amount, @@ -32,24 +28,24 @@ def get_valid_transfer(state, slot=None, sender_index=None, amount=None, fee=Non pubkey=transfer_pubkey, ) if signed: - sign_transfer(state, transfer, transfer_privkey) + sign_transfer(spec, state, transfer, transfer_privkey) # ensure withdrawal_credentials reproducible state.validator_registry[transfer.sender].withdrawal_credentials = ( - spec.BLS_WITHDRAWAL_PREFIX_BYTE + spec.hash(transfer.pubkey)[1:] + spec.BLS_WITHDRAWAL_PREFIX_BYTE + spec.hash(transfer.pubkey)[1:] ) return transfer -def sign_transfer(state, transfer, privkey): +def sign_transfer(spec, state, transfer, 
privkey): transfer.signature = bls_sign( message_hash=signing_root(transfer), privkey=privkey, - domain=get_domain( + domain=spec.get_domain( state=state, domain_type=spec.DOMAIN_TRANSFER, - message_epoch=get_current_epoch(state), + message_epoch=spec.get_current_epoch(state), ) ) return transfer diff --git a/test_libs/pyspec/eth2spec/test/helpers/voluntary_exits.py b/test_libs/pyspec/eth2spec/test/helpers/voluntary_exits.py index 54376d694..120a9f600 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/voluntary_exits.py +++ b/test_libs/pyspec/eth2spec/test/helpers/voluntary_exits.py @@ -1,26 +1,22 @@ -# Access constants from spec pkg reference. -import eth2spec.phase0.spec as spec - -from eth2spec.phase0.spec import VoluntaryExit, get_domain from eth2spec.utils.bls import bls_sign -from eth2spec.utils.minimal_ssz import signing_root +from eth2spec.utils.ssz.ssz_impl import signing_root -def build_voluntary_exit(state, epoch, validator_index, privkey, signed=False): - voluntary_exit = VoluntaryExit( +def build_voluntary_exit(spec, state, epoch, validator_index, privkey, signed=False): + voluntary_exit = spec.VoluntaryExit( epoch=epoch, validator_index=validator_index, ) if signed: - sign_voluntary_exit(state, voluntary_exit, privkey) + sign_voluntary_exit(spec, state, voluntary_exit, privkey) return voluntary_exit -def sign_voluntary_exit(state, voluntary_exit, privkey): +def sign_voluntary_exit(spec, state, voluntary_exit, privkey): voluntary_exit.signature = bls_sign( message_hash=signing_root(voluntary_exit), privkey=privkey, - domain=get_domain( + domain=spec.get_domain( state=state, domain_type=spec.DOMAIN_VOLUNTARY_EXIT, message_epoch=voluntary_exit.epoch, diff --git a/test_libs/pyspec/eth2spec/test/phase_0/__init__.py b/test_libs/pyspec/eth2spec/test/phase_0/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/__init__.py 
b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py new file mode 100644 index 000000000..2b34ab405 --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py @@ -0,0 +1,314 @@ +from copy import deepcopy + +from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases, with_phases +from eth2spec.test.helpers.attestations import ( + get_valid_attestation, + sign_attestation, +) +from eth2spec.test.helpers.state import ( + next_epoch, + next_slot, +) +from eth2spec.test.helpers.block import apply_empty_block + + +def run_attestation_processing(spec, state, attestation, valid=True): + """ + Run ``process_attestation``, yielding: + - pre-state ('pre') + - attestation ('attestation') + - post-state ('post'). + If ``valid == False``, run expecting ``AssertionError`` + """ + # yield pre-state + yield 'pre', state + + yield 'attestation', attestation + + # If the attestation is invalid, processing is aborted, and there is no post-state. 
+ if not valid: + expect_assertion_error(lambda: spec.process_attestation(state, attestation)) + yield 'post', None + return + + current_epoch_count = len(state.current_epoch_attestations) + previous_epoch_count = len(state.previous_epoch_attestations) + + # process attestation + spec.process_attestation(state, attestation) + + # Make sure the attestation has been processed + if attestation.data.target_epoch == spec.get_current_epoch(state): + assert len(state.current_epoch_attestations) == current_epoch_count + 1 + else: + assert len(state.previous_epoch_attestations) == previous_epoch_count + 1 + + # yield post-state + yield 'post', state + + +@with_all_phases +@spec_state_test +def test_success(spec, state): + attestation = get_valid_attestation(spec, state, signed=True) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + yield from run_attestation_processing(spec, state, attestation) + + +@with_all_phases +@spec_state_test +def test_success_previous_epoch(spec, state): + attestation = get_valid_attestation(spec, state, signed=True) + next_epoch(spec, state) + apply_empty_block(spec, state) + + yield from run_attestation_processing(spec, state, attestation) + + +@with_all_phases +@spec_state_test +def test_success_since_max_epochs_per_crosslink(spec, state): + for _ in range(spec.MAX_EPOCHS_PER_CROSSLINK + 2): + next_epoch(spec, state) + apply_empty_block(spec, state) + + attestation = get_valid_attestation(spec, state, signed=True) + data = attestation.data + # test logic sanity check: make sure the attestation only includes MAX_EPOCHS_PER_CROSSLINK epochs + assert data.crosslink.end_epoch - data.crosslink.start_epoch == spec.MAX_EPOCHS_PER_CROSSLINK + + for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY): + next_slot(spec, state) + apply_empty_block(spec, state) + + yield from run_attestation_processing(spec, state, attestation) + + +@with_all_phases +@always_bls +@spec_state_test +def test_invalid_attestation_signature(spec, state): + attestation = 
get_valid_attestation(spec, state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases +@spec_state_test +def test_before_inclusion_delay(spec, state): + attestation = get_valid_attestation(spec, state, signed=True) + # do not increment slot to allow for inclusion delay + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases +@spec_state_test +def test_after_epoch_slots(spec, state): + attestation = get_valid_attestation(spec, state, signed=True) + # increment past latest inclusion slot + spec.process_slots(state, state.slot + spec.SLOTS_PER_EPOCH + 1) + apply_empty_block(spec, state) + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases +@spec_state_test +def test_old_source_epoch(spec, state): + state.slot = spec.SLOTS_PER_EPOCH * 5 + state.finalized_epoch = 2 + state.previous_justified_epoch = 3 + state.current_justified_epoch = 4 + attestation = get_valid_attestation(spec, state, slot=(spec.SLOTS_PER_EPOCH * 3) + 1) + + # test logic sanity check: make sure the attestation is pointing to oldest known source epoch + assert attestation.data.source_epoch == state.previous_justified_epoch + + # Now go beyond that, it will be invalid + attestation.data.source_epoch -= 1 + + sign_attestation(spec, state, attestation) + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases +@spec_state_test +def test_wrong_shard(spec, state): + attestation = get_valid_attestation(spec, state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.data.crosslink.shard += 1 + + sign_attestation(spec, state, attestation) + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases +@spec_state_test +def test_new_source_epoch(spec, state): + attestation = get_valid_attestation(spec, state) + state.slot += 
spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.data.source_epoch += 1 + + sign_attestation(spec, state, attestation) + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases +@spec_state_test +def test_source_root_is_target_root(spec, state): + attestation = get_valid_attestation(spec, state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.data.source_root = attestation.data.target_root + + sign_attestation(spec, state, attestation) + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases +@spec_state_test +def test_invalid_current_source_root(spec, state): + state.slot = spec.SLOTS_PER_EPOCH * 5 + state.finalized_epoch = 2 + + state.previous_justified_epoch = 3 + state.previous_justified_root = b'\x01' * 32 + + state.current_justified_epoch = 4 + state.current_justified_root = b'\xff' * 32 + + attestation = get_valid_attestation(spec, state, slot=(spec.SLOTS_PER_EPOCH * 3) + 1) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + # Test logic sanity checks: + assert state.current_justified_root != state.previous_justified_root + assert attestation.data.source_root == state.previous_justified_root + + # Make attestation source root invalid: should be previous justified, not current one + attestation.data.source_root = state.current_justified_root + + sign_attestation(spec, state, attestation) + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases +@spec_state_test +def test_bad_source_root(spec, state): + attestation = get_valid_attestation(spec, state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.data.source_root = b'\x42' * 32 + + sign_attestation(spec, state, attestation) + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_phases(['phase0']) +@spec_state_test +def test_non_zero_crosslink_data_root(spec, state): + attestation = get_valid_attestation(spec, 
state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.data.crosslink.data_root = b'\x42' * 32 + + sign_attestation(spec, state, attestation) + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases +@spec_state_test +def test_bad_parent_crosslink(spec, state): + next_epoch(spec, state) + apply_empty_block(spec, state) + + attestation = get_valid_attestation(spec, state, signed=True) + for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY): + next_slot(spec, state) + apply_empty_block(spec, state) + + attestation.data.crosslink.parent_root = b'\x27' * 32 + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases +@spec_state_test +def test_bad_crosslink_start_epoch(spec, state): + next_epoch(spec, state) + apply_empty_block(spec, state) + + attestation = get_valid_attestation(spec, state, signed=True) + for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY): + next_slot(spec, state) + apply_empty_block(spec, state) + + attestation.data.crosslink.start_epoch += 1 + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases +@spec_state_test +def test_bad_crosslink_end_epoch(spec, state): + next_epoch(spec, state) + apply_empty_block(spec, state) + + attestation = get_valid_attestation(spec, state, signed=True) + for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY): + next_slot(spec, state) + apply_empty_block(spec, state) + + attestation.data.crosslink.end_epoch += 1 + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases +@spec_state_test +def test_inconsistent_bitfields(spec, state): + attestation = get_valid_attestation(spec, state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.custody_bitfield = deepcopy(attestation.aggregation_bitfield) + b'\x00' + + sign_attestation(spec, state, attestation) + + yield from run_attestation_processing(spec, state, attestation, False) + + 
+@with_phases(['phase0']) +@spec_state_test +def test_non_empty_custody_bitfield(spec, state): + attestation = get_valid_attestation(spec, state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.custody_bitfield = deepcopy(attestation.aggregation_bitfield) + + sign_attestation(spec, state, attestation) + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases +@spec_state_test +def test_empty_aggregation_bitfield(spec, state): + attestation = get_valid_attestation(spec, state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.aggregation_bitfield = b'\x00' * len(attestation.aggregation_bitfield) + + sign_attestation(spec, state, attestation) + + yield from run_attestation_processing(spec, state, attestation) diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attester_slashing.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attester_slashing.py new file mode 100644 index 000000000..6c7637d59 --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attester_slashing.py @@ -0,0 +1,153 @@ +from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases +from eth2spec.test.helpers.attestations import sign_indexed_attestation +from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing +from eth2spec.test.helpers.block import apply_empty_block +from eth2spec.test.helpers.state import ( + get_balance, + next_epoch, +) + + +def run_attester_slashing_processing(spec, state, attester_slashing, valid=True): + """ + Run ``process_attester_slashing``, yielding: + - pre-state ('pre') + - attester_slashing ('attester_slashing') + - post-state ('post'). 
+ If ``valid == False``, run expecting ``AssertionError`` + + yield 'pre', state + yield 'attester_slashing', attester_slashing + + if not valid: + expect_assertion_error(lambda: spec.process_attester_slashing(state, attester_slashing)) + yield 'post', None + return + + slashed_index = attester_slashing.attestation_1.custody_bit_0_indices[0] + pre_slashed_balance = get_balance(state, slashed_index) + + proposer_index = spec.get_beacon_proposer_index(state) + pre_proposer_balance = get_balance(state, proposer_index) + + # Process slashing + spec.process_attester_slashing(state, attester_slashing) + + slashed_validator = state.validator_registry[slashed_index] + + # Check slashing + assert slashed_validator.slashed + assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH + assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH + + if slashed_index != proposer_index: + # lost whistleblower reward + assert get_balance(state, slashed_index) < pre_slashed_balance + # gained whistleblower reward + assert get_balance(state, proposer_index) > pre_proposer_balance + else: + # gained rewards for all slashings, which may include others. And only lost that of themselves. + # Net at least 0, if more people were slashed, a balance increase. 
+ assert get_balance(state, slashed_index) >= pre_slashed_balance + + yield 'post', state + + +@with_all_phases +@spec_state_test +def test_success_double(spec, state): + attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True) + + yield from run_attester_slashing_processing(spec, state, attester_slashing) + + +@with_all_phases +@spec_state_test +def test_success_surround(spec, state): + next_epoch(spec, state) + apply_empty_block(spec, state) + + state.current_justified_epoch += 1 + attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True) + + # set attestation 1 to surround attestation 2 + attester_slashing.attestation_1.data.source_epoch = attester_slashing.attestation_2.data.source_epoch - 1 + attester_slashing.attestation_1.data.target_epoch = attester_slashing.attestation_2.data.target_epoch + 1 + + sign_indexed_attestation(spec, state, attester_slashing.attestation_1) + + yield from run_attester_slashing_processing(spec, state, attester_slashing) + + +@with_all_phases +@always_bls +@spec_state_test +def test_invalid_sig_1(spec, state): + attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True) + yield from run_attester_slashing_processing(spec, state, attester_slashing, False) + + +@with_all_phases +@always_bls +@spec_state_test +def test_invalid_sig_2(spec, state): + attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False) + yield from run_attester_slashing_processing(spec, state, attester_slashing, False) + + +@with_all_phases +@always_bls +@spec_state_test +def test_invalid_sig_1_and_2(spec, state): + attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=False) + yield from run_attester_slashing_processing(spec, state, attester_slashing, False) + + +@with_all_phases +@spec_state_test +def test_same_data(spec, state): + attester_slashing = get_valid_attester_slashing(spec, state, 
signed_1=False, signed_2=True) + + attester_slashing.attestation_1.data = attester_slashing.attestation_2.data + sign_indexed_attestation(spec, state, attester_slashing.attestation_1) + + yield from run_attester_slashing_processing(spec, state, attester_slashing, False) + + +@with_all_phases +@spec_state_test +def test_no_double_or_surround(spec, state): + attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True) + + attester_slashing.attestation_1.data.target_epoch += 1 + sign_indexed_attestation(spec, state, attester_slashing.attestation_1) + + yield from run_attester_slashing_processing(spec, state, attester_slashing, False) + + +@with_all_phases +@spec_state_test +def test_participants_already_slashed(spec, state): + attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True) + + # set all indices to slashed + attestation_1 = attester_slashing.attestation_1 + validator_indices = attestation_1.custody_bit_0_indices + attestation_1.custody_bit_1_indices + for index in validator_indices: + state.validator_registry[index].slashed = True + + yield from run_attester_slashing_processing(spec, state, attester_slashing, False) + + +@with_all_phases +@spec_state_test +def test_custody_bit_0_and_1(spec, state): + attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True) + + attester_slashing.attestation_1.custody_bit_1_indices = ( + attester_slashing.attestation_1.custody_bit_0_indices + ) + sign_indexed_attestation(spec, state, attester_slashing.attestation_1) + + yield from run_attester_slashing_processing(spec, state, attester_slashing, False) diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_block_header.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_block_header.py new file mode 100644 index 000000000..f3c017982 --- /dev/null +++ 
b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_block_header.py @@ -0,0 +1,85 @@ +from copy import deepcopy + +from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases +from eth2spec.test.helpers.block import ( + build_empty_block_for_next_slot, + sign_block +) +from eth2spec.test.helpers.state import next_slot + + +def prepare_state_for_header_processing(spec, state): + spec.process_slots(state, state.slot + 1) + + +def run_block_header_processing(spec, state, block, valid=True): + """ + Run ``process_block_header``, yielding: + - pre-state ('pre') + - block ('block') + - post-state ('post'). + If ``valid == False``, run expecting ``AssertionError`` + """ + prepare_state_for_header_processing(spec, state) + + yield 'pre', state + yield 'block', block + + if not valid: + expect_assertion_error(lambda: spec.process_block_header(state, block)) + yield 'post', None + return + + spec.process_block_header(state, block) + yield 'post', state + + +@with_all_phases +@spec_state_test +def test_success_block_header(spec, state): + block = build_empty_block_for_next_slot(spec, state, signed=True) + yield from run_block_header_processing(spec, state, block) + + +@with_all_phases +@always_bls +@spec_state_test +def test_invalid_sig_block_header(spec, state): + block = build_empty_block_for_next_slot(spec, state) + yield from run_block_header_processing(spec, state, block, valid=False) + + +@with_all_phases +@spec_state_test +def test_invalid_slot_block_header(spec, state): + block = build_empty_block_for_next_slot(spec, state) + block.slot = state.slot + 2 # invalid slot + sign_block(spec, state, block) + + yield from run_block_header_processing(spec, state, block, valid=False) + + +@with_all_phases +@spec_state_test +def test_invalid_parent_root(spec, state): + block = build_empty_block_for_next_slot(spec, state) + block.parent_root = b'\12' * 32 # invalid prev root + sign_block(spec, state, block) + + yield 
from run_block_header_processing(spec, state, block, valid=False) + + +@with_all_phases +@spec_state_test +def test_proposer_slashed(spec, state): + # use stub state to get proposer index of next slot + stub_state = deepcopy(state) + next_slot(spec, stub_state) + proposer_index = spec.get_beacon_proposer_index(stub_state) + + # set proposer to slashed + state.validator_registry[proposer_index].slashed = True + + block = build_empty_block_for_next_slot(spec, state, signed=True) + + yield from run_block_header_processing(spec, state, block, valid=False) diff --git a/test_libs/pyspec/eth2spec/test/block_processing/test_process_deposit.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_deposit.py similarity index 63% rename from test_libs/pyspec/eth2spec/test/block_processing/test_process_deposit.py rename to test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_deposit.py index 0430dd12f..c50b11f2e 100644 --- a/test_libs/pyspec/eth2spec/test/block_processing/test_process_deposit.py +++ b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_deposit.py @@ -1,6 +1,4 @@ -import eth2spec.phase0.spec as spec -from eth2spec.phase0.spec import process_deposit -from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls +from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases from eth2spec.test.helpers.deposits import ( build_deposit, prepare_state_and_deposit, @@ -10,7 +8,7 @@ from eth2spec.test.helpers.state import get_balance from eth2spec.test.helpers.keys import privkeys, pubkeys -def run_deposit_processing(state, deposit, validator_index, valid=True, effective=True): +def run_deposit_processing(spec, state, deposit, validator_index, valid=True, effective=True): """ Run ``process_deposit``, yielding: - pre-state ('pre') @@ -27,11 +25,11 @@ def run_deposit_processing(state, deposit, validator_index, valid=True, effectiv yield 'deposit', deposit if 
not valid: - expect_assertion_error(lambda: process_deposit(state, deposit)) + expect_assertion_error(lambda: spec.process_deposit(state, deposit)) yield 'post', None return - process_deposit(state, deposit) + spec.process_deposit(state, deposit) yield 'post', state @@ -54,52 +52,58 @@ def run_deposit_processing(state, deposit, validator_index, valid=True, effectiv assert state.deposit_index == state.latest_eth1_data.deposit_count +@with_all_phases @spec_state_test -def test_new_deposit(state): +def test_new_deposit(spec, state): # fresh deposit = next validator index = validator appended to registry validator_index = len(state.validator_registry) amount = spec.MAX_EFFECTIVE_BALANCE - deposit = prepare_state_and_deposit(state, validator_index, amount, signed=True) + deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True) - yield from run_deposit_processing(state, deposit, validator_index) + yield from run_deposit_processing(spec, state, deposit, validator_index) +@with_all_phases @always_bls @spec_state_test -def test_invalid_sig_new_deposit(state): +def test_invalid_sig_new_deposit(spec, state): # fresh deposit = next validator index = validator appended to registry validator_index = len(state.validator_registry) amount = spec.MAX_EFFECTIVE_BALANCE - deposit = prepare_state_and_deposit(state, validator_index, amount) - yield from run_deposit_processing(state, deposit, validator_index, valid=True, effective=False) + deposit = prepare_state_and_deposit(spec, state, validator_index, amount) + yield from run_deposit_processing(spec, state, deposit, validator_index, valid=True, effective=False) +@with_all_phases @spec_state_test -def test_success_top_up(state): +def test_success_top_up(spec, state): validator_index = 0 amount = spec.MAX_EFFECTIVE_BALANCE // 4 - deposit = prepare_state_and_deposit(state, validator_index, amount, signed=True) + deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True) - yield 
from run_deposit_processing(state, deposit, validator_index) + yield from run_deposit_processing(spec, state, deposit, validator_index) +@with_all_phases @always_bls @spec_state_test -def test_invalid_sig_top_up(state): +def test_invalid_sig_top_up(spec, state): validator_index = 0 amount = spec.MAX_EFFECTIVE_BALANCE // 4 - deposit = prepare_state_and_deposit(state, validator_index, amount) + deposit = prepare_state_and_deposit(spec, state, validator_index, amount) # invalid signatures, in top-ups, are allowed! - yield from run_deposit_processing(state, deposit, validator_index, valid=True, effective=True) + yield from run_deposit_processing(spec, state, deposit, validator_index, valid=True, effective=True) +@with_all_phases @spec_state_test -def test_invalid_withdrawal_credentials_top_up(state): +def test_invalid_withdrawal_credentials_top_up(spec, state): validator_index = 0 amount = spec.MAX_EFFECTIVE_BALANCE // 4 withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX_BYTE + spec.hash(b"junk")[1:] deposit = prepare_state_and_deposit( + spec, state, validator_index, amount, @@ -107,38 +111,41 @@ def test_invalid_withdrawal_credentials_top_up(state): ) # inconsistent withdrawal credentials, in top-ups, are allowed! 
- yield from run_deposit_processing(state, deposit, validator_index, valid=True, effective=True) + yield from run_deposit_processing(spec, state, deposit, validator_index, valid=True, effective=True) +@with_all_phases @spec_state_test -def test_wrong_index(state): +def test_wrong_index(spec, state): validator_index = len(state.validator_registry) amount = spec.MAX_EFFECTIVE_BALANCE - deposit = prepare_state_and_deposit(state, validator_index, amount) + deposit = prepare_state_and_deposit(spec, state, validator_index, amount) # mess up deposit_index deposit.index = state.deposit_index + 1 - sign_deposit_data(state, deposit.data, privkeys[validator_index]) + sign_deposit_data(spec, state, deposit.data, privkeys[validator_index]) - yield from run_deposit_processing(state, deposit, validator_index, valid=False) + yield from run_deposit_processing(spec, state, deposit, validator_index, valid=False) +@with_all_phases @spec_state_test -def test_wrong_deposit_for_deposit_count(state): +def test_wrong_deposit_for_deposit_count(spec, state): deposit_data_leaves = [spec.ZERO_HASH] * len(state.validator_registry) # build root for deposit_1 index_1 = len(deposit_data_leaves) pubkey_1 = pubkeys[index_1] privkey_1 = privkeys[index_1] - deposit_1, root_1, deposit_data_leaves = build_deposit( + _, _, deposit_data_leaves = build_deposit( + spec, state, deposit_data_leaves, pubkey_1, privkey_1, spec.MAX_EFFECTIVE_BALANCE, - withdrawal_credentials=b'\x00'*32, + withdrawal_credentials=b'\x00' * 32, signed=True, ) deposit_count_1 = len(deposit_data_leaves) @@ -148,12 +155,13 @@ def test_wrong_deposit_for_deposit_count(state): pubkey_2 = pubkeys[index_2] privkey_2 = privkeys[index_2] deposit_2, root_2, deposit_data_leaves = build_deposit( + spec, state, deposit_data_leaves, pubkey_2, privkey_2, spec.MAX_EFFECTIVE_BALANCE, - withdrawal_credentials=b'\x00'*32, + withdrawal_credentials=b'\x00' * 32, signed=True, ) @@ -161,21 +169,22 @@ def test_wrong_deposit_for_deposit_count(state): 
state.latest_eth1_data.deposit_root = root_2 state.latest_eth1_data.deposit_count = deposit_count_1 - yield from run_deposit_processing(state, deposit_2, index_2, valid=False) + yield from run_deposit_processing(spec, state, deposit_2, index_2, valid=False) # TODO: test invalid signature +@with_all_phases @spec_state_test -def test_bad_merkle_proof(state): +def test_bad_merkle_proof(spec, state): validator_index = len(state.validator_registry) amount = spec.MAX_EFFECTIVE_BALANCE - deposit = prepare_state_and_deposit(state, validator_index, amount) + deposit = prepare_state_and_deposit(spec, state, validator_index, amount) # mess up merkle branch deposit.proof[-1] = spec.ZERO_HASH - sign_deposit_data(state, deposit.data, privkeys[validator_index]) + sign_deposit_data(spec, state, deposit.data, privkeys[validator_index]) - yield from run_deposit_processing(state, deposit, validator_index, valid=False) + yield from run_deposit_processing(spec, state, deposit, validator_index, valid=False) diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_proposer_slashing.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_proposer_slashing.py new file mode 100644 index 000000000..b35241859 --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_proposer_slashing.py @@ -0,0 +1,142 @@ +from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases +from eth2spec.test.helpers.block_header import sign_block_header +from eth2spec.test.helpers.keys import privkeys +from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing +from eth2spec.test.helpers.state import get_balance + + +def run_proposer_slashing_processing(spec, state, proposer_slashing, valid=True): + """ + Run ``process_proposer_slashing``, yielding: + - pre-state ('pre') + - proposer_slashing ('proposer_slashing') + - post-state ('post'). 
+ If ``valid == False``, run expecting ``AssertionError`` + """ + + yield 'pre', state + yield 'proposer_slashing', proposer_slashing + + if not valid: + expect_assertion_error(lambda: spec.process_proposer_slashing(state, proposer_slashing)) + yield 'post', None + return + + pre_proposer_balance = get_balance(state, proposer_slashing.proposer_index) + + spec.process_proposer_slashing(state, proposer_slashing) + yield 'post', state + + # check if slashed + slashed_validator = state.validator_registry[proposer_slashing.proposer_index] + assert slashed_validator.slashed + assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH + assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH + + # lost whistleblower reward + assert ( + get_balance(state, proposer_slashing.proposer_index) < + pre_proposer_balance + ) + + +@with_all_phases +@spec_state_test +def test_success(spec, state): + proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True) + + yield from run_proposer_slashing_processing(spec, state, proposer_slashing) + + +@with_all_phases +@always_bls +@spec_state_test +def test_invalid_sig_1(spec, state): + proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=False, signed_2=True) + yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False) + + +@with_all_phases +@always_bls +@spec_state_test +def test_invalid_sig_2(spec, state): + proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=False) + yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False) + + +@with_all_phases +@always_bls +@spec_state_test +def test_invalid_sig_1_and_2(spec, state): + proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=False, signed_2=False) + yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False) + + +@with_all_phases +@spec_state_test +def test_invalid_proposer_index(spec, state): + 
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True) + # Index just too high (by 1) + proposer_slashing.proposer_index = len(state.validator_registry) + + yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False) + + +@with_all_phases +@spec_state_test +def test_epochs_are_different(spec, state): + proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=False) + + # set slots to be in different epochs + proposer_slashing.header_2.slot += spec.SLOTS_PER_EPOCH + sign_block_header(spec, state, proposer_slashing.header_2, privkeys[proposer_slashing.proposer_index]) + + yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False) + + +@with_all_phases +@spec_state_test +def test_headers_are_same(spec, state): + proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=False) + + # set headers to be the same + proposer_slashing.header_2 = proposer_slashing.header_1 + + yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False) + + +@with_all_phases +@spec_state_test +def test_proposer_is_not_activated(spec, state): + proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True) + + # set proposer to be not active yet + state.validator_registry[proposer_slashing.proposer_index].activation_epoch = spec.get_current_epoch(state) + 1 + + yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False) + + +@with_all_phases +@spec_state_test +def test_proposer_is_slashed(spec, state): + proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True) + + # set proposer to slashed + state.validator_registry[proposer_slashing.proposer_index].slashed = True + + yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False) + + +@with_all_phases +@spec_state_test +def test_proposer_is_withdrawn(spec, state): + proposer_slashing 
= get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True) + + # move 1 epoch into future, to allow for past withdrawable epoch + state.slot += spec.SLOTS_PER_EPOCH + # set proposer withdrawable_epoch in past + current_epoch = spec.get_current_epoch(state) + proposer_index = proposer_slashing.proposer_index + state.validator_registry[proposer_index].withdrawable_epoch = current_epoch - 1 + + yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False) diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_transfer.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_transfer.py new file mode 100644 index 000000000..1294ca84a --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_transfer.py @@ -0,0 +1,184 @@ +from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases +from eth2spec.test.helpers.state import next_epoch +from eth2spec.test.helpers.block import apply_empty_block +from eth2spec.test.helpers.transfers import get_valid_transfer + + +def run_transfer_processing(spec, state, transfer, valid=True): + """ + Run ``process_transfer``, yielding: + - pre-state ('pre') + - transfer ('transfer') + - post-state ('post'). 
+ If ``valid == False``, run expecting ``AssertionError`` + """ + + proposer_index = spec.get_beacon_proposer_index(state) + pre_transfer_sender_balance = state.balances[transfer.sender] + pre_transfer_recipient_balance = state.balances[transfer.recipient] + pre_transfer_proposer_balance = state.balances[proposer_index] + + yield 'pre', state + yield 'transfer', transfer + + if not valid: + expect_assertion_error(lambda: spec.process_transfer(state, transfer)) + yield 'post', None + return + + spec.process_transfer(state, transfer) + yield 'post', state + + sender_balance = state.balances[transfer.sender] + recipient_balance = state.balances[transfer.recipient] + assert sender_balance == pre_transfer_sender_balance - transfer.amount - transfer.fee + assert recipient_balance == pre_transfer_recipient_balance + transfer.amount + assert state.balances[proposer_index] == pre_transfer_proposer_balance + transfer.fee + + +@with_all_phases +@spec_state_test +def test_success_non_activated(spec, state): + transfer = get_valid_transfer(spec, state, signed=True) + # un-activate so validator can transfer + state.validator_registry[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH + + yield from run_transfer_processing(spec, state, transfer) + + +@with_all_phases +@spec_state_test +def test_success_withdrawable(spec, state): + next_epoch(spec, state) + apply_empty_block(spec, state) + + transfer = get_valid_transfer(spec, state, signed=True) + + # withdrawable_epoch in past so can transfer + state.validator_registry[transfer.sender].withdrawable_epoch = spec.get_current_epoch(state) - 1 + + yield from run_transfer_processing(spec, state, transfer) + + +@with_all_phases +@spec_state_test +def test_success_active_above_max_effective(spec, state): + sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] + state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + 1 + transfer = get_valid_transfer(spec, state, 
sender_index=sender_index, amount=1, fee=0, signed=True) + + yield from run_transfer_processing(spec, state, transfer) + + +@with_all_phases +@spec_state_test +def test_success_active_above_max_effective_fee(spec, state): + sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] + state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + 1 + transfer = get_valid_transfer(spec, state, sender_index=sender_index, amount=0, fee=1, signed=True) + + yield from run_transfer_processing(spec, state, transfer) + + +@with_all_phases +@always_bls +@spec_state_test +def test_invalid_signature(spec, state): + transfer = get_valid_transfer(spec, state) + # un-activate so validator can transfer + state.validator_registry[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH + + yield from run_transfer_processing(spec, state, transfer, False) + + +@with_all_phases +@spec_state_test +def test_active_but_transfer_past_effective_balance(spec, state): + sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] + amount = spec.MAX_EFFECTIVE_BALANCE // 32 + state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + transfer = get_valid_transfer(spec, state, sender_index=sender_index, amount=amount, fee=0, signed=True) + + yield from run_transfer_processing(spec, state, transfer, False) + + +@with_all_phases +@spec_state_test +def test_incorrect_slot(spec, state): + transfer = get_valid_transfer(spec, state, slot=state.slot + 1, signed=True) + # un-activate so validator can transfer + state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH + + yield from run_transfer_processing(spec, state, transfer, False) + + +@with_all_phases +@spec_state_test +def test_insufficient_balance_for_fee(spec, state): + sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] + state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + transfer = 
get_valid_transfer(spec, state, sender_index=sender_index, amount=0, fee=1, signed=True) + + # un-activate so validator can transfer + state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH + + yield from run_transfer_processing(spec, state, transfer, False) + + +@with_all_phases +@spec_state_test +def test_insufficient_balance(spec, state): + sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] + state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + transfer = get_valid_transfer(spec, state, sender_index=sender_index, amount=1, fee=0, signed=True) + + # un-activate so validator can transfer + state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH + + yield from run_transfer_processing(spec, state, transfer, False) + + +@with_all_phases +@spec_state_test +def test_no_dust_sender(spec, state): + sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] + balance = state.balances[sender_index] + transfer = get_valid_transfer( + spec, + state, + sender_index=sender_index, + amount=balance - spec.MIN_DEPOSIT_AMOUNT + 1, + fee=0, + signed=True, + ) + + # un-activate so validator can transfer + state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH + + yield from run_transfer_processing(spec, state, transfer, False) + + +@with_all_phases +@spec_state_test +def test_no_dust_recipient(spec, state): + sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] + state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + 1 + transfer = get_valid_transfer(spec, state, sender_index=sender_index, amount=1, fee=0, signed=True) + state.balances[transfer.recipient] = 0 + + # un-activate so validator can transfer + state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH + + yield from run_transfer_processing(spec, state, transfer, False) + + +@with_all_phases 
+@spec_state_test +def test_invalid_pubkey(spec, state): + transfer = get_valid_transfer(spec, state, signed=True) + state.validator_registry[transfer.sender].withdrawal_credentials = spec.ZERO_HASH + + # un-activate so validator can transfer + state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH + + yield from run_transfer_processing(spec, state, transfer, False) diff --git a/test_libs/pyspec/eth2spec/test/block_processing/test_process_voluntary_exit.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_voluntary_exit.py similarity index 61% rename from test_libs/pyspec/eth2spec/test/block_processing/test_process_voluntary_exit.py rename to test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_voluntary_exit.py index 53fb4e3f7..3359c5e78 100644 --- a/test_libs/pyspec/eth2spec/test/block_processing/test_process_voluntary_exit.py +++ b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_voluntary_exit.py @@ -1,16 +1,9 @@ -import eth2spec.phase0.spec as spec -from eth2spec.phase0.spec import ( - get_active_validator_indices, - get_churn_limit, - get_current_epoch, - process_voluntary_exit, -) -from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls +from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases from eth2spec.test.helpers.keys import pubkey_to_privkey from eth2spec.test.helpers.voluntary_exits import build_voluntary_exit, sign_voluntary_exit -def run_voluntary_exit_processing(state, voluntary_exit, valid=True): +def run_voluntary_exit_processing(spec, state, voluntary_exit, valid=True): """ Run ``process_voluntary_exit``, yielding: - pre-state ('pre') @@ -24,13 +17,13 @@ def run_voluntary_exit_processing(state, voluntary_exit, valid=True): yield 'voluntary_exit', voluntary_exit if not valid: - expect_assertion_error(lambda: process_voluntary_exit(state, voluntary_exit)) + expect_assertion_error(lambda: 
spec.process_voluntary_exit(state, voluntary_exit)) yield 'post', None return pre_exit_epoch = state.validator_registry[validator_index].exit_epoch - process_voluntary_exit(state, voluntary_exit) + spec.process_voluntary_exit(state, voluntary_exit) yield 'post', state @@ -38,50 +31,54 @@ def run_voluntary_exit_processing(state, voluntary_exit, valid=True): assert state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH +@with_all_phases @spec_state_test -def test_success(state): +def test_success(spec, state): # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH - current_epoch = get_current_epoch(state) - validator_index = get_active_validator_indices(state, current_epoch)[0] + current_epoch = spec.get_current_epoch(state) + validator_index = spec.get_active_validator_indices(state, current_epoch)[0] privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] - voluntary_exit = build_voluntary_exit(state, current_epoch, validator_index, privkey, signed=True) + voluntary_exit = build_voluntary_exit(spec, state, current_epoch, validator_index, privkey, signed=True) - yield from run_voluntary_exit_processing(state, voluntary_exit) + yield from run_voluntary_exit_processing(spec, state, voluntary_exit) +@with_all_phases @always_bls @spec_state_test -def test_invalid_signature(state): +def test_invalid_signature(spec, state): # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH - current_epoch = get_current_epoch(state) - validator_index = get_active_validator_indices(state, current_epoch)[0] + current_epoch = spec.get_current_epoch(state) + validator_index = spec.get_active_validator_indices(state, current_epoch)[0] privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] - voluntary_exit = build_voluntary_exit(state, 
current_epoch, validator_index, privkey) + voluntary_exit = build_voluntary_exit(spec, state, current_epoch, validator_index, privkey) - yield from run_voluntary_exit_processing(state, voluntary_exit, False) + yield from run_voluntary_exit_processing(spec, state, voluntary_exit, False) +@with_all_phases @spec_state_test -def test_success_exit_queue(state): +def test_success_exit_queue(spec, state): # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH - current_epoch = get_current_epoch(state) + current_epoch = spec.get_current_epoch(state) # exit `MAX_EXITS_PER_EPOCH` - initial_indices = get_active_validator_indices(state, current_epoch)[:get_churn_limit(state)] + initial_indices = spec.get_active_validator_indices(state, current_epoch)[:spec.get_churn_limit(state)] # Prepare a bunch of exits, based on the current state exit_queue = [] for index in initial_indices: privkey = pubkey_to_privkey[state.validator_registry[index].pubkey] exit_queue.append(build_voluntary_exit( + spec, state, current_epoch, index, @@ -92,13 +89,14 @@ def test_success_exit_queue(state): # Now run all the exits for voluntary_exit in exit_queue: # the function yields data, but we are just interested in running it here, ignore yields. 
- for _ in run_voluntary_exit_processing(state, voluntary_exit): + for _ in run_voluntary_exit_processing(spec, state, voluntary_exit): continue # exit an additional validator - validator_index = get_active_validator_indices(state, current_epoch)[-1] + validator_index = spec.get_active_validator_indices(state, current_epoch)[-1] privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] voluntary_exit = build_voluntary_exit( + spec, state, current_epoch, validator_index, @@ -108,7 +106,7 @@ def test_success_exit_queue(state): # This is the interesting part of the test: on a pre-state with a full exit queue, # when processing an additional exit, it results in an exit in a later epoch - yield from run_voluntary_exit_processing(state, voluntary_exit) + yield from run_voluntary_exit_processing(spec, state, voluntary_exit) assert ( state.validator_registry[validator_index].exit_epoch == @@ -116,16 +114,18 @@ def test_success_exit_queue(state): ) +@with_all_phases @spec_state_test -def test_validator_exit_in_future(state): +def test_validator_exit_in_future(spec, state): # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH - current_epoch = get_current_epoch(state) - validator_index = get_active_validator_indices(state, current_epoch)[0] + current_epoch = spec.get_current_epoch(state) + validator_index = spec.get_active_validator_indices(state, current_epoch)[0] privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] voluntary_exit = build_voluntary_exit( + spec, state, current_epoch, validator_index, @@ -133,21 +133,23 @@ def test_validator_exit_in_future(state): signed=False, ) voluntary_exit.epoch += 1 - sign_voluntary_exit(state, voluntary_exit, privkey) + sign_voluntary_exit(spec, state, voluntary_exit, privkey) - yield from run_voluntary_exit_processing(state, voluntary_exit, False) + yield from run_voluntary_exit_processing(spec, 
state, voluntary_exit, False) +@with_all_phases @spec_state_test -def test_validator_invalid_validator_index(state): +def test_validator_invalid_validator_index(spec, state): # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH - current_epoch = get_current_epoch(state) - validator_index = get_active_validator_indices(state, current_epoch)[0] + current_epoch = spec.get_current_epoch(state) + validator_index = spec.get_active_validator_indices(state, current_epoch)[0] privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] voluntary_exit = build_voluntary_exit( + spec, state, current_epoch, validator_index, @@ -155,21 +157,23 @@ def test_validator_invalid_validator_index(state): signed=False, ) voluntary_exit.validator_index = len(state.validator_registry) - sign_voluntary_exit(state, voluntary_exit, privkey) + sign_voluntary_exit(spec, state, voluntary_exit, privkey) - yield from run_voluntary_exit_processing(state, voluntary_exit, False) + yield from run_voluntary_exit_processing(spec, state, voluntary_exit, False) +@with_all_phases @spec_state_test -def test_validator_not_active(state): - current_epoch = get_current_epoch(state) - validator_index = get_active_validator_indices(state, current_epoch)[0] +def test_validator_not_active(spec, state): + current_epoch = spec.get_current_epoch(state) + validator_index = spec.get_active_validator_indices(state, current_epoch)[0] privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] state.validator_registry[validator_index].activation_epoch = spec.FAR_FUTURE_EPOCH # build and test voluntary exit voluntary_exit = build_voluntary_exit( + spec, state, current_epoch, validator_index, @@ -177,22 +181,24 @@ def test_validator_not_active(state): signed=True, ) - yield from run_voluntary_exit_processing(state, voluntary_exit, False) + yield from run_voluntary_exit_processing(spec, state, 
voluntary_exit, False) +@with_all_phases @spec_state_test -def test_validator_already_exited(state): +def test_validator_already_exited(spec, state): # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow validator able to exit state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH - current_epoch = get_current_epoch(state) - validator_index = get_active_validator_indices(state, current_epoch)[0] + current_epoch = spec.get_current_epoch(state) + validator_index = spec.get_active_validator_indices(state, current_epoch)[0] privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] # but validator already has exited state.validator_registry[validator_index].exit_epoch = current_epoch + 2 voluntary_exit = build_voluntary_exit( + spec, state, current_epoch, validator_index, @@ -200,16 +206,18 @@ def test_validator_already_exited(state): signed=True, ) - yield from run_voluntary_exit_processing(state, voluntary_exit, False) + yield from run_voluntary_exit_processing(spec, state, voluntary_exit, False) +@with_all_phases @spec_state_test -def test_validator_not_active_long_enough(state): - current_epoch = get_current_epoch(state) - validator_index = get_active_validator_indices(state, current_epoch)[0] +def test_validator_not_active_long_enough(spec, state): + current_epoch = spec.get_current_epoch(state) + validator_index = spec.get_active_validator_indices(state, current_epoch)[0] privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] voluntary_exit = build_voluntary_exit( + spec, state, current_epoch, validator_index, @@ -222,4 +230,4 @@ def test_validator_not_active_long_enough(state): spec.PERSISTENT_COMMITTEE_PERIOD ) - yield from run_voluntary_exit_processing(state, voluntary_exit, False) + yield from run_voluntary_exit_processing(spec, state, voluntary_exit, False) diff --git a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/__init__.py 
b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test_libs/pyspec/eth2spec/test/epoch_processing/test_process_crosslinks.py b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_crosslinks.py similarity index 53% rename from test_libs/pyspec/eth2spec/test/epoch_processing/test_process_crosslinks.py rename to test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_crosslinks.py index bb308a9f0..65d958678 100644 --- a/test_libs/pyspec/eth2spec/test/epoch_processing/test_process_crosslinks.py +++ b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_crosslinks.py @@ -1,14 +1,6 @@ from copy import deepcopy -import eth2spec.phase0.spec as spec - -from eth2spec.phase0.spec import ( - process_slot, - get_crosslink_deltas, - process_crosslinks, - state_transition, -) -from eth2spec.test.context import spec_state_test +from eth2spec.test.context import spec_state_test, with_all_phases from eth2spec.test.helpers.state import ( next_epoch, next_slot @@ -18,13 +10,12 @@ from eth2spec.test.helpers.attestations import ( add_attestation_to_state, build_empty_block_for_next_slot, fill_aggregate_attestation, - get_crosslink_committee, get_valid_attestation, sign_attestation, ) -def run_process_crosslinks(state, valid=True): +def run_process_crosslinks(spec, state, valid=True): """ Run ``process_crosslinks``, yielding: - pre-state ('pre') @@ -33,117 +24,127 @@ def run_process_crosslinks(state, valid=True): """ # transition state to slot before state transition slot = state.slot + (spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH) - 1 - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(spec, state) block.slot = slot - sign_block(state, block) - state_transition(state, block) + sign_block(spec, state, block) + spec.state_transition(state, block) # cache state before epoch transition - process_slot(state) + 
spec.process_slot(state) yield 'pre', state - process_crosslinks(state) + spec.process_crosslinks(state) yield 'post', state +@with_all_phases @spec_state_test -def test_no_attestations(state): - yield from run_process_crosslinks(state) +def test_no_attestations(spec, state): + yield from run_process_crosslinks(spec, state) for shard in range(spec.SHARD_COUNT): assert state.previous_crosslinks[shard] == state.current_crosslinks[shard] +@with_all_phases @spec_state_test -def test_single_crosslink_update_from_current_epoch(state): - next_epoch(state) +def test_single_crosslink_update_from_current_epoch(spec, state): + next_epoch(spec, state) - attestation = get_valid_attestation(state, signed=True) + attestation = get_valid_attestation(spec, state, signed=True) - fill_aggregate_attestation(state, attestation) - add_attestation_to_state(state, attestation, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) + fill_aggregate_attestation(spec, state, attestation) + add_attestation_to_state(spec, state, attestation, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) assert len(state.current_epoch_attestations) == 1 shard = attestation.data.crosslink.shard pre_crosslink = deepcopy(state.current_crosslinks[shard]) - yield from run_process_crosslinks(state) + yield from run_process_crosslinks(spec, state) assert state.previous_crosslinks[shard] != state.current_crosslinks[shard] assert pre_crosslink != state.current_crosslinks[shard] +@with_all_phases @spec_state_test -def test_single_crosslink_update_from_previous_epoch(state): - next_epoch(state) +def test_single_crosslink_update_from_previous_epoch(spec, state): + next_epoch(spec, state) - attestation = get_valid_attestation(state, signed=True) + attestation = get_valid_attestation(spec, state, signed=True) - fill_aggregate_attestation(state, attestation) - add_attestation_to_state(state, attestation, state.slot + spec.SLOTS_PER_EPOCH) + fill_aggregate_attestation(spec, state, attestation) + add_attestation_to_state(spec, 
state, attestation, state.slot + spec.SLOTS_PER_EPOCH) assert len(state.previous_epoch_attestations) == 1 shard = attestation.data.crosslink.shard pre_crosslink = deepcopy(state.current_crosslinks[shard]) - crosslink_deltas = get_crosslink_deltas(state) + crosslink_deltas = spec.get_crosslink_deltas(state) - yield from run_process_crosslinks(state) + yield from run_process_crosslinks(spec, state) assert state.previous_crosslinks[shard] != state.current_crosslinks[shard] assert pre_crosslink != state.current_crosslinks[shard] # ensure rewarded - for index in get_crosslink_committee(state, attestation.data.target_epoch, attestation.data.crosslink.shard): + for index in spec.get_crosslink_committee( + state, + attestation.data.target_epoch, + attestation.data.crosslink.shard): assert crosslink_deltas[0][index] > 0 assert crosslink_deltas[1][index] == 0 +@with_all_phases @spec_state_test -def test_double_late_crosslink(state): +def test_double_late_crosslink(spec, state): if spec.get_epoch_committee_count(state, spec.get_current_epoch(state)) < spec.SHARD_COUNT: print("warning: ignoring test, test-assumptions are incompatible with configuration") return - next_epoch(state) + next_epoch(spec, state) state.slot += 4 - attestation_1 = get_valid_attestation(state, signed=True) - fill_aggregate_attestation(state, attestation_1) + attestation_1 = get_valid_attestation(spec, state, signed=True) + fill_aggregate_attestation(spec, state, attestation_1) # add attestation_1 to next epoch - next_epoch(state) - add_attestation_to_state(state, attestation_1, state.slot + 1) + next_epoch(spec, state) + add_attestation_to_state(spec, state, attestation_1, state.slot + 1) - for slot in range(spec.SLOTS_PER_EPOCH): - attestation_2 = get_valid_attestation(state) + for _ in range(spec.SLOTS_PER_EPOCH): + attestation_2 = get_valid_attestation(spec, state) if attestation_2.data.crosslink.shard == attestation_1.data.crosslink.shard: - sign_attestation(state, attestation_2) + 
sign_attestation(spec, state, attestation_2) break - next_slot(state) - apply_empty_block(state) + next_slot(spec, state) + apply_empty_block(spec, state) - fill_aggregate_attestation(state, attestation_2) + fill_aggregate_attestation(spec, state, attestation_2) # add attestation_2 in the next epoch after attestation_1 has # already updated the relevant crosslink - next_epoch(state) - add_attestation_to_state(state, attestation_2, state.slot + 1) + next_epoch(spec, state) + add_attestation_to_state(spec, state, attestation_2, state.slot + 1) assert len(state.previous_epoch_attestations) == 1 assert len(state.current_epoch_attestations) == 0 - crosslink_deltas = get_crosslink_deltas(state) + crosslink_deltas = spec.get_crosslink_deltas(state) - yield from run_process_crosslinks(state) + yield from run_process_crosslinks(spec, state) shard = attestation_2.data.crosslink.shard # ensure that the current crosslinks were not updated by the second attestation assert state.previous_crosslinks[shard] == state.current_crosslinks[shard] # ensure no reward, only penalties for the failed crosslink - for index in get_crosslink_committee(state, attestation_2.data.target_epoch, attestation_2.data.crosslink.shard): + for index in spec.get_crosslink_committee( + state, + attestation_2.data.target_epoch, + attestation_2.data.crosslink.shard): assert crosslink_deltas[0][index] == 0 assert crosslink_deltas[1][index] > 0 diff --git a/test_libs/pyspec/eth2spec/test/epoch_processing/test_process_registry_updates.py b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_registry_updates.py similarity index 65% rename from test_libs/pyspec/eth2spec/test/epoch_processing/test_process_registry_updates.py rename to test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_registry_updates.py index 8f6e35088..e6679f844 100644 --- a/test_libs/pyspec/eth2spec/test/epoch_processing/test_process_registry_updates.py +++ 
b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_registry_updates.py @@ -1,17 +1,10 @@ -import eth2spec.phase0.spec as spec - -from eth2spec.phase0.spec import ( - get_current_epoch, - is_active_validator, - process_registry_updates -) from eth2spec.phase0.spec import state_transition from eth2spec.test.helpers.block import build_empty_block_for_next_slot, sign_block from eth2spec.test.helpers.state import next_epoch -from eth2spec.test.context import spec_state_test +from eth2spec.test.context import spec_state_test, with_all_phases -def run_process_registry_updates(state, valid=True): +def run_process_registry_updates(spec, state, valid=True): """ Run ``process_crosslinks``, yielding: - pre-state ('pre') @@ -20,9 +13,9 @@ def run_process_registry_updates(state, valid=True): """ # transition state to slot before state transition slot = state.slot + (spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH) - 1 - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(spec, state) block.slot = slot - sign_block(state, block) + sign_block(spec, state, block) state_transition(state, block) # cache state before epoch transition @@ -34,50 +27,52 @@ def run_process_registry_updates(state, valid=True): spec.process_rewards_and_penalties(state) yield 'pre', state - process_registry_updates(state) + spec.process_registry_updates(state) yield 'post', state +@with_all_phases @spec_state_test -def test_activation(state): +def test_activation(spec, state): index = 0 - assert is_active_validator(state.validator_registry[index], get_current_epoch(state)) + assert spec.is_active_validator(state.validator_registry[index], spec.get_current_epoch(state)) # Mock a new deposit state.validator_registry[index].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH state.validator_registry[index].activation_epoch = spec.FAR_FUTURE_EPOCH state.validator_registry[index].effective_balance = spec.MAX_EFFECTIVE_BALANCE - assert not 
is_active_validator(state.validator_registry[index], get_current_epoch(state)) + assert not spec.is_active_validator(state.validator_registry[index], spec.get_current_epoch(state)) for _ in range(spec.ACTIVATION_EXIT_DELAY + 1): - next_epoch(state) + next_epoch(spec, state) - yield from run_process_registry_updates(state) + yield from run_process_registry_updates(spec, state) assert state.validator_registry[index].activation_eligibility_epoch != spec.FAR_FUTURE_EPOCH assert state.validator_registry[index].activation_epoch != spec.FAR_FUTURE_EPOCH - assert is_active_validator( + assert spec.is_active_validator( state.validator_registry[index], - get_current_epoch(state), + spec.get_current_epoch(state), ) +@with_all_phases @spec_state_test -def test_ejection(state): +def test_ejection(spec, state): index = 0 - assert is_active_validator(state.validator_registry[index], get_current_epoch(state)) + assert spec.is_active_validator(state.validator_registry[index], spec.get_current_epoch(state)) assert state.validator_registry[index].exit_epoch == spec.FAR_FUTURE_EPOCH # Mock an ejection state.validator_registry[index].effective_balance = spec.EJECTION_BALANCE for _ in range(spec.ACTIVATION_EXIT_DELAY + 1): - next_epoch(state) + next_epoch(spec, state) - yield from run_process_registry_updates(state) + yield from run_process_registry_updates(spec, state) assert state.validator_registry[index].exit_epoch != spec.FAR_FUTURE_EPOCH - assert not is_active_validator( + assert not spec.is_active_validator( state.validator_registry[index], - get_current_epoch(state), + spec.get_current_epoch(state), ) diff --git a/test_libs/pyspec/eth2spec/test/phase_1/__init__.py b/test_libs/pyspec/eth2spec/test/phase_1/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test_libs/pyspec/eth2spec/test/phase_1/block_processing/__init__.py b/test_libs/pyspec/eth2spec/test/phase_1/block_processing/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git 
a/test_libs/pyspec/eth2spec/test/phase_1/block_processing/test_process_early_derived_secret_reveal.py b/test_libs/pyspec/eth2spec/test/phase_1/block_processing/test_process_early_derived_secret_reveal.py
new file mode 100644
index 000000000..110231d77
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/phase_1/block_processing/test_process_early_derived_secret_reveal.py
@@ -0,0 +1,128 @@
+from eth2spec.test.helpers.custody import get_valid_early_derived_secret_reveal
+from eth2spec.test.helpers.block import apply_empty_block
+from eth2spec.test.helpers.state import next_epoch, get_balance
+from eth2spec.test.context import with_all_phases_except, spec_state_test, expect_assertion_error
+
+
+def run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, valid=True):
+    """
+    Run ``process_early_derived_secret_reveal``, yielding:
+    - pre-state ('pre')
+    - randao_key_reveal ('randao_key_reveal')
+    - post-state ('post').
+    If ``valid == False``, run expecting ``AssertionError``
+    """
+    yield 'pre', state
+    yield 'randao_key_reveal', randao_key_reveal
+
+    if not valid:
+        expect_assertion_error(lambda: spec.process_early_derived_secret_reveal(state, randao_key_reveal))
+        yield 'post', None
+        return
+
+    pre_slashed_balance = get_balance(state, randao_key_reveal.revealed_index)
+
+    spec.process_early_derived_secret_reveal(state, randao_key_reveal)
+
+    slashed_validator = state.validator_registry[randao_key_reveal.revealed_index]
+
+    if randao_key_reveal.epoch >= spec.get_current_epoch(state) + spec.CUSTODY_PERIOD_TO_RANDAO_PADDING:
+        assert slashed_validator.slashed
+        assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
+        assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
+
+    assert get_balance(state, randao_key_reveal.revealed_index) < pre_slashed_balance
+    yield 'post', state
+
+
+@with_all_phases_except(['phase0'])
+@spec_state_test
+def test_success(spec, state):
+    randao_key_reveal = get_valid_early_derived_secret_reveal(spec, state)
+
+    
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal) + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_reveal_from_current_epoch(spec, state): + randao_key_reveal = get_valid_early_derived_secret_reveal(spec, state, spec.get_current_epoch(state)) + + yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False) + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_reveal_from_past_epoch(spec, state): + next_epoch(spec, state) + apply_empty_block(spec, state) + randao_key_reveal = get_valid_early_derived_secret_reveal(spec, state, spec.get_current_epoch(state) - 1) + + yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False) + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_reveal_with_custody_padding(spec, state): + randao_key_reveal = get_valid_early_derived_secret_reveal( + spec, + state, + spec.get_current_epoch(state) + spec.CUSTODY_PERIOD_TO_RANDAO_PADDING, + ) + yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, True) + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_reveal_with_custody_padding_minus_one(spec, state): + randao_key_reveal = get_valid_early_derived_secret_reveal( + spec, + state, + spec.get_current_epoch(state) + spec.CUSTODY_PERIOD_TO_RANDAO_PADDING - 1, + ) + yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, True) + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_double_reveal(spec, state): + randao_key_reveal1 = get_valid_early_derived_secret_reveal( + spec, + state, + spec.get_current_epoch(state) + spec.RANDAO_PENALTY_EPOCHS + 1, + ) + res = dict(run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal1)) + pre_state = res['pre'] + yield 'pre', pre_state + intermediate_state = res['post'] + + randao_key_reveal2 = get_valid_early_derived_secret_reveal( + spec, + 
intermediate_state, + spec.get_current_epoch(pre_state) + spec.RANDAO_PENALTY_EPOCHS + 1, + ) + res = dict(run_early_derived_secret_reveal_processing(spec, intermediate_state, randao_key_reveal2, False)) + post_state = res['post'] + yield 'randao_key_reveal', [randao_key_reveal1, randao_key_reveal2] + yield 'post', post_state + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_revealer_is_slashed(spec, state): + randao_key_reveal = get_valid_early_derived_secret_reveal(spec, state, spec.get_current_epoch(state)) + state.validator_registry[randao_key_reveal.revealed_index].slashed = True + + yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False) + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_far_future_epoch(spec, state): + randao_key_reveal = get_valid_early_derived_secret_reveal( + spec, + state, + spec.get_current_epoch(state) + spec.EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS, + ) + + yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False) diff --git a/test_libs/pyspec/eth2spec/test/sanity/test_blocks.py b/test_libs/pyspec/eth2spec/test/sanity/test_blocks.py index 654a41d62..587c37742 100644 --- a/test_libs/pyspec/eth2spec/test/sanity/test_blocks.py +++ b/test_libs/pyspec/eth2spec/test/sanity/test_blocks.py @@ -1,20 +1,9 @@ from copy import deepcopy +from typing import List -import eth2spec.phase0.spec as spec +from eth2spec.utils.ssz.ssz_impl import signing_root from eth2spec.utils.bls import bls_sign -from eth2spec.utils.minimal_ssz import signing_root -from eth2spec.phase0.spec import ( - # SSZ - VoluntaryExit, - # functions - get_active_validator_indices, - get_beacon_proposer_index, - get_block_root_at_slot, - get_current_epoch, - get_domain, - state_transition, -) from eth2spec.test.helpers.state import get_balance from eth2spec.test.helpers.transfers import get_valid_transfer from eth2spec.test.helpers.block import 
build_empty_block_for_next_slot, sign_block @@ -24,89 +13,94 @@ from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing from eth2spec.test.helpers.attestations import get_valid_attestation from eth2spec.test.helpers.deposits import prepare_state_and_deposit -from eth2spec.test.context import spec_state_test, never_bls +from eth2spec.test.context import spec_state_test, never_bls, with_all_phases +@with_all_phases @never_bls @spec_state_test -def test_empty_block_transition(state): +def test_empty_block_transition(spec, state): pre_slot = state.slot pre_eth1_votes = len(state.eth1_data_votes) yield 'pre', state - block = build_empty_block_for_next_slot(state, signed=True) - yield 'blocks', [block], [spec.BeaconBlock] + block = build_empty_block_for_next_slot(spec, state, signed=True) + yield 'blocks', [block], List[spec.BeaconBlock] - state_transition(state, block) + spec.state_transition(state, block) yield 'post', state assert len(state.eth1_data_votes) == pre_eth1_votes + 1 - assert get_block_root_at_slot(state, pre_slot) == block.parent_root + assert spec.get_block_root_at_slot(state, pre_slot) == block.parent_root +@with_all_phases @never_bls @spec_state_test -def test_skipped_slots(state): +def test_skipped_slots(spec, state): pre_slot = state.slot yield 'pre', state - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(spec, state) block.slot += 3 - sign_block(state, block) - yield 'blocks', [block], [spec.BeaconBlock] + sign_block(spec, state, block) + yield 'blocks', [block], List[spec.BeaconBlock] - state_transition(state, block) + spec.state_transition(state, block) yield 'post', state assert state.slot == block.slot for slot in range(pre_slot, state.slot): - assert get_block_root_at_slot(state, slot) == block.parent_root + assert spec.get_block_root_at_slot(state, slot) == block.parent_root +@with_all_phases @spec_state_test -def test_empty_epoch_transition(state): +def 
test_empty_epoch_transition(spec, state): pre_slot = state.slot yield 'pre', state - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(spec, state) block.slot += spec.SLOTS_PER_EPOCH - sign_block(state, block) - yield 'blocks', [block], [spec.BeaconBlock] + sign_block(spec, state, block) + yield 'blocks', [block], List[spec.BeaconBlock] - state_transition(state, block) + spec.state_transition(state, block) yield 'post', state assert state.slot == block.slot for slot in range(pre_slot, state.slot): - assert get_block_root_at_slot(state, slot) == block.parent_root + assert spec.get_block_root_at_slot(state, slot) == block.parent_root +# @with_all_phases # @spec_state_test -# def test_empty_epoch_transition_not_finalizing(state): +# def test_empty_epoch_transition_not_finalizing(spec, state): # # copy for later balance lookups. # pre_state = deepcopy(state) # yield 'pre', state -# -# block = build_empty_block_for_next_slot(state) + +# block = build_empty_block_for_next_slot(spec, state) # block.slot += spec.SLOTS_PER_EPOCH * 5 -# sign_block(state, block, proposer_index=0) -# yield 'blocks', [block], [spec.BeaconBlock] -# -# state_transition(state, block) +# sign_block(spec, state, block, proposer_index=0) +# yield 'blocks', [block], List[spec.BeaconBlock] + +# spec.state_transition(state, block) # yield 'post', state -# + # assert state.slot == block.slot -# assert state.finalized_epoch < get_current_epoch(state) - 4 +# assert state.finalized_epoch < spec.get_current_epoch(state) - 4 # for index in range(len(state.validator_registry)): # assert get_balance(state, index) < get_balance(pre_state, index) +@with_all_phases @spec_state_test -def test_proposer_slashing(state): +def test_proposer_slashing(spec, state): # copy for later balance lookups. 
pre_state = deepcopy(state) - proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=True) + proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True) validator_index = proposer_slashing.proposer_index assert not state.validator_registry[validator_index].slashed @@ -116,12 +110,12 @@ def test_proposer_slashing(state): # # Add to state via block transition # - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(spec, state) block.body.proposer_slashings.append(proposer_slashing) - sign_block(state, block) - yield 'blocks', [block], [spec.BeaconBlock] + sign_block(spec, state, block) + yield 'blocks', [block], List[spec.BeaconBlock] - state_transition(state, block) + spec.state_transition(state, block) yield 'post', state # check if slashed @@ -133,12 +127,13 @@ def test_proposer_slashing(state): assert get_balance(state, validator_index) < get_balance(pre_state, validator_index) +@with_all_phases @spec_state_test -def test_attester_slashing(state): +def test_attester_slashing(spec, state): # copy for later balance lookups. 
pre_state = deepcopy(state) - attester_slashing = get_valid_attester_slashing(state, signed_1=True, signed_2=True) + attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True) validator_index = (attester_slashing.attestation_1.custody_bit_0_indices + attester_slashing.attestation_1.custody_bit_1_indices)[0] @@ -149,12 +144,12 @@ def test_attester_slashing(state): # # Add to state via block transition # - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(spec, state) block.body.attester_slashings.append(attester_slashing) - sign_block(state, block) - yield 'blocks', [block], [spec.BeaconBlock] + sign_block(spec, state, block) + yield 'blocks', [block], List[spec.BeaconBlock] - state_transition(state, block) + spec.state_transition(state, block) yield 'post', state slashed_validator = state.validator_registry[validator_index] @@ -164,7 +159,7 @@ def test_attester_slashing(state): # lost whistleblower reward assert get_balance(state, validator_index) < get_balance(pre_state, validator_index) - proposer_index = get_beacon_proposer_index(state) + proposer_index = spec.get_beacon_proposer_index(state) # gained whistleblower reward assert ( get_balance(state, proposer_index) > @@ -174,24 +169,25 @@ def test_attester_slashing(state): # TODO update functions below to be like above, i.e. 
with @spec_state_test and yielding data to put into the test vector +@with_all_phases @spec_state_test -def test_deposit_in_block(state): +def test_deposit_in_block(spec, state): initial_registry_len = len(state.validator_registry) initial_balances_len = len(state.balances) validator_index = len(state.validator_registry) amount = spec.MAX_EFFECTIVE_BALANCE - deposit = prepare_state_and_deposit(state, validator_index, amount, signed=True) + deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True) yield 'pre', state - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(spec, state) block.body.deposits.append(deposit) - sign_block(state, block) + sign_block(spec, state, block) - yield 'blocks', [block], [spec.BeaconBlock] + yield 'blocks', [block], List[spec.BeaconBlock] - state_transition(state, block) + spec.state_transition(state, block) yield 'post', state assert len(state.validator_registry) == initial_registry_len + 1 @@ -200,11 +196,12 @@ def test_deposit_in_block(state): assert state.validator_registry[validator_index].pubkey == pubkeys[validator_index] +@with_all_phases @spec_state_test -def test_deposit_top_up(state): +def test_deposit_top_up(spec, state): validator_index = 0 amount = spec.MAX_EFFECTIVE_BALANCE // 4 - deposit = prepare_state_and_deposit(state, validator_index, amount) + deposit = prepare_state_and_deposit(spec, state, validator_index, amount) initial_registry_len = len(state.validator_registry) initial_balances_len = len(state.balances) @@ -212,13 +209,13 @@ def test_deposit_top_up(state): yield 'pre', state - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(spec, state) block.body.deposits.append(deposit) - sign_block(state, block) + sign_block(spec, state, block) - yield 'blocks', [block], [spec.BeaconBlock] + yield 'blocks', [block], List[spec.BeaconBlock] - state_transition(state, block) + spec.state_transition(state, block) yield 
'post', state assert len(state.validator_registry) == initial_registry_len @@ -226,44 +223,46 @@ def test_deposit_top_up(state): assert get_balance(state, validator_index) == validator_pre_balance + amount +@with_all_phases @spec_state_test -def test_attestation(state): +def test_attestation(spec, state): state.slot = spec.SLOTS_PER_EPOCH yield 'pre', state - attestation = get_valid_attestation(state, signed=True) + attestation = get_valid_attestation(spec, state, signed=True) # Add to state via block transition pre_current_attestations_len = len(state.current_epoch_attestations) - attestation_block = build_empty_block_for_next_slot(state) + attestation_block = build_empty_block_for_next_slot(spec, state) attestation_block.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY attestation_block.body.attestations.append(attestation) - sign_block(state, attestation_block) - state_transition(state, attestation_block) + sign_block(spec, state, attestation_block) + spec.state_transition(state, attestation_block) assert len(state.current_epoch_attestations) == pre_current_attestations_len + 1 # Epoch transition should move to previous_epoch_attestations pre_current_attestations_root = spec.hash_tree_root(state.current_epoch_attestations) - epoch_block = build_empty_block_for_next_slot(state) + epoch_block = build_empty_block_for_next_slot(spec, state) epoch_block.slot += spec.SLOTS_PER_EPOCH - sign_block(state, epoch_block) - state_transition(state, epoch_block) + sign_block(spec, state, epoch_block) + spec.state_transition(state, epoch_block) - yield 'blocks', [attestation_block, epoch_block], [spec.BeaconBlock] + yield 'blocks', [attestation_block, epoch_block], List[spec.BeaconBlock] yield 'post', state assert len(state.current_epoch_attestations) == 0 assert spec.hash_tree_root(state.previous_epoch_attestations) == pre_current_attestations_root +@with_all_phases @spec_state_test -def test_voluntary_exit(state): - validator_index = get_active_validator_indices( +def 
test_voluntary_exit(spec, state): + validator_index = spec.get_active_validator_indices( state, - get_current_epoch(state) + spec.get_current_epoch(state) )[-1] # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit @@ -271,48 +270,49 @@ def test_voluntary_exit(state): yield 'pre', state - voluntary_exit = VoluntaryExit( - epoch=get_current_epoch(state), + voluntary_exit = spec.VoluntaryExit( + epoch=spec.get_current_epoch(state), validator_index=validator_index, ) voluntary_exit.signature = bls_sign( message_hash=signing_root(voluntary_exit), privkey=privkeys[validator_index], - domain=get_domain( + domain=spec.get_domain( state=state, domain_type=spec.DOMAIN_VOLUNTARY_EXIT, ) ) # Add to state via block transition - initiate_exit_block = build_empty_block_for_next_slot(state) + initiate_exit_block = build_empty_block_for_next_slot(spec, state) initiate_exit_block.body.voluntary_exits.append(voluntary_exit) - sign_block(state, initiate_exit_block) - state_transition(state, initiate_exit_block) + sign_block(spec, state, initiate_exit_block) + spec.state_transition(state, initiate_exit_block) assert state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH # Process within epoch transition - exit_block = build_empty_block_for_next_slot(state) + exit_block = build_empty_block_for_next_slot(spec, state) exit_block.slot += spec.SLOTS_PER_EPOCH - sign_block(state, exit_block) - state_transition(state, exit_block) + sign_block(spec, state, exit_block) + spec.state_transition(state, exit_block) - yield 'blocks', [initiate_exit_block, exit_block], [spec.BeaconBlock] + yield 'blocks', [initiate_exit_block, exit_block], List[spec.BeaconBlock] yield 'post', state assert state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH +@with_all_phases @spec_state_test -def test_transfer(state): +def test_transfer(spec, state): # overwrite default 0 to test spec.MAX_TRANSFERS = 1 - sender_index = 
get_active_validator_indices(state, get_current_epoch(state))[-1] + sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] amount = get_balance(state, sender_index) - transfer = get_valid_transfer(state, state.slot + 1, sender_index, amount, signed=True) + transfer = get_valid_transfer(spec, state, state.slot + 1, sender_index, amount, signed=True) recipient_index = transfer.recipient pre_transfer_recipient_balance = get_balance(state, recipient_index) @@ -322,13 +322,13 @@ def test_transfer(state): yield 'pre', state # Add to state via block transition - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(spec, state) block.body.transfers.append(transfer) - sign_block(state, block) + sign_block(spec, state, block) - yield 'blocks', [block], [spec.BeaconBlock] + yield 'blocks', [block], List[spec.BeaconBlock] - state_transition(state, block) + spec.state_transition(state, block) yield 'post', state sender_balance = get_balance(state, sender_index) @@ -337,10 +337,11 @@ def test_transfer(state): assert recipient_balance == pre_transfer_recipient_balance + amount +@with_all_phases @spec_state_test -def test_balance_driven_status_transitions(state): - current_epoch = get_current_epoch(state) - validator_index = get_active_validator_indices(state, current_epoch)[-1] +def test_balance_driven_status_transitions(spec, state): + current_epoch = spec.get_current_epoch(state) + validator_index = spec.get_active_validator_indices(state, current_epoch)[-1] assert state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH @@ -350,57 +351,59 @@ def test_balance_driven_status_transitions(state): yield 'pre', state # trigger epoch transition - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(spec, state) block.slot += spec.SLOTS_PER_EPOCH - sign_block(state, block) - state_transition(state, block) + sign_block(spec, state, block) + 
spec.state_transition(state, block) - yield 'blocks', [block], [spec.BeaconBlock] + yield 'blocks', [block], List[spec.BeaconBlock] yield 'post', state assert state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH +@with_all_phases @spec_state_test -def test_historical_batch(state): +def test_historical_batch(spec, state): state.slot += spec.SLOTS_PER_HISTORICAL_ROOT - (state.slot % spec.SLOTS_PER_HISTORICAL_ROOT) - 1 pre_historical_roots_len = len(state.historical_roots) yield 'pre', state - block = build_empty_block_for_next_slot(state, signed=True) - state_transition(state, block) + block = build_empty_block_for_next_slot(spec, state, signed=True) + spec.state_transition(state, block) - yield 'blocks', [block], [spec.BeaconBlock] + yield 'blocks', [block], List[spec.BeaconBlock] yield 'post', state assert state.slot == block.slot - assert get_current_epoch(state) % (spec.SLOTS_PER_HISTORICAL_ROOT // spec.SLOTS_PER_EPOCH) == 0 + assert spec.get_current_epoch(state) % (spec.SLOTS_PER_HISTORICAL_ROOT // spec.SLOTS_PER_EPOCH) == 0 assert len(state.historical_roots) == pre_historical_roots_len + 1 +# @with_all_phases # @spec_state_test -# def test_eth1_data_votes(state): +# def test_eth1_data_votes(spec, state): # yield 'pre', state -# + # expected_votes = 0 # assert len(state.eth1_data_votes) == expected_votes -# + # blocks = [] # for _ in range(spec.SLOTS_PER_ETH1_VOTING_PERIOD - 1): -# block = build_empty_block_for_next_slot(state) -# state_transition(state, block) +# block = build_empty_block_for_next_slot(spec, state) +# spec.state_transition(state, block) # expected_votes += 1 # assert len(state.eth1_data_votes) == expected_votes # blocks.append(block) -# -# block = build_empty_block_for_next_slot(state) + +# block = build_empty_block_for_next_slot(spec, state) # blocks.append(block) -# -# state_transition(state, block) -# -# yield 'blocks', [block], [spec.BeaconBlock] + +# spec.state_transition(state, block) + +# yield 'blocks', [block], 
List[spec.BeaconBlock] # yield 'post', state -# + # assert state.slot % spec.SLOTS_PER_ETH1_VOTING_PERIOD == 0 # assert len(state.eth1_data_votes) == 1 diff --git a/test_libs/pyspec/eth2spec/test/sanity/test_slots.py b/test_libs/pyspec/eth2spec/test/sanity/test_slots.py index 4c3897a6c..6ef6be4d3 100644 --- a/test_libs/pyspec/eth2spec/test/sanity/test_slots.py +++ b/test_libs/pyspec/eth2spec/test/sanity/test_slots.py @@ -1,57 +1,59 @@ -import eth2spec.phase0.spec as spec - -from eth2spec.phase0.spec import process_slots from eth2spec.test.helpers.state import get_state_root -from eth2spec.test.context import spec_state_test +from eth2spec.test.context import spec_state_test, with_all_phases +@with_all_phases @spec_state_test -def test_slots_1(state): +def test_slots_1(spec, state): pre_slot = state.slot pre_root = state.hash_tree_root() yield 'pre', state slots = 1 yield 'slots', slots - process_slots(state, state.slot + slots) + spec.process_slots(state, state.slot + slots) yield 'post', state assert state.slot == pre_slot + 1 - assert get_state_root(state, pre_slot) == pre_root + assert get_state_root(spec, state, pre_slot) == pre_root +@with_all_phases @spec_state_test -def test_slots_2(state): +def test_slots_2(spec, state): yield 'pre', state slots = 2 yield 'slots', slots - process_slots(state, state.slot + slots) + spec.process_slots(state, state.slot + slots) yield 'post', state +@with_all_phases @spec_state_test -def test_empty_epoch(state): +def test_empty_epoch(spec, state): yield 'pre', state slots = spec.SLOTS_PER_EPOCH yield 'slots', slots - process_slots(state, state.slot + slots) + spec.process_slots(state, state.slot + slots) yield 'post', state +@with_all_phases @spec_state_test -def test_double_empty_epoch(state): +def test_double_empty_epoch(spec, state): yield 'pre', state slots = spec.SLOTS_PER_EPOCH * 2 yield 'slots', slots - process_slots(state, state.slot + slots) + spec.process_slots(state, state.slot + slots) yield 'post', state 
+@with_all_phases @spec_state_test -def test_over_epoch_boundary(state): - process_slots(state, state.slot + (spec.SLOTS_PER_EPOCH // 2)) +def test_over_epoch_boundary(spec, state): + spec.process_slots(state, state.slot + (spec.SLOTS_PER_EPOCH // 2)) yield 'pre', state slots = spec.SLOTS_PER_EPOCH yield 'slots', slots - process_slots(state, state.slot + slots) + spec.process_slots(state, state.slot + slots) yield 'post', state diff --git a/test_libs/pyspec/eth2spec/test/test_finality.py b/test_libs/pyspec/eth2spec/test/test_finality.py index cdd09bf23..801e8b4fd 100644 --- a/test_libs/pyspec/eth2spec/test/test_finality.py +++ b/test_libs/pyspec/eth2spec/test/test_finality.py @@ -1,17 +1,14 @@ from copy import deepcopy +from typing import List -import eth2spec.phase0.spec as spec -from eth2spec.phase0.spec import ( - get_current_epoch, - get_epoch_start_slot, -) -from .context import spec_state_test, never_bls -from .helpers.state import next_epoch -from .helpers.block import build_empty_block_for_next_slot, apply_empty_block -from .helpers.attestations import get_valid_attestation +from eth2spec.test.context import spec_state_test, never_bls, with_all_phases +from eth2spec.test.helpers.state import next_epoch +from eth2spec.test.helpers.block import build_empty_block_for_next_slot, apply_empty_block +from eth2spec.test.helpers.attestations import get_valid_attestation -def check_finality(state, +def check_finality(spec, + state, prev_state, current_justified_changed, previous_justified_changed, @@ -38,22 +35,23 @@ def check_finality(state, assert state.finalized_root == prev_state.finalized_root -def next_epoch_with_attestations(state, +def next_epoch_with_attestations(spec, + state, fill_cur_epoch, fill_prev_epoch): post_state = deepcopy(state) blocks = [] for _ in range(spec.SLOTS_PER_EPOCH): - block = build_empty_block_for_next_slot(post_state) + block = build_empty_block_for_next_slot(spec, post_state) if fill_cur_epoch: slot_to_attest = post_state.slot - 
spec.MIN_ATTESTATION_INCLUSION_DELAY + 1 - if slot_to_attest >= get_epoch_start_slot(get_current_epoch(post_state)): - cur_attestation = get_valid_attestation(post_state, slot_to_attest) + if slot_to_attest >= spec.get_epoch_start_slot(spec.get_current_epoch(post_state)): + cur_attestation = get_valid_attestation(spec, post_state, slot_to_attest) block.body.attestations.append(cur_attestation) if fill_prev_epoch: slot_to_attest = post_state.slot - spec.SLOTS_PER_EPOCH + 1 - prev_attestation = get_valid_attestation(post_state, slot_to_attest) + prev_attestation = get_valid_attestation(spec, post_state, slot_to_attest) block.body.attestations.append(prev_attestation) spec.state_transition(post_state, block) @@ -62,140 +60,144 @@ def next_epoch_with_attestations(state, return state, blocks, post_state +@with_all_phases @never_bls @spec_state_test -def test_finality_rule_4(state): +def test_finality_rule_4(spec, state): yield 'pre', state blocks = [] for epoch in range(4): - prev_state, new_blocks, state = next_epoch_with_attestations(state, True, False) + prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, True, False) blocks += new_blocks # justification/finalization skipped at GENESIS_EPOCH if epoch == 0: - check_finality(state, prev_state, False, False, False) + check_finality(spec, state, prev_state, False, False, False) # justification/finalization skipped at GENESIS_EPOCH + 1 elif epoch == 1: - check_finality(state, prev_state, False, False, False) + check_finality(spec, state, prev_state, False, False, False) elif epoch == 2: - check_finality(state, prev_state, True, False, False) + check_finality(spec, state, prev_state, True, False, False) elif epoch >= 3: # rule 4 of finality - check_finality(state, prev_state, True, True, True) + check_finality(spec, state, prev_state, True, True, True) assert state.finalized_epoch == prev_state.current_justified_epoch assert state.finalized_root == prev_state.current_justified_root - yield 'blocks', 
blocks, [spec.BeaconBlock] + yield 'blocks', blocks, List[spec.BeaconBlock] yield 'post', state +@with_all_phases @never_bls @spec_state_test -def test_finality_rule_1(state): +def test_finality_rule_1(spec, state): # get past first two epochs that finality does not run on - next_epoch(state) - apply_empty_block(state) - next_epoch(state) - apply_empty_block(state) + next_epoch(spec, state) + apply_empty_block(spec, state) + next_epoch(spec, state) + apply_empty_block(spec, state) yield 'pre', state blocks = [] for epoch in range(3): - prev_state, new_blocks, state = next_epoch_with_attestations(state, False, True) + prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, False, True) blocks += new_blocks if epoch == 0: - check_finality(state, prev_state, True, False, False) + check_finality(spec, state, prev_state, True, False, False) elif epoch == 1: - check_finality(state, prev_state, True, True, False) + check_finality(spec, state, prev_state, True, True, False) elif epoch == 2: # finalized by rule 1 - check_finality(state, prev_state, True, True, True) + check_finality(spec, state, prev_state, True, True, True) assert state.finalized_epoch == prev_state.previous_justified_epoch assert state.finalized_root == prev_state.previous_justified_root - yield 'blocks', blocks, [spec.BeaconBlock] + yield 'blocks', blocks, List[spec.BeaconBlock] yield 'post', state +@with_all_phases @never_bls @spec_state_test -def test_finality_rule_2(state): +def test_finality_rule_2(spec, state): # get past first two epochs that finality does not run on - next_epoch(state) - apply_empty_block(state) - next_epoch(state) - apply_empty_block(state) + next_epoch(spec, state) + apply_empty_block(spec, state) + next_epoch(spec, state) + apply_empty_block(spec, state) yield 'pre', state blocks = [] for epoch in range(3): if epoch == 0: - prev_state, new_blocks, state = next_epoch_with_attestations(state, True, False) - check_finality(state, prev_state, True, False, False) + 
prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, True, False) + check_finality(spec, state, prev_state, True, False, False) elif epoch == 1: - prev_state, new_blocks, state = next_epoch_with_attestations(state, False, False) - check_finality(state, prev_state, False, True, False) + prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, False, False) + check_finality(spec, state, prev_state, False, True, False) elif epoch == 2: - prev_state, new_blocks, state = next_epoch_with_attestations(state, False, True) + prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, False, True) # finalized by rule 2 - check_finality(state, prev_state, True, False, True) + check_finality(spec, state, prev_state, True, False, True) assert state.finalized_epoch == prev_state.previous_justified_epoch assert state.finalized_root == prev_state.previous_justified_root blocks += new_blocks - yield 'blocks', blocks, [spec.BeaconBlock] + yield 'blocks', blocks, List[spec.BeaconBlock] yield 'post', state +@with_all_phases @never_bls @spec_state_test -def test_finality_rule_3(state): +def test_finality_rule_3(spec, state): """ Test scenario described here https://github.com/ethereum/eth2.0-specs/issues/611#issuecomment-463612892 """ # get past first two epochs that finality does not run on - next_epoch(state) - apply_empty_block(state) - next_epoch(state) - apply_empty_block(state) + next_epoch(spec, state) + apply_empty_block(spec, state) + next_epoch(spec, state) + apply_empty_block(spec, state) yield 'pre', state blocks = [] - prev_state, new_blocks, state = next_epoch_with_attestations(state, True, False) + prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, True, False) blocks += new_blocks - check_finality(state, prev_state, True, False, False) + check_finality(spec, state, prev_state, True, False, False) # In epoch N, JE is set to N, prev JE is set to N-1 - prev_state, new_blocks, state = 
next_epoch_with_attestations(state, True, False) + prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, True, False) blocks += new_blocks - check_finality(state, prev_state, True, True, True) + check_finality(spec, state, prev_state, True, True, True) # In epoch N+1, JE is N, prev JE is N-1, and not enough messages get in to do anything - prev_state, new_blocks, state = next_epoch_with_attestations(state, False, False) + prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, False, False) blocks += new_blocks - check_finality(state, prev_state, False, True, False) + check_finality(spec, state, prev_state, False, True, False) # In epoch N+2, JE is N, prev JE is N, and enough messages from the previous epoch get in to justify N+1. # N+1 now becomes the JE. Not enough messages from epoch N+2 itself get in to justify N+2 - prev_state, new_blocks, state = next_epoch_with_attestations(state, False, True) + prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, False, True) blocks += new_blocks # rule 2 - check_finality(state, prev_state, True, False, True) + check_finality(spec, state, prev_state, True, False, True) # In epoch N+3, LJE is N+1, prev LJE is N, and enough messages get in to justify epochs N+2 and N+3. 
- prev_state, new_blocks, state = next_epoch_with_attestations(state, True, True) + prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, True, True) blocks += new_blocks # rule 3 - check_finality(state, prev_state, True, True, True) + check_finality(spec, state, prev_state, True, True, True) assert state.finalized_epoch == prev_state.current_justified_epoch assert state.finalized_root == prev_state.current_justified_root - yield 'blocks', blocks, [spec.BeaconBlock] + yield 'blocks', blocks, List[spec.BeaconBlock] yield 'post', state diff --git a/test_libs/pyspec/eth2spec/utils/hash_function.py b/test_libs/pyspec/eth2spec/utils/hash_function.py index acd13edc4..f965827d0 100644 --- a/test_libs/pyspec/eth2spec/utils/hash_function.py +++ b/test_libs/pyspec/eth2spec/utils/hash_function.py @@ -1,4 +1,5 @@ from hashlib import sha256 -def hash(x): return sha256(x).digest() +def hash(x): + return sha256(x).digest() diff --git a/test_libs/pyspec/eth2spec/utils/merkle_minimal.py b/test_libs/pyspec/eth2spec/utils/merkle_minimal.py index 7c5483de3..c508f0df2 100644 --- a/test_libs/pyspec/eth2spec/utils/merkle_minimal.py +++ b/test_libs/pyspec/eth2spec/utils/merkle_minimal.py @@ -1,7 +1,9 @@ from .hash_function import hash -zerohashes = [b'\x00' * 32] +ZERO_BYTES32 = b'\x00' * 32 + +zerohashes = [ZERO_BYTES32] for layer in range(1, 32): zerohashes.append(hash(zerohashes[layer - 1] + zerohashes[layer - 1])) @@ -28,3 +30,25 @@ def get_merkle_proof(tree, item_index): subindex = (item_index // 2**i) ^ 1 proof.append(tree[i][subindex] if subindex < len(tree[i]) else zerohashes[i]) return proof + + +def next_power_of_two(v: int) -> int: + """ + Get the next power of 2. (for 64 bit range ints). + 0 is a special case, to have non-empty defaults. 
+ Examples: + 0 -> 1, 1 -> 1, 2 -> 2, 3 -> 4, 32 -> 32, 33 -> 64 + """ + if v == 0: + return 1 + return 1 << (v - 1).bit_length() + + +def merkleize_chunks(chunks): + tree = chunks[::] + margin = next_power_of_two(len(chunks)) - len(chunks) + tree.extend([ZERO_BYTES32] * margin) + tree = [ZERO_BYTES32] * len(tree) + tree + for i in range(len(tree) // 2 - 1, 0, -1): + tree[i] = hash(tree[i * 2] + tree[i * 2 + 1]) + return tree[1] diff --git a/test_libs/pyspec/eth2spec/utils/minimal_ssz.py b/test_libs/pyspec/eth2spec/utils/minimal_ssz.py deleted file mode 100644 index 9cc2baebb..000000000 --- a/test_libs/pyspec/eth2spec/utils/minimal_ssz.py +++ /dev/null @@ -1,331 +0,0 @@ -from typing import Any - -from .hash_function import hash - -BYTES_PER_CHUNK = 32 -BYTES_PER_LENGTH_OFFSET = 4 -ZERO_CHUNK = b'\x00' * BYTES_PER_CHUNK - - -def SSZType(fields): - class SSZObject(): - def __init__(self, **kwargs): - for f, t in fields.items(): - if f not in kwargs: - setattr(self, f, get_zero_value(t)) - else: - setattr(self, f, kwargs[f]) - - def __eq__(self, other): - return self.fields == other.fields and self.serialize() == other.serialize() - - def __hash__(self): - return int.from_bytes(self.hash_tree_root(), byteorder="little") - - def __str__(self): - output = [] - for field in self.fields: - output.append(f'{field}: {getattr(self, field)}') - return "\n".join(output) - - def serialize(self): - return serialize_value(self, self.__class__) - - def hash_tree_root(self): - return hash_tree_root(self, self.__class__) - - SSZObject.fields = fields - return SSZObject - - -class Vector(): - def __init__(self, items): - self.items = items - self.length = len(items) - - def __getitem__(self, key): - return self.items[key] - - def __setitem__(self, key, value): - self.items[key] = value - - def __iter__(self): - return iter(self.items) - - def __len__(self): - return self.length - - -def is_basic(typ): - # if not a string, it is a complex, and cannot be basic - if not isinstance(typ, 
str): - return False - # "uintN": N-bit unsigned integer (where N in [8, 16, 32, 64, 128, 256]) - elif typ[:4] == 'uint' and typ[4:] in ['8', '16', '32', '64', '128', '256']: - return True - # "bool": True or False - elif typ == 'bool': - return True - # alias: "byte" -> "uint8" - elif typ == 'byte': - return True - # default - else: - return False - - -def is_constant_sized(typ): - # basic objects are fixed size by definition - if is_basic(typ): - return True - # dynamic size array type, "list": [elem_type]. - # Not constant size by definition. - elif isinstance(typ, list) and len(typ) == 1: - return False - # fixed size array type, "vector": [elem_type, length] - # Constant size, but only if the elements are. - elif isinstance(typ, list) and len(typ) == 2: - return is_constant_sized(typ[0]) - # bytes array (fixed or dynamic size) - elif isinstance(typ, str) and typ[:5] == 'bytes': - # if no length suffix, it has a dynamic size - return typ != 'bytes' - # containers are only constant-size if all of the fields are constant size. 
- elif hasattr(typ, 'fields'): - for subtype in typ.fields.values(): - if not is_constant_sized(subtype): - return False - return True - else: - raise Exception("Type not recognized") - - -def coerce_to_bytes(x): - if isinstance(x, str): - o = x.encode('utf-8') - assert len(o) == len(x) - return o - elif isinstance(x, bytes): - return x - else: - raise Exception("Expecting bytes") - - -def encode_series(values, types): - # Recursively serialize - parts = [(is_constant_sized(types[i]), serialize_value(values[i], types[i])) for i in range(len(values))] - - # Compute and check lengths - fixed_lengths = [len(serialized) if constant_size else BYTES_PER_LENGTH_OFFSET - for (constant_size, serialized) in parts] - variable_lengths = [len(serialized) if not constant_size else 0 - for (constant_size, serialized) in parts] - - # Check if integer is not out of bounds (Python) - assert sum(fixed_lengths + variable_lengths) < 2 ** (BYTES_PER_LENGTH_OFFSET * 8) - - # Interleave offsets of variable-size parts with fixed-size parts. - # Avoid quadratic complexity in calculation of offsets. 
- offset = sum(fixed_lengths) - variable_parts = [] - fixed_parts = [] - for (constant_size, serialized) in parts: - if constant_size: - fixed_parts.append(serialized) - else: - fixed_parts.append(offset.to_bytes(BYTES_PER_LENGTH_OFFSET, 'little')) - variable_parts.append(serialized) - offset += len(serialized) - - # Return the concatenation of the fixed-size parts (offsets interleaved) with the variable-size parts - return b"".join(fixed_parts + variable_parts) - - -def serialize_value(value, typ=None): - if typ is None: - typ = infer_type(value) - # "uintN" - if isinstance(typ, str) and typ[:4] == 'uint': - length = int(typ[4:]) - assert length in (8, 16, 32, 64, 128, 256) - return value.to_bytes(length // 8, 'little') - # "bool" - elif isinstance(typ, str) and typ == 'bool': - assert value in (True, False) - return b'\x01' if value is True else b'\x00' - # Vector - elif isinstance(typ, list) and len(typ) == 2: - # (regardless of element type, sanity-check if the length reported in the vector type matches the value length) - assert len(value) == typ[1] - return encode_series(value, [typ[0]] * len(value)) - # List - elif isinstance(typ, list) and len(typ) == 1: - return encode_series(value, [typ[0]] * len(value)) - # "bytes" (variable size) - elif isinstance(typ, str) and typ == 'bytes': - return coerce_to_bytes(value) - # "bytesN" (fixed size) - elif isinstance(typ, str) and len(typ) > 5 and typ[:5] == 'bytes': - assert len(value) == int(typ[5:]), (value, int(typ[5:])) - return coerce_to_bytes(value) - # containers - elif hasattr(typ, 'fields'): - values = [getattr(value, field) for field in typ.fields.keys()] - types = list(typ.fields.values()) - return encode_series(values, types) - else: - print(value, typ) - raise Exception("Type not recognized") - - -def get_zero_value(typ: Any) -> Any: - if isinstance(typ, str): - # Bytes array - if typ == 'bytes': - return b'' - # bytesN - elif typ[:5] == 'bytes' and len(typ) > 5: - length = int(typ[5:]) - return b'\x00' * 
length - # Basic types - elif typ == 'bool': - return False - elif typ[:4] == 'uint': - return 0 - elif typ == 'byte': - return 0x00 - else: - raise ValueError("Type not recognized") - # Vector: - elif isinstance(typ, list) and len(typ) == 2: - return [get_zero_value(typ[0]) for _ in range(typ[1])] - # List: - elif isinstance(typ, list) and len(typ) == 1: - return [] - # Container: - elif hasattr(typ, 'fields'): - return typ(**{field: get_zero_value(subtype) for field, subtype in typ.fields.items()}) - else: - print(typ) - raise Exception("Type not recognized") - - -def chunkify(bytez): - bytez += b'\x00' * (-len(bytez) % BYTES_PER_CHUNK) - return [bytez[i:i + 32] for i in range(0, len(bytez), 32)] - - -def pack(values, subtype): - return chunkify(b''.join([serialize_value(value, subtype) for value in values])) - - -def is_power_of_two(x): - return x > 0 and x & (x - 1) == 0 - - -def merkleize(chunks): - tree = chunks[::] - while not is_power_of_two(len(tree)): - tree.append(ZERO_CHUNK) - tree = [ZERO_CHUNK] * len(tree) + tree - for i in range(len(tree) // 2 - 1, 0, -1): - tree[i] = hash(tree[i * 2] + tree[i * 2 + 1]) - return tree[1] - - -def mix_in_length(root, length): - return hash(root + length.to_bytes(32, 'little')) - - -def infer_type(value): - """ - Note: defaults to uint64 for integer type inference due to lack of information. - Other integer sizes are still supported, see spec. - :param value: The value to infer a SSZ type for. - :return: The SSZ type. - """ - if hasattr(value.__class__, 'fields'): - return value.__class__ - elif isinstance(value, Vector): - if len(value) > 0: - return [infer_type(value[0]), len(value)] - else: - # Element type does not matter too much, - # assumed to be a basic type for size-encoding purposes, vector is empty. - return ['uint64'] - elif isinstance(value, list): - if len(value) > 0: - return [infer_type(value[0])] - else: - # Element type does not matter, list-content size will be encoded regardless, list is empty. 
- return ['uint64'] - elif isinstance(value, (bytes, str)): - return 'bytes' - elif isinstance(value, int): - return 'uint64' - else: - raise Exception("Failed to infer type") - - -def hash_tree_root(value, typ=None): - if typ is None: - typ = infer_type(value) - # ------------------------------------- - # merkleize(pack(value)) - # basic object: merkleize packed version (merkleization pads it to 32 bytes if it is not already) - if is_basic(typ): - return merkleize(pack([value], typ)) - # or a vector of basic objects - elif isinstance(typ, list) and len(typ) == 2 and is_basic(typ[0]): - assert len(value) == typ[1] - return merkleize(pack(value, typ[0])) - # ------------------------------------- - # mix_in_length(merkleize(pack(value)), len(value)) - # if value is a list of basic objects - elif isinstance(typ, list) and len(typ) == 1 and is_basic(typ[0]): - return mix_in_length(merkleize(pack(value, typ[0])), len(value)) - # (needs some extra work for non-fixed-sized bytes array) - elif typ == 'bytes': - return mix_in_length(merkleize(chunkify(coerce_to_bytes(value))), len(value)) - # ------------------------------------- - # merkleize([hash_tree_root(element) for element in value]) - # if value is a vector of composite objects - elif isinstance(typ, list) and len(typ) == 2 and not is_basic(typ[0]): - return merkleize([hash_tree_root(element, typ[0]) for element in value]) - # (needs some extra work for fixed-sized bytes array) - elif isinstance(typ, str) and typ[:5] == 'bytes' and len(typ) > 5: - assert len(value) == int(typ[5:]) - return merkleize(chunkify(coerce_to_bytes(value))) - # or a container - elif hasattr(typ, 'fields'): - return merkleize([hash_tree_root(getattr(value, field), subtype) for field, subtype in typ.fields.items()]) - # ------------------------------------- - # mix_in_length(merkleize([hash_tree_root(element) for element in value]), len(value)) - # if value is a list of composite objects - elif isinstance(typ, list) and len(typ) == 1 and not 
is_basic(typ[0]): - return mix_in_length(merkleize([hash_tree_root(element, typ[0]) for element in value]), len(value)) - # ------------------------------------- - else: - raise Exception("Type not recognized") - - -def truncate(container): - field_keys = list(container.fields.keys()) - truncated_fields = { - key: container.fields[key] - for key in field_keys[:-1] - } - truncated_class = SSZType(truncated_fields) - kwargs = { - field: getattr(container, field) - for field in field_keys[:-1] - } - return truncated_class(**kwargs) - - -def signing_root(container): - return hash_tree_root(truncate(container)) - - -def serialize(ssz_object): - return getattr(ssz_object, 'serialize')() diff --git a/test_libs/pyspec/eth2spec/utils/ssz/__init__.py b/test_libs/pyspec/eth2spec/utils/ssz/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test_libs/pyspec/eth2spec/utils/ssz/ssz_impl.py b/test_libs/pyspec/eth2spec/utils/ssz/ssz_impl.py new file mode 100644 index 000000000..b3c877d48 --- /dev/null +++ b/test_libs/pyspec/eth2spec/utils/ssz/ssz_impl.py @@ -0,0 +1,163 @@ +from ..merkle_minimal import merkleize_chunks, hash +from eth2spec.utils.ssz.ssz_typing import ( + is_uint_type, is_bool_type, is_container_type, + is_list_kind, is_vector_kind, + read_vector_elem_type, read_elem_type, + uint_byte_size, + infer_input_type, + get_zero_value, +) + +# SSZ Serialization +# ----------------------------- + +BYTES_PER_LENGTH_OFFSET = 4 + + +def is_basic_type(typ): + return is_uint_type(typ) or is_bool_type(typ) + + +def serialize_basic(value, typ): + if is_uint_type(typ): + return value.to_bytes(uint_byte_size(typ), 'little') + elif is_bool_type(typ): + if value: + return b'\x01' + else: + return b'\x00' + else: + raise Exception("Type not supported: {}".format(typ)) + + +def deserialize_basic(value, typ): + if is_uint_type(typ): + return typ(int.from_bytes(value, 'little')) + elif is_bool_type(typ): + assert value in (b'\x00', b'\x01') + return True if value == 
b'\x01' else False + else: + raise Exception("Type not supported: {}".format(typ)) + + +def is_fixed_size(typ): + if is_basic_type(typ): + return True + elif is_list_kind(typ): + return False + elif is_vector_kind(typ): + return is_fixed_size(read_vector_elem_type(typ)) + elif is_container_type(typ): + return all(is_fixed_size(t) for t in typ.get_field_types()) + else: + raise Exception("Type not supported: {}".format(typ)) + + +def is_empty(obj): + return get_zero_value(type(obj)) == obj + + +@infer_input_type +def serialize(obj, typ=None): + if is_basic_type(typ): + return serialize_basic(obj, typ) + elif is_list_kind(typ) or is_vector_kind(typ): + return encode_series(obj, [read_elem_type(typ)] * len(obj)) + elif is_container_type(typ): + return encode_series(obj.get_field_values(), typ.get_field_types()) + else: + raise Exception("Type not supported: {}".format(typ)) + + +def encode_series(values, types): + # bytes and bytesN are already in the right format. + if isinstance(values, bytes): + return values + + # Recursively serialize + parts = [(is_fixed_size(types[i]), serialize(values[i], typ=types[i])) for i in range(len(values))] + + # Compute and check lengths + fixed_lengths = [len(serialized) if constant_size else BYTES_PER_LENGTH_OFFSET + for (constant_size, serialized) in parts] + variable_lengths = [len(serialized) if not constant_size else 0 + for (constant_size, serialized) in parts] + + # Check if integer is not out of bounds (Python) + assert sum(fixed_lengths + variable_lengths) < 2 ** (BYTES_PER_LENGTH_OFFSET * 8) + + # Interleave offsets of variable-size parts with fixed-size parts. + # Avoid quadratic complexity in calculation of offsets. 
+ offset = sum(fixed_lengths) + variable_parts = [] + fixed_parts = [] + for (constant_size, serialized) in parts: + if constant_size: + fixed_parts.append(serialized) + else: + fixed_parts.append(offset.to_bytes(BYTES_PER_LENGTH_OFFSET, 'little')) + variable_parts.append(serialized) + offset += len(serialized) + + # Return the concatenation of the fixed-size parts (offsets interleaved) with the variable-size parts + return b''.join(fixed_parts + variable_parts) + + +# SSZ Hash-tree-root +# ----------------------------- + + +def pack(values, subtype): + if isinstance(values, bytes): + return values + return b''.join([serialize_basic(value, subtype) for value in values]) + + +def chunkify(bytez): + # pad `bytez` to nearest 32-byte multiple + bytez += b'\x00' * (-len(bytez) % 32) + return [bytez[i:i + 32] for i in range(0, len(bytez), 32)] + + +def mix_in_length(root, length): + return hash(root + length.to_bytes(32, 'little')) + + +def is_bottom_layer_kind(typ): + return ( + is_basic_type(typ) or + (is_list_kind(typ) or is_vector_kind(typ)) and is_basic_type(read_elem_type(typ)) + ) + + +@infer_input_type +def get_typed_values(obj, typ=None): + if is_container_type(typ): + return obj.get_typed_values() + elif is_list_kind(typ) or is_vector_kind(typ): + elem_type = read_elem_type(typ) + return list(zip(obj, [elem_type] * len(obj))) + else: + raise Exception("Invalid type") + + +@infer_input_type +def hash_tree_root(obj, typ=None): + if is_bottom_layer_kind(typ): + data = serialize_basic(obj, typ) if is_basic_type(typ) else pack(obj, read_elem_type(typ)) + leaves = chunkify(data) + else: + fields = get_typed_values(obj, typ=typ) + leaves = [hash_tree_root(field_value, typ=field_typ) for field_value, field_typ in fields] + if is_list_kind(typ): + return mix_in_length(merkleize_chunks(leaves), len(obj)) + else: + return merkleize_chunks(leaves) + + +@infer_input_type +def signing_root(obj, typ): + assert is_container_type(typ) + # ignore last field + leaves = 
[hash_tree_root(field_value, typ=field_typ) for field_value, field_typ in obj.get_typed_values()[:-1]] + return merkleize_chunks(chunkify(b''.join(leaves))) diff --git a/test_libs/pyspec/eth2spec/utils/ssz/ssz_typing.py b/test_libs/pyspec/eth2spec/utils/ssz/ssz_typing.py new file mode 100644 index 000000000..368041f90 --- /dev/null +++ b/test_libs/pyspec/eth2spec/utils/ssz/ssz_typing.py @@ -0,0 +1,525 @@ +from typing import List, Iterable, TypeVar, Type, NewType +from typing import Union +from typing_inspect import get_origin + +# SSZ integers +# ----------------------------- + + +class uint(int): + byte_len = 0 + + def __new__(cls, value, *args, **kwargs): + if value < 0: + raise ValueError("unsigned types must not be negative") + return super().__new__(cls, value) + + +class uint8(uint): + byte_len = 1 + + def __new__(cls, value, *args, **kwargs): + if value.bit_length() > 8: + raise ValueError("value out of bounds for uint8") + return super().__new__(cls, value) + + +# Alias for uint8 +byte = NewType('byte', uint8) + + +class uint16(uint): + byte_len = 2 + + def __new__(cls, value, *args, **kwargs): + if value.bit_length() > 16: + raise ValueError("value out of bounds for uint16") + return super().__new__(cls, value) + + +class uint32(uint): + byte_len = 4 + + def __new__(cls, value, *args, **kwargs): + if value.bit_length() > 32: + raise ValueError("value out of bounds for uint16") + return super().__new__(cls, value) + + +# We simply default to uint64. 
But do give it a name, for readability +uint64 = NewType('uint64', int) + + +class uint128(uint): + byte_len = 16 + + def __new__(cls, value, *args, **kwargs): + if value.bit_length() > 128: + raise ValueError("value out of bounds for uint128") + return super().__new__(cls, value) + + +class uint256(uint): + byte_len = 32 + + def __new__(cls, value, *args, **kwargs): + if value.bit_length() > 256: + raise ValueError("value out of bounds for uint256") + return super().__new__(cls, value) + + +def is_uint_type(typ): + # All integers are uint in the scope of the spec here. + # Since we default to uint64. Bounds can be checked elsewhere. + # However, some are wrapped in a NewType + if hasattr(typ, '__supertype__'): + # get the type that the NewType is wrapping + typ = typ.__supertype__ + + return isinstance(typ, type) and issubclass(typ, int) and not issubclass(typ, bool) + + +def uint_byte_size(typ): + if hasattr(typ, '__supertype__'): + typ = typ.__supertype__ + + if isinstance(typ, type): + if issubclass(typ, uint): + return typ.byte_len + elif issubclass(typ, int): + # Default to uint64 + return 8 + else: + raise TypeError("Type %s is not an uint (or int-default uint64) type" % typ) + + +# SSZ Container base class +# ----------------------------- + +# Note: importing ssz functionality locally, to avoid import loop + +class Container(object): + + def __init__(self, **kwargs): + cls = self.__class__ + for f, t in cls.get_fields(): + if f not in kwargs: + setattr(self, f, get_zero_value(t)) + else: + setattr(self, f, kwargs[f]) + + def serialize(self): + from .ssz_impl import serialize + return serialize(self, self.__class__) + + def hash_tree_root(self): + from .ssz_impl import hash_tree_root + return hash_tree_root(self, self.__class__) + + def signing_root(self): + from .ssz_impl import signing_root + return signing_root(self, self.__class__) + + def get_field_values(self): + cls = self.__class__ + return [getattr(self, field) for field in cls.get_field_names()] + 
+ def __repr__(self): + return repr({field: getattr(self, field) for field in self.get_field_names()}) + + def __str__(self): + output = [] + for field in self.get_field_names(): + output.append(f'{field}: {getattr(self, field)}') + return "\n".join(output) + + def __eq__(self, other): + return self.hash_tree_root() == other.hash_tree_root() + + def __hash__(self): + return hash(self.hash_tree_root()) + + @classmethod + def get_fields_dict(cls): + return dict(cls.__annotations__) + + @classmethod + def get_fields(cls): + return list(dict(cls.__annotations__).items()) + + def get_typed_values(self): + return list(zip(self.get_field_values(), self.get_field_types())) + + @classmethod + def get_field_names(cls): + return list(cls.__annotations__.keys()) + + @classmethod + def get_field_types(cls): + # values of annotations are the types corresponding to the fields, not instance values. + return list(cls.__annotations__.values()) + + +# SSZ vector +# ----------------------------- + + +def _is_vector_instance_of(a, b): + # Other must not be a BytesN + if issubclass(b, bytes): + return False + elif not hasattr(b, 'elem_type') or not hasattr(b, 'length'): + # Vector (b) is not an instance of Vector[X, Y] (a) + return False + elif not hasattr(a, 'elem_type') or not hasattr(a, 'length'): + # Vector[X, Y] (b) is an instance of Vector (a) + return True + else: + # Vector[X, Y] (a) is an instance of Vector[X, Y] (b) + return a.elem_type == b.elem_type and a.length == b.length + + +def _is_equal_vector_type(a, b): + # Other must not be a BytesN + if issubclass(b, bytes): + return False + elif not hasattr(a, 'elem_type') or not hasattr(a, 'length'): + if not hasattr(b, 'elem_type') or not hasattr(b, 'length'): + # Vector == Vector + return True + else: + # Vector != Vector[X, Y] + return False + elif not hasattr(b, 'elem_type') or not hasattr(b, 'length'): + # Vector[X, Y] != Vector + return False + else: + # Vector[X, Y] == Vector[X, Y] + return a.elem_type == b.elem_type and 
a.length == b.length + + +class VectorMeta(type): + def __new__(cls, class_name, parents, attrs): + out = type.__new__(cls, class_name, parents, attrs) + if 'elem_type' in attrs and 'length' in attrs: + setattr(out, 'elem_type', attrs['elem_type']) + setattr(out, 'length', attrs['length']) + return out + + def __getitem__(self, params): + if not isinstance(params, tuple) or len(params) != 2: + raise Exception("Vector must be instantiated with two args: elem type and length") + o = self.__class__(self.__name__, (Vector,), {'elem_type': params[0], 'length': params[1]}) + o._name = 'Vector' + return o + + def __subclasscheck__(self, sub): + return _is_vector_instance_of(self, sub) + + def __instancecheck__(self, other): + return _is_vector_instance_of(self, other.__class__) + + def __eq__(self, other): + return _is_equal_vector_type(self, other) + + def __ne__(self, other): + return not _is_equal_vector_type(self, other) + + def __hash__(self): + return hash(self.__class__) + + +class Vector(metaclass=VectorMeta): + + def __init__(self, *args: Iterable): + cls = self.__class__ + if not hasattr(cls, 'elem_type'): + raise TypeError("Type Vector without elem_type data cannot be instantiated") + elif not hasattr(cls, 'length'): + raise TypeError("Type Vector without length data cannot be instantiated") + + if len(args) != cls.length: + if len(args) == 0: + args = [get_zero_value(cls.elem_type) for _ in range(cls.length)] + else: + raise TypeError("Typed vector with length %d cannot hold %d items" % (cls.length, len(args))) + + self.items = list(args) + + # cannot check non-type objects, or parametrized types + if isinstance(cls.elem_type, type) and not hasattr(cls.elem_type, '__args__'): + for i, item in enumerate(self.items): + if not issubclass(type(item), cls.elem_type): + raise TypeError("Typed vector cannot hold differently typed value" + " at index %d. 
Got type: %s, expected type: %s" % (i, type(item), cls.elem_type)) + + def serialize(self): + from .ssz_impl import serialize + return serialize(self, self.__class__) + + def hash_tree_root(self): + from .ssz_impl import hash_tree_root + return hash_tree_root(self, self.__class__) + + def __repr__(self): + return repr({'length': self.__class__.length, 'items': self.items}) + + def __getitem__(self, key): + return self.items[key] + + def __setitem__(self, key, value): + self.items[key] = value + + def __iter__(self): + return iter(self.items) + + def __len__(self): + return len(self.items) + + def __eq__(self, other): + return self.hash_tree_root() == other.hash_tree_root() + + +# SSZ BytesN +# ----------------------------- + + +def _is_bytes_n_instance_of(a, b): + # Other has to be a Bytes derivative class to be a BytesN + if not issubclass(b, bytes): + return False + elif not hasattr(b, 'length'): + # BytesN (b) is not an instance of BytesN[X] (a) + return False + elif not hasattr(a, 'length'): + # BytesN[X] (b) is an instance of BytesN (a) + return True + else: + # BytesN[X] (a) is an instance of BytesN[X] (b) + return a.length == b.length + + +def _is_equal_bytes_n_type(a, b): + # Other has to be a Bytes derivative class to be a BytesN + if not issubclass(b, bytes): + return False + elif not hasattr(a, 'length'): + if not hasattr(b, 'length'): + # BytesN == BytesN + return True + else: + # BytesN != BytesN[X] + return False + elif not hasattr(b, 'length'): + # BytesN[X] != BytesN + return False + else: + # BytesN[X] == BytesN[X] + return a.length == b.length + + +class BytesNMeta(type): + def __new__(cls, class_name, parents, attrs): + out = type.__new__(cls, class_name, parents, attrs) + if 'length' in attrs: + setattr(out, 'length', attrs['length']) + out._name = 'BytesN' + out.elem_type = byte + return out + + def __getitem__(self, n): + return self.__class__(self.__name__, (BytesN,), {'length': n}) + + def __subclasscheck__(self, sub): + return 
_is_bytes_n_instance_of(self, sub) + + def __instancecheck__(self, other): + return _is_bytes_n_instance_of(self, other.__class__) + + def __eq__(self, other): + return _is_equal_bytes_n_type(self, other) + + def __ne__(self, other): + return not _is_equal_bytes_n_type(self, other) + + def __hash__(self): + return hash(self.__class__) + + +def parse_bytes(val): + if val is None: + return None + elif isinstance(val, str): + # TODO: import from eth-utils instead, and do: hexstr_if_str(to_bytes, val) + return None + elif isinstance(val, bytes): + return val + elif isinstance(val, int): + return bytes([val]) + else: + return None + + +class BytesN(bytes, metaclass=BytesNMeta): + def __new__(cls, *args): + if not hasattr(cls, 'length'): + return + bytesval = None + if len(args) == 1: + val: Union[bytes, int, str] = args[0] + bytesval = parse_bytes(val) + elif len(args) > 1: + # TODO: each int is 1 byte, check size, create bytesval + bytesval = bytes(args) + + if bytesval is None: + if cls.length == 0: + bytesval = b'' + else: + bytesval = b'\x00' * cls.length + if len(bytesval) != cls.length: + raise TypeError("BytesN[%d] cannot be initialized with value of %d bytes" % (cls.length, len(bytesval))) + return super().__new__(cls, bytesval) + + def serialize(self): + from .ssz_impl import serialize + return serialize(self, self.__class__) + + def hash_tree_root(self): + from .ssz_impl import hash_tree_root + return hash_tree_root(self, self.__class__) + + +# SSZ Defaults +# ----------------------------- +def get_zero_value(typ): + if is_uint_type(typ): + return 0 + elif is_list_type(typ): + return [] + elif is_bool_type(typ): + return False + elif is_vector_type(typ): + return typ() + elif is_bytesn_type(typ): + return typ() + elif is_bytes_type(typ): + return b'' + elif is_container_type(typ): + return typ(**{f: get_zero_value(t) for f, t in typ.get_fields()}) + else: + raise Exception("Type not supported: {}".format(typ)) + + +# Type helpers +# 
----------------------------- + + +def infer_type(obj): + if is_uint_type(obj.__class__): + return obj.__class__ + elif isinstance(obj, int): + return uint64 + elif isinstance(obj, list): + return List[infer_type(obj[0])] + elif isinstance(obj, (Vector, Container, bool, BytesN, bytes)): + return obj.__class__ + else: + raise Exception("Unknown type for {}".format(obj)) + + +def infer_input_type(fn): + """ + Decorator to run infer_type on the obj if typ argument is None + """ + def infer_helper(obj, typ=None, **kwargs): + if typ is None: + typ = infer_type(obj) + return fn(obj, typ=typ, **kwargs) + return infer_helper + + +def is_bool_type(typ): + """ + Check if the given type is a bool. + """ + if hasattr(typ, '__supertype__'): + typ = typ.__supertype__ + return isinstance(typ, type) and issubclass(typ, bool) + + +def is_list_type(typ): + """ + Check if the given type is a list. + """ + return get_origin(typ) is List or get_origin(typ) is list + + +def is_bytes_type(typ): + """ + Check if the given type is a ``bytes``. + """ + # Do not accept subclasses of bytes here, to avoid confusion with BytesN + return typ == bytes + + +def is_bytesn_type(typ): + """ + Check if the given type is a BytesN. + """ + return isinstance(typ, type) and issubclass(typ, BytesN) + + +def is_list_kind(typ): + """ + Check if the given type is a kind of list. Can be bytes. + """ + return is_list_type(typ) or is_bytes_type(typ) + + +def is_vector_type(typ): + """ + Check if the given type is a vector. + """ + return isinstance(typ, type) and issubclass(typ, Vector) + + +def is_vector_kind(typ): + """ + Check if the given type is a kind of vector. Can be BytesN. + """ + return is_vector_type(typ) or is_bytesn_type(typ) + + +def is_container_type(typ): + """ + Check if the given type is a container. 
+ """ + return isinstance(typ, type) and issubclass(typ, Container) + + +T = TypeVar('T') +L = TypeVar('L') + + +def read_list_elem_type(list_typ: Type[List[T]]) -> T: + if list_typ.__args__ is None or len(list_typ.__args__) != 1: + raise TypeError("Supplied list-type is invalid, no element type found.") + return list_typ.__args__[0] + + +def read_vector_elem_type(vector_typ: Type[Vector[T, L]]) -> T: + return vector_typ.elem_type + + +def read_elem_type(typ): + if typ == bytes: + return byte + elif is_list_type(typ): + return read_list_elem_type(typ) + elif is_vector_type(typ): + return read_vector_elem_type(typ) + elif issubclass(typ, bytes): # bytes or bytesN + return byte + else: + raise TypeError("Unexpected type: {}".format(typ)) diff --git a/test_libs/pyspec/requirements-testing.txt b/test_libs/pyspec/requirements-testing.txt index 388a878a9..331d0fa28 100644 --- a/test_libs/pyspec/requirements-testing.txt +++ b/test_libs/pyspec/requirements-testing.txt @@ -1,3 +1,4 @@ -r requirements.txt pytest>=3.6,<3.7 ../config_helpers +flake8==3.7.7 diff --git a/test_libs/pyspec/requirements.txt b/test_libs/pyspec/requirements.txt index 78d41708d..3b38930bd 100644 --- a/test_libs/pyspec/requirements.txt +++ b/test_libs/pyspec/requirements.txt @@ -2,3 +2,4 @@ eth-utils>=1.3.0,<2 eth-typing>=2.1.0,<3.0.0 pycryptodome==3.7.3 py_ecc>=1.6.0 +typing_inspect==0.4.0 diff --git a/test_libs/pyspec/setup.py b/test_libs/pyspec/setup.py index 1a131a417..e99b911ee 100644 --- a/test_libs/pyspec/setup.py +++ b/test_libs/pyspec/setup.py @@ -9,5 +9,6 @@ setup( "eth-typing>=2.1.0,<3.0.0", "pycryptodome==3.7.3", "py_ecc>=1.6.0", + "typing_inspect==0.4.0" ] ) diff --git a/test_libs/pyspec/tests/block_processing_phase1/phase1_test_process_randao_key_reveal.py b/test_libs/pyspec/tests/block_processing_phase1/phase1_test_process_randao_key_reveal.py deleted file mode 100644 index 0be8ab4a9..000000000 --- a/test_libs/pyspec/tests/block_processing_phase1/phase1_test_process_randao_key_reveal.py 
+++ /dev/null @@ -1,109 +0,0 @@ -from copy import deepcopy -import pytest - -import eth2spec.phase1.spec as spec -from eth2spec.phase1.spec import ( - get_current_epoch, - process_randao_key_reveal, - RANDAO_PENALTY_EPOCHS, - CUSTODY_PERIOD_TO_RANDAO_PADDING, - RANDAO_PENALTY_MAX_FUTURE_EPOCHS, -) -from tests.helpers_phase1 import ( - get_valid_randao_key_reveal, -) - -mark entire file as 'randao_key_reveals' -pytestmark = pytest.mark.randao_key_reveals - - -def run_randao_key_reveal_processing(state, randao_key_reveal, valid=True): - """ - Run ``process_randao_key_reveal`` returning the pre and post state. - If ``valid == False``, run expecting ``AssertionError`` - """ - post_state = deepcopy(state) - - if not valid: - with pytest.raises(AssertionError): - process_randao_key_reveal(post_state, randao_key_reveal) - return state, None - - process_randao_key_reveal(post_state, randao_key_reveal) - - slashed_validator = post_state.validator_registry[randao_key_reveal.revealed_index] - - if randao_key_reveal.epoch >= get_current_epoch(state) + CUSTODY_PERIOD_TO_RANDAO_PADDING: - assert slashed_validator.slashed - assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH - assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH - # lost whistleblower reward - # FIXME: Currently broken because get_base_reward in genesis epoch is 0 - assert ( - post_state.balances[randao_key_reveal.revealed_index] < - state.balances[randao_key_reveal.revealed_index] - ) - - return state, post_state - - -def test_success(state): - randao_key_reveal = get_valid_randao_key_reveal(state) - - pre_state, post_state = run_randao_key_reveal_processing(state, randao_key_reveal) - - return pre_state, randao_key_reveal, post_state - - -def test_reveal_from_current_epoch(state): - randao_key_reveal = get_valid_randao_key_reveal(state, get_current_epoch(state)) - - pre_state, post_state = run_randao_key_reveal_processing(state, randao_key_reveal, False) - - return pre_state, 
randao_key_reveal, post_state - -# Not currently possible as we are testing at epoch 0 -# -#def test_reveal_from_past_epoch(state): -# randao_key_reveal = get_valid_randao_key_reveal(state, get_current_epoch(state) - 1) -# -# pre_state, post_state = run_randao_key_reveal_processing(state, randao_key_reveal, False) -# -# return pre_state, randao_key_reveal, post_state - -def test_reveal_with_custody_padding(state): - randao_key_reveal = get_valid_randao_key_reveal(state, get_current_epoch(state) + CUSTODY_PERIOD_TO_RANDAO_PADDING) - pre_state, post_state = run_randao_key_reveal_processing(state, randao_key_reveal, True) - - return pre_state, randao_key_reveal, post_state - -def test_reveal_with_custody_padding_minus_one(state): - randao_key_reveal = get_valid_randao_key_reveal(state, get_current_epoch(state) + CUSTODY_PERIOD_TO_RANDAO_PADDING - 1) - pre_state, post_state = run_randao_key_reveal_processing(state, randao_key_reveal, True) - - return pre_state, randao_key_reveal, post_state - -def test_double_reveal(state): - - randao_key_reveal1 = get_valid_randao_key_reveal(state, get_current_epoch(state) + RANDAO_PENALTY_EPOCHS + 1) - pre_state, intermediate_state = run_randao_key_reveal_processing(state, randao_key_reveal1) - - randao_key_reveal2 = get_valid_randao_key_reveal(intermediate_state, get_current_epoch(pre_state) + RANDAO_PENALTY_EPOCHS + 1) - intermediate_state_, post_state = run_randao_key_reveal_processing(intermediate_state, randao_key_reveal2, False) - - return pre_state, [randao_key_reveal1, randao_key_reveal2], post_state - -def test_revealer_is_slashed(state): - randao_key_reveal = get_valid_randao_key_reveal(state, get_current_epoch(state)) - state.validator_registry[randao_key_reveal.revealed_index].slashed = True - - pre_state, post_state = run_randao_key_reveal_processing(state, randao_key_reveal, False) - - return pre_state, randao_key_reveal, post_state - -def test_far_future_epoch(state): - randao_key_reveal = 
get_valid_randao_key_reveal(state, get_current_epoch(state) + RANDAO_PENALTY_MAX_FUTURE_EPOCHS) - - pre_state, post_state = run_randao_key_reveal_processing(state, randao_key_reveal, False) - - return pre_state, randao_key_reveal, post_state diff --git a/test_libs/pyspec/tests/helpers_phase1.py b/test_libs/pyspec/tests/helpers_phase1.py deleted file mode 100644 index aba93e159..000000000 --- a/test_libs/pyspec/tests/helpers_phase1.py +++ /dev/null @@ -1,50 +0,0 @@ -from py_ecc import bls - -import eth2spec.phase1.spec as spec -from eth2spec.phase0.spec import ( - # constants - ZERO_HASH, - CUSTODY_PERIOD_TO_RANDAO_PADDING, - # SSZ - RandaoKeyReveal, - # functions - get_active_validator_indices, - get_current_epoch, - get_domain, - hash_tree_root, -) - -def get_valid_randao_key_reveal(state, epoch=None): - current_epoch = get_current_epoch(state) - revealed_index = get_active_validator_indices(state, current_epoch)[-1] - masker_index = get_active_validator_indices(state, current_epoch)[0] - - if epoch is None: - epoch = current_epoch + CUSTODY_PERIOD_TO_RANDAO_PADDING - - reveal = bls.sign( - message_hash=hash_tree_root(epoch), - privkey=pubkey_to_privkey[state.validator_registry[revealed_index].pubkey], - domain=get_domain( - state=state, - domain_type=spec.DOMAIN_RANDAO, - message_epoch=epoch, - ), - ) - mask = bls.sign( - message_hash=hash_tree_root(epoch), - privkey=pubkey_to_privkey[state.validator_registry[masker_index].pubkey], - domain=get_domain( - state=state, - domain_type=spec.DOMAIN_RANDAO, - message_epoch=epoch, - ), - ) - - return RandaoKeyReveal( - revealed_index=revealed_index, - epoch=epoch, - reveal=reveal, - masker_index=masker_index, - mask=mask, - )