Mirror of https://github.com/ethereum/consensus-specs.git (synced 2026-02-01 22:45:09 -05:00)
Merge pull request #2230 from ethereum/dev
Altair pre-release v1.1.0-alpha.1
@@ -35,13 +35,13 @@ commands:
|
||||
description: "Restore the cache with pyspec keys"
|
||||
steps:
|
||||
- restore_cached_venv:
|
||||
venv_name: v22-pyspec
|
||||
venv_name: v24-pyspec
|
||||
reqs_checksum: cache-{{ checksum "setup.py" }}
|
||||
save_pyspec_cached_venv:
|
||||
description: "Save a venv into a cache with pyspec keys"
|
||||
steps:
|
||||
- save_cached_venv:
|
||||
venv_name: v22-pyspec
|
||||
venv_name: v24-pyspec
|
||||
reqs_checksum: cache-{{ checksum "setup.py" }}
|
||||
venv_path: ./venv
|
||||
restore_deposit_contract_tester_cached_venv:
|
||||
@@ -216,15 +216,17 @@ workflows:
|
||||
- lint:
|
||||
requires:
|
||||
- test
|
||||
- install_deposit_contract_web3_tester:
|
||||
requires:
|
||||
- checkout_specs
|
||||
- test_deposit_contract_web3_tests:
|
||||
requires:
|
||||
- install_deposit_contract_web3_tester
|
||||
# NOTE: Since phase 0 has been launched, we disabled the deposit contract tests.
|
||||
# - install_deposit_contract_web3_tester:
|
||||
# requires:
|
||||
# - checkout_specs
|
||||
# - test_deposit_contract_web3_tests:
|
||||
# requires:
|
||||
# - install_deposit_contract_web3_tester
|
||||
build_and_test_deposit_contract:
|
||||
jobs:
|
||||
- build_deposit_contract
|
||||
- test_deposit_contract:
|
||||
requires:
|
||||
- build_deposit_contract
|
||||
# NOTE: Since phase 0 has been launched, we disabled the deposit contract tests.
|
||||
# - test_deposit_contract:
|
||||
# requires:
|
||||
# - build_deposit_contract
|
||||
|
||||
1 .gitignore (vendored)
@@ -17,6 +17,7 @@ eth2.0-spec-tests/
|
||||
# Dynamically built from Markdown spec
|
||||
tests/core/pyspec/eth2spec/phase0/
|
||||
tests/core/pyspec/eth2spec/phase1/
|
||||
tests/core/pyspec/eth2spec/altair/
|
||||
|
||||
# coverage reports
|
||||
.htmlcov
|
||||
|
||||
27 Makefile
@@ -20,13 +20,14 @@ GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%venv, $(GENER
|
||||
# To check generator matching:
|
||||
#$(info $$GENERATOR_TARGETS is [${GENERATOR_TARGETS}])
|
||||
|
||||
MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/phase0/*.md) $(wildcard $(SPEC_DIR)/phase1/*.md) $(wildcard $(SSZ_DIR)/*.md) $(wildcard $(SPEC_DIR)/networking/*.md) $(wildcard $(SPEC_DIR)/validator/*.md)
|
||||
MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/phase0/*.md) $(wildcard $(SPEC_DIR)/phase1/*.md) $(wildcard $(SPEC_DIR)/altair/*.md) $(wildcard $(SSZ_DIR)/*.md) $(wildcard $(SPEC_DIR)/networking/*.md) $(wildcard $(SPEC_DIR)/validator/*.md)
|
||||
|
||||
COV_HTML_OUT=.htmlcov
|
||||
COV_INDEX_FILE=$(PY_SPEC_DIR)/$(COV_HTML_OUT)/index.html
|
||||
|
||||
CURRENT_DIR = ${CURDIR}
|
||||
LINTER_CONFIG_FILE = $(CURRENT_DIR)/linter.ini
|
||||
GENERATOR_ERROR_LOG_FILE = $(CURRENT_DIR)/$(TEST_VECTOR_DIR)/testgen_error_log.txt
|
||||
|
||||
export DAPP_SKIP_BUILD:=1
|
||||
export DAPP_SRC:=$(SOLIDITY_DEPOSIT_CONTRACT_DIR)
|
||||
@@ -35,7 +36,8 @@ export DAPP_JSON:=build/combined.json
|
||||
|
||||
.PHONY: clean partial_clean all test citest lint generate_tests pyspec install_test open_cov \
|
||||
install_deposit_contract_tester test_deposit_contract install_deposit_contract_compiler \
|
||||
compile_deposit_contract test_compile_deposit_contract check_toc
|
||||
compile_deposit_contract test_compile_deposit_contract check_toc \
|
||||
detect_generator_incomplete detect_generator_error_log
|
||||
|
||||
all: $(PY_SPEC_ALL_TARGETS)
|
||||
|
||||
@@ -49,6 +51,7 @@ partial_clean:
|
||||
rm -rf $(DEPOSIT_CONTRACT_TESTER_DIR)/.pytest_cache
|
||||
rm -rf $(PY_SPEC_DIR)/phase0
|
||||
rm -rf $(PY_SPEC_DIR)/phase1
|
||||
rm -rf $(PY_SPEC_DIR)/altair
|
||||
rm -rf $(PY_SPEC_DIR)/$(COV_HTML_OUT)
|
||||
rm -rf $(PY_SPEC_DIR)/.coverage
|
||||
rm -rf $(PY_SPEC_DIR)/test-reports
|
||||
@@ -81,19 +84,19 @@ pyspec:
|
||||
|
||||
# installs the packages to run pyspec tests
|
||||
install_test:
|
||||
python3.8 -m venv venv; . venv/bin/activate; pip3 install .[lint]; pip3 install -e .[test]
|
||||
python3 -m venv venv; . venv/bin/activate; python3 -m pip install .[lint]; python3 -m pip install -e .[test]
|
||||
|
||||
test: pyspec
|
||||
. venv/bin/activate; cd $(PY_SPEC_DIR); \
|
||||
python -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.spec --cov=eth2spec.phase1.spec --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
|
||||
python3 -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.spec --cov=eth2spec.phase1.spec --cov=eth2spec.altair.spec --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
|
||||
|
||||
find_test: pyspec
|
||||
. venv/bin/activate; cd $(PY_SPEC_DIR); \
|
||||
python -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.spec --cov=eth2spec.phase1.spec --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
|
||||
python3 -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.spec --cov=eth2spec.phase1.spec --cov=eth2spec.altair.spec --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
|
||||
|
||||
citest: pyspec
|
||||
mkdir -p tests/core/pyspec/test-reports/eth2spec; . venv/bin/activate; cd $(PY_SPEC_DIR); \
|
||||
python -m pytest -n 4 --bls-type=milagro --junitxml=eth2spec/test_results.xml eth2spec
|
||||
python3 -m pytest -n 4 --bls-type=milagro --junitxml=eth2spec/test_results.xml eth2spec
|
||||
|
||||
open_cov:
|
||||
((open "$(COV_INDEX_FILE)" || xdg-open "$(COV_INDEX_FILE)") &> /dev/null) &
|
||||
@@ -112,7 +115,7 @@ codespell:
|
||||
lint: pyspec
|
||||
. venv/bin/activate; cd $(PY_SPEC_DIR); \
|
||||
flake8 --config $(LINTER_CONFIG_FILE) ./eth2spec \
|
||||
&& mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.phase1
|
||||
&& mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.phase1 -p eth2spec.altair
|
||||
|
||||
lint_generators: pyspec
|
||||
. venv/bin/activate; cd $(TEST_GENERATORS_DIR); \
|
||||
@@ -132,11 +135,11 @@ test_deposit_contract:
|
||||
dapp test -v --fuzz-runs 5
|
||||
|
||||
install_deposit_contract_web3_tester:
|
||||
cd $(DEPOSIT_CONTRACT_TESTER_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements.txt
|
||||
cd $(DEPOSIT_CONTRACT_TESTER_DIR); python3 -m venv venv; . venv/bin/activate; python3 -m pip install -r requirements.txt
|
||||
|
||||
test_deposit_contract_web3_tests:
|
||||
cd $(DEPOSIT_CONTRACT_TESTER_DIR); . venv/bin/activate; \
|
||||
python -m pytest .
|
||||
python3 -m pytest .
|
||||
|
||||
# Runs a generator, identified by param 1
|
||||
define run_generator
|
||||
@@ -170,3 +173,9 @@ $(TEST_VECTOR_DIR)/:
|
||||
# (creation of output dir is a dependency)
|
||||
gen_%: $(TEST_VECTOR_DIR)
|
||||
$(call run_generator,$*)
|
||||
|
||||
detect_generator_incomplete: $(TEST_VECTOR_DIR)
|
||||
find $(TEST_VECTOR_DIR) -name "INCOMPLETE"
|
||||
|
||||
detect_generator_error_log: $(TEST_VECTOR_DIR)
|
||||
[ -f $(GENERATOR_ERROR_LOG_FILE) ] && echo "[ERROR] $(GENERATOR_ERROR_LOG_FILE) file exists" || echo "[PASSED] error log file does not exist"
|
||||
|
||||
22 README.md
@@ -11,29 +11,29 @@ This repository hosts the current Eth2 specifications. Discussions about design

[](https://github.com/ethereum/eth2.0-specs/releases/) [](https://badge.fury.io/py/eth2spec)

Core specifications for Eth2 clients can be found in [specs](specs/). These are divided into phases. Each subsequent phase depends upon the prior. The current phases specified are:

### Phase 0

* [The Beacon Chain](specs/phase0/beacon-chain.md)
* [Beacon Chain Fork Choice](specs/phase0/fork-choice.md)
* [Deposit Contract](specs/phase0/deposit-contract.md)
* [Honest Validator](specs/phase0/validator.md)
* [P2P Networking](specs/phase0/p2p-interface.md)

### Phase 1
* [From Phase 0 to Phase 1](specs/phase1/phase1-fork.md)
* [The Beacon Chain for Shards](specs/phase1/beacon-chain.md)
* [Custody Game](specs/phase1/custody-game.md)
* [Shard Transition and Fraud Proofs](specs/phase1/shard-transition.md)
* [Light client syncing protocol](specs/phase1/light-client-sync.md)
* [Beacon Chain Fork Choice for Shards](specs/phase1/fork-choice.md)
### Altair

### Phase 2
* [Beacon chain changes](specs/altair/beacon-chain.md)
* [Altair fork](specs/altair/fork.md)
* [Light client sync protocol](specs/altair/sync-protocol.md)

Phase 2 is still actively in R&D and does not yet have any formal specifications.
### Sharding

See the [Eth2 Phase 2 Wiki](https://hackmd.io/UzysWse1Th240HELswKqVA?view) for current progress, discussions, and definitions regarding this work.
The sharding spec is still actively in R&D; see the most recent available pull request [here](https://github.com/ethereum/eth2.0-specs/pull/2146) and some technical details [here](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD).

### Merge

The merge is still actively in R&D; see an [ethresear.ch](https://ethresear.ch) post describing the proposed basic mechanism [here](https://ethresear.ch/t/the-eth1-eth2-transition/6265) and the section of [ethereum.org](https://ethereum.org) describing the merge at a high level [here](https://ethereum.org/en/eth2/docking/).

### Accompanying documents can be found in [specs](specs) and include:
50 configs/mainnet/altair.yaml Normal file
@@ -0,0 +1,50 @@
|
||||
# Mainnet preset - Altair
|
||||
|
||||
CONFIG_NAME: "mainnet"
|
||||
|
||||
# Updated penalty values
|
||||
# ---------------------------------------------------------------
|
||||
# 3 * 2**24 (= 50,331,648)
|
||||
INACTIVITY_PENALTY_QUOTIENT_ALTAIR: 50331648
|
||||
# 2**6 (= 64)
|
||||
MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR: 64
|
||||
# 2
|
||||
PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR: 2
|
||||
|
||||
|
||||
# Misc
|
||||
# ---------------------------------------------------------------
|
||||
# 2**10 (= 1,024)
|
||||
SYNC_COMMITTEE_SIZE: 1024
|
||||
# 2**6 (= 64)
|
||||
SYNC_SUBCOMMITTEE_SIZE: 64
|
||||
# 2**2 (= 4)
|
||||
INACTIVITY_SCORE_BIAS: 4
|
||||
|
||||
|
||||
# Time parameters
|
||||
# ---------------------------------------------------------------
|
||||
# 2**8 (= 256)
|
||||
EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 256
|
||||
|
||||
|
||||
# Signature domains
|
||||
# ---------------------------------------------------------------
|
||||
DOMAIN_SYNC_COMMITTEE: 0x07000000
|
||||
|
||||
|
||||
# Fork
|
||||
# ---------------------------------------------------------------
|
||||
ALTAIR_FORK_VERSION: 0x01000000
|
||||
# TBD
|
||||
ALTAIR_FORK_SLOT: 0
|
||||
|
||||
|
||||
# Sync protocol
|
||||
# ---------------------------------------------------------------
|
||||
# 1
|
||||
MIN_SYNC_COMMITTEE_PARTICIPANTS: 1
|
||||
# 2**13
|
||||
MAX_VALID_LIGHT_CLIENT_UPDATES: 8192
|
||||
# 2**13 (=8192)
|
||||
LIGHT_CLIENT_UPDATE_TIMEOUT: 8192
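The presets above are plain YAML key/value files. A minimal sketch of reading the mainnet file with `ruamel.yaml` (the YAML library pinned in this PR's `setup.py`), assuming it is run from the repository root; the generated pyspec itself applies these values via `eth2spec.config.config_util.apply_constants_config` rather than reading the file like this:

```python
from ruamel.yaml import YAML

yaml = YAML(typ="safe")
# Path of the preset file added in this PR; run from the repository root.
with open("configs/mainnet/altair.yaml") as f:
    preset = yaml.load(f)

assert preset["CONFIG_NAME"] == "mainnet"
assert preset["SYNC_COMMITTEE_SIZE"] == 2**10
assert preset["EPOCHS_PER_SYNC_COMMITTEE_PERIOD"] == 2**8
```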
50 configs/minimal/altair.yaml Normal file
@@ -0,0 +1,50 @@
|
||||
# Minimal preset - Altair
|
||||
|
||||
CONFIG_NAME: "minimal"
|
||||
|
||||
# Updated penalty values
|
||||
# ---------------------------------------------------------------
|
||||
# 3 * 2**24 (= 50,331,648)
|
||||
INACTIVITY_PENALTY_QUOTIENT_ALTAIR: 50331648
|
||||
# 2**6 (= 64)
|
||||
MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR: 64
|
||||
# 2
|
||||
PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR: 2
|
||||
|
||||
|
||||
# Misc
|
||||
# ---------------------------------------------------------------
|
||||
# [customized]
|
||||
SYNC_COMMITTEE_SIZE: 32
|
||||
# [customized]
|
||||
SYNC_SUBCOMMITTEE_SIZE: 16
|
||||
# 2**2 (= 4)
|
||||
INACTIVITY_SCORE_BIAS: 4
|
||||
|
||||
|
||||
# Time parameters
|
||||
# ---------------------------------------------------------------
|
||||
# [customized]
|
||||
EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 8
|
||||
|
||||
|
||||
# Signature domains
|
||||
# ---------------------------------------------------------------
|
||||
DOMAIN_SYNC_COMMITTEE: 0x07000000
|
||||
|
||||
|
||||
# Fork
|
||||
# ---------------------------------------------------------------
|
||||
ALTAIR_FORK_VERSION: 0x01000000
|
||||
# [customized]
|
||||
ALTAIR_FORK_SLOT: 0
|
||||
|
||||
|
||||
# Sync protocol
|
||||
# ---------------------------------------------------------------
|
||||
# 1
|
||||
MIN_SYNC_COMMITTEE_PARTICIPANTS: 1
|
||||
# [customized]
|
||||
MAX_VALID_LIGHT_CLIENT_UPDATES: 32
|
||||
# [customized]
|
||||
LIGHT_CLIENT_UPDATE_TIMEOUT: 32
|
||||
164 setup.py
@@ -10,10 +10,17 @@ from typing import Dict, NamedTuple, List
|
||||
FUNCTION_REGEX = r'^def [\w_]*'
|
||||
|
||||
|
||||
# Definitions in context.py
|
||||
PHASE0 = 'phase0'
|
||||
ALTAIR = 'altair'
|
||||
PHASE1 = 'phase1'
|
||||
|
||||
|
||||
class SpecObject(NamedTuple):
|
||||
functions: Dict[str, str]
|
||||
custom_types: Dict[str, str]
|
||||
constants: Dict[str, str]
|
||||
ssz_dep_constants: Dict[str, str] # the constants that depend on ssz_objects
|
||||
ssz_objects: Dict[str, str]
|
||||
dataclasses: Dict[str, str]
|
||||
|
||||
@@ -35,6 +42,7 @@ def get_spec(file_name: str) -> SpecObject:
|
||||
current_name = None # most recent section title
|
||||
functions: Dict[str, str] = {}
|
||||
constants: Dict[str, str] = {}
|
||||
ssz_dep_constants: Dict[str, str] = {}
|
||||
ssz_objects: Dict[str, str] = {}
|
||||
dataclasses: Dict[str, str] = {}
|
||||
function_matcher = re.compile(FUNCTION_REGEX)
|
||||
@@ -52,8 +60,9 @@ def get_spec(file_name: str) -> SpecObject:
|
||||
else:
|
||||
# Handle function definitions & ssz_objects
|
||||
if pulling_from is not None:
|
||||
if len(line) > 18 and line[:6] == 'class ' and line[-12:] == '(Container):':
|
||||
name = line[6:-12]
|
||||
if len(line) > 18 and line[:6] == 'class ' and (line[-12:] == '(Container):' or '(phase' in line):
|
||||
end = -12 if line[-12:] == '(Container):' else line.find('(')
|
||||
name = line[6:end]
|
||||
# Check consistency with markdown header
|
||||
assert name == current_name
|
||||
block_type = CodeBlockType.SSZ
|
||||
@@ -87,10 +96,20 @@ def get_spec(file_name: str) -> SpecObject:
|
||||
if c not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789':
|
||||
is_constant_def = False
|
||||
if is_constant_def:
|
||||
constants[row[0]] = row[1].replace('**TBD**', '2**32')
|
||||
if row[1].startswith('get_generalized_index'):
|
||||
ssz_dep_constants[row[0]] = row[1]
|
||||
else:
|
||||
constants[row[0]] = row[1].replace('**TBD**', '2**32')
|
||||
elif row[1].startswith('uint') or row[1].startswith('Bytes'):
|
||||
custom_types[row[0]] = row[1]
|
||||
return SpecObject(functions, custom_types, constants, ssz_objects, dataclasses)
|
||||
return SpecObject(
|
||||
functions=functions,
|
||||
custom_types=custom_types,
|
||||
constants=constants,
|
||||
ssz_dep_constants=ssz_dep_constants,
|
||||
ssz_objects=ssz_objects,
|
||||
dataclasses=dataclasses,
|
||||
)
|
||||
|
||||
|
||||
CONFIG_LOADER = '''
|
||||
@@ -156,11 +175,52 @@ SSZObject = TypeVar('SSZObject', bound=View)
|
||||
|
||||
CONFIG_NAME = 'mainnet'
|
||||
'''
|
||||
ALTAIR_IMPORTS = '''from eth2spec.phase0 import spec as phase0
|
||||
from eth2spec.config.config_util import apply_constants_config
|
||||
from typing import (
|
||||
Any, Dict, Set, Sequence, NewType, Tuple, TypeVar, Callable, Optional, Union
|
||||
)
|
||||
|
||||
from dataclasses import (
|
||||
dataclass,
|
||||
field,
|
||||
)
|
||||
|
||||
from lru import LRU
|
||||
|
||||
from eth2spec.utils.ssz.ssz_impl import hash_tree_root, copy, uint_to_bytes
|
||||
from eth2spec.utils.ssz.ssz_typing import (
|
||||
View, boolean, Container, List, Vector, uint8, uint32, uint64,
|
||||
Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,
|
||||
Path,
|
||||
)
|
||||
from eth2spec.utils import bls
|
||||
|
||||
from eth2spec.utils.hash_function import hash
|
||||
|
||||
# Whenever altair is loaded, make sure we have the latest phase0
|
||||
from importlib import reload
|
||||
reload(phase0)
|
||||
|
||||
|
||||
SSZVariableName = str
|
||||
GeneralizedIndex = NewType('GeneralizedIndex', int)
|
||||
SSZObject = TypeVar('SSZObject', bound=View)
|
||||
|
||||
CONFIG_NAME = 'mainnet'
|
||||
'''
|
||||
|
||||
SUNDRY_CONSTANTS_FUNCTIONS = '''
|
||||
def ceillog2(x: int) -> uint64:
|
||||
if x < 1:
|
||||
raise ValueError(f"ceillog2 accepts only positive values, x={x}")
|
||||
return uint64((x - 1).bit_length())
|
||||
|
||||
|
||||
def floorlog2(x: int) -> uint64:
|
||||
if x < 1:
|
||||
raise ValueError(f"floorlog2 accepts only positive values, x={x}")
|
||||
return uint64(x.bit_length() - 1)
|
||||
'''
|
||||
PHASE0_SUNDRY_FUNCTIONS = '''
|
||||
def get_eth1_data(block: Eth1Block) -> Eth1Data:
|
||||
@@ -242,6 +302,35 @@ get_start_shard = cache_this(
|
||||
_get_start_shard, lru_size=SLOTS_PER_EPOCH * 3)'''
|
||||
|
||||
|
||||
ALTAIR_SUNDRY_FUNCTIONS = '''
|
||||
|
||||
def get_generalized_index(ssz_class: Any, *path: Sequence[Union[int, SSZVariableName]]) -> GeneralizedIndex:
|
||||
ssz_path = Path(ssz_class)
|
||||
for item in path:
|
||||
ssz_path = ssz_path / item
|
||||
return GeneralizedIndex(ssz_path.gindex())'''
|
||||
|
||||
|
||||
# The constants that depend on SSZ objects
|
||||
# Will verify the value at the end of the spec
|
||||
ALTAIR_HARDCODED_SSZ_DEP_CONSTANTS = {
|
||||
'FINALIZED_ROOT_INDEX': 'GeneralizedIndex(105)',
|
||||
'NEXT_SYNC_COMMITTEE_INDEX': 'GeneralizedIndex(55)',
|
||||
}
|
||||
|
||||
|
||||
def is_phase0(fork):
|
||||
return fork == PHASE0
|
||||
|
||||
|
||||
def is_altair(fork):
|
||||
return fork == ALTAIR
|
||||
|
||||
|
||||
def is_phase1(fork):
|
||||
return fork == PHASE1
|
||||
|
||||
|
||||
def objects_to_spec(spec_object: SpecObject, imports: str, fork: str, ordered_class_objects: Dict[str, str]) -> str:
|
||||
"""
|
||||
Given all the objects that constitute a spec, combine them into a single pyfile.
|
||||
@@ -255,7 +344,7 @@ def objects_to_spec(spec_object: SpecObject, imports: str, fork: str, ordered_cl
|
||||
)
|
||||
)
|
||||
for k in list(spec_object.functions):
|
||||
if "ceillog2" in k:
|
||||
if "ceillog2" in k or "floorlog2" in k:
|
||||
del spec_object.functions[k]
|
||||
functions_spec = '\n\n'.join(spec_object.functions.values())
|
||||
for k in list(spec_object.constants.keys()):
|
||||
@@ -263,19 +352,33 @@ def objects_to_spec(spec_object: SpecObject, imports: str, fork: str, ordered_cl
|
||||
spec_object.constants[k] += " # noqa: E501"
|
||||
constants_spec = '\n'.join(map(lambda x: '%s = %s' % (x, spec_object.constants[x]), spec_object.constants))
|
||||
ordered_class_objects_spec = '\n\n'.join(ordered_class_objects.values())
|
||||
|
||||
if is_altair(fork):
|
||||
altair_ssz_dep_constants = '\n'.join(map(lambda x: '%s = %s' % (x, ALTAIR_HARDCODED_SSZ_DEP_CONSTANTS[x]), ALTAIR_HARDCODED_SSZ_DEP_CONSTANTS))
|
||||
|
||||
spec = (
|
||||
imports
|
||||
+ '\n\n' + f"fork = \'{fork}\'\n"
|
||||
+ '\n\n' + new_type_definitions
|
||||
+ '\n' + SUNDRY_CONSTANTS_FUNCTIONS
|
||||
# The constants that some SSZ containers require. Need to be defined before `constants_spec`
|
||||
+ ('\n\n' + altair_ssz_dep_constants if is_altair(fork) else '')
|
||||
+ '\n\n' + constants_spec
|
||||
+ '\n\n' + CONFIG_LOADER
|
||||
+ '\n\n' + ordered_class_objects_spec
|
||||
+ '\n\n' + functions_spec
|
||||
# Functions to make pyspec work
|
||||
+ '\n' + PHASE0_SUNDRY_FUNCTIONS
|
||||
+ ('\n' + ALTAIR_SUNDRY_FUNCTIONS if is_altair(fork) else '')
|
||||
+ ('\n' + PHASE1_SUNDRY_FUNCTIONS if is_phase1(fork) else '')
|
||||
)
|
||||
if fork == 'phase1':
|
||||
spec += '\n' + PHASE1_SUNDRY_FUNCTIONS
|
||||
|
||||
# Since some constants are hardcoded in setup.py, the following assertions verify that the hardcoded constants are
|
||||
# the same as the spec definition.
|
||||
if is_altair(fork):
|
||||
altair_ssz_dep_constants_verification = '\n'.join(map(lambda x: 'assert %s == %s' % (x, spec_object.ssz_dep_constants[x]), ALTAIR_HARDCODED_SSZ_DEP_CONSTANTS))
|
||||
spec += '\n\n\n' + altair_ssz_dep_constants_verification
|
||||
|
||||
spec += '\n'
|
||||
return spec
|
||||
|
||||
@@ -297,7 +400,7 @@ ignored_dependencies = [
|
||||
'Bytes1', 'Bytes4', 'Bytes32', 'Bytes48', 'Bytes96', 'Bitlist', 'Bitvector',
|
||||
'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',
|
||||
'bytes', 'byte', 'ByteList', 'ByteVector',
|
||||
'Dict', 'dict', 'field',
|
||||
'Dict', 'dict', 'field', 'ceillog2', 'floorlog2',
|
||||
]
|
||||
|
||||
|
||||
@@ -338,19 +441,28 @@ def combine_spec_objects(spec0: SpecObject, spec1: SpecObject) -> SpecObject:
|
||||
"""
|
||||
Takes in two spec variants (as tuples of their objects) and combines them using the appropriate combiner function.
|
||||
"""
|
||||
functions0, custom_types0, constants0, ssz_objects0, dataclasses0 = spec0
|
||||
functions1, custom_types1, constants1, ssz_objects1, dataclasses1 = spec1
|
||||
functions0, custom_types0, constants0, ssz_dep_constants0, ssz_objects0, dataclasses0 = spec0
|
||||
functions1, custom_types1, constants1, ssz_dep_constants1, ssz_objects1, dataclasses1 = spec1
|
||||
functions = combine_functions(functions0, functions1)
|
||||
custom_types = combine_constants(custom_types0, custom_types1)
|
||||
constants = combine_constants(constants0, constants1)
|
||||
ssz_dep_constants = combine_constants(ssz_dep_constants0, ssz_dep_constants1)
|
||||
ssz_objects = combine_ssz_objects(ssz_objects0, ssz_objects1, custom_types)
|
||||
dataclasses = combine_functions(dataclasses0, dataclasses1)
|
||||
return SpecObject(functions, custom_types, constants, ssz_objects, dataclasses)
|
||||
return SpecObject(
|
||||
functions=functions,
|
||||
custom_types=custom_types,
|
||||
constants=constants,
|
||||
ssz_dep_constants=ssz_dep_constants,
|
||||
ssz_objects=ssz_objects,
|
||||
dataclasses=dataclasses,
|
||||
)
|
||||
|
||||
|
||||
fork_imports = {
|
||||
'phase0': PHASE0_IMPORTS,
|
||||
'phase1': PHASE1_IMPORTS,
|
||||
'altair': ALTAIR_IMPORTS,
|
||||
}
|
||||
|
||||
|
||||
@@ -387,7 +499,7 @@ class PySpecCommand(Command):
|
||||
def initialize_options(self):
|
||||
"""Set default values for options."""
|
||||
# Each user option must be listed here with their default value.
|
||||
self.spec_fork = 'phase0'
|
||||
self.spec_fork = PHASE0
|
||||
self.md_doc_paths = ''
|
||||
self.out_dir = 'pyspec_output'
|
||||
|
||||
@@ -396,14 +508,14 @@ class PySpecCommand(Command):
|
||||
if len(self.md_doc_paths) == 0:
|
||||
print("no paths were specified, using default markdown file paths for pyspec"
|
||||
" build (spec fork: %s)" % self.spec_fork)
|
||||
if self.spec_fork == "phase0":
|
||||
if is_phase0(self.spec_fork):
|
||||
self.md_doc_paths = """
|
||||
specs/phase0/beacon-chain.md
|
||||
specs/phase0/fork-choice.md
|
||||
specs/phase0/validator.md
|
||||
specs/phase0/weak-subjectivity.md
|
||||
"""
|
||||
elif self.spec_fork == "phase1":
|
||||
elif is_phase1(self.spec_fork):
|
||||
self.md_doc_paths = """
|
||||
specs/phase0/beacon-chain.md
|
||||
specs/phase0/fork-choice.md
|
||||
@@ -413,10 +525,20 @@ class PySpecCommand(Command):
|
||||
specs/phase1/beacon-chain.md
|
||||
specs/phase1/shard-transition.md
|
||||
specs/phase1/fork-choice.md
|
||||
specs/phase1/phase1-fork.md
|
||||
specs/phase1/fork.md
|
||||
specs/phase1/shard-fork-choice.md
|
||||
specs/phase1/validator.md
|
||||
"""
|
||||
elif is_altair(self.spec_fork):
|
||||
self.md_doc_paths = """
|
||||
specs/phase0/beacon-chain.md
|
||||
specs/phase0/fork-choice.md
|
||||
specs/phase0/validator.md
|
||||
specs/phase0/weak-subjectivity.md
|
||||
specs/altair/beacon-chain.md
|
||||
specs/altair/fork.md
|
||||
specs/altair/sync-protocol.md
|
||||
"""
|
||||
else:
|
||||
raise Exception('no markdown files specified, and spec fork "%s" is unknown', self.spec_fork)
|
||||
|
||||
@@ -516,13 +638,12 @@ setup(
|
||||
url="https://github.com/ethereum/eth2.0-specs",
|
||||
include_package_data=False,
|
||||
package_data={'configs': ['*.yaml'],
|
||||
|
||||
'specs': ['**/*.md'],
|
||||
'eth2spec': ['VERSION.txt']},
|
||||
package_dir={
|
||||
"eth2spec": "tests/core/pyspec/eth2spec",
|
||||
"configs": "configs",
|
||||
"specs": "specs"
|
||||
"specs": "specs",
|
||||
},
|
||||
packages=find_packages(where='tests/core/pyspec') + ['configs', 'specs'],
|
||||
py_modules=["eth2spec"],
|
||||
@@ -531,16 +652,17 @@ setup(
|
||||
extras_require={
|
||||
"test": ["pytest>=4.4", "pytest-cov", "pytest-xdist"],
|
||||
"lint": ["flake8==3.7.7", "mypy==0.750"],
|
||||
"generator": ["python-snappy==0.5.4"],
|
||||
},
|
||||
install_requires=[
|
||||
"eth-utils>=1.3.0,<2",
|
||||
"eth-typing>=2.1.0,<3.0.0",
|
||||
"pycryptodome==3.9.4",
|
||||
"py_ecc==5.0.0",
|
||||
"milagro_bls_binding==1.5.0",
|
||||
"py_ecc==5.2.0",
|
||||
"milagro_bls_binding==1.6.3",
|
||||
"dataclasses==0.6",
|
||||
"remerkleable==0.1.17",
|
||||
"remerkleable==0.1.18",
|
||||
"ruamel.yaml==0.16.5",
|
||||
"lru-dict==1.1.6"
|
||||
"lru-dict==1.1.6",
|
||||
]
|
||||
)
|
||||
|
||||
692 specs/altair/beacon-chain.md Normal file
@@ -0,0 +1,692 @@
|
||||
# Ethereum 2.0 Altair Beacon chain changes
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Custom types](#custom-types)
|
||||
- [Constants](#constants)
|
||||
- [Participation flag indices](#participation-flag-indices)
|
||||
- [Incentivization weights](#incentivization-weights)
|
||||
- [Misc](#misc)
|
||||
- [Configuration](#configuration)
|
||||
- [Updated penalty values](#updated-penalty-values)
|
||||
- [Misc](#misc-1)
|
||||
- [Time parameters](#time-parameters)
|
||||
- [Domain types](#domain-types)
|
||||
- [Containers](#containers)
|
||||
- [Modified containers](#modified-containers)
|
||||
- [`BeaconBlockBody`](#beaconblockbody)
|
||||
- [`BeaconState`](#beaconstate)
|
||||
- [New containers](#new-containers)
|
||||
- [`SyncAggregate`](#syncaggregate)
|
||||
- [`SyncCommittee`](#synccommittee)
|
||||
- [Helper functions](#helper-functions)
|
||||
- [`Predicates`](#predicates)
|
||||
- [`eth2_fast_aggregate_verify`](#eth2_fast_aggregate_verify)
|
||||
- [Misc](#misc-2)
|
||||
- [`get_flag_indices_and_weights`](#get_flag_indices_and_weights)
|
||||
- [`add_flag`](#add_flag)
|
||||
- [`has_flag`](#has_flag)
|
||||
- [Beacon state accessors](#beacon-state-accessors)
|
||||
- [`get_sync_committee_indices`](#get_sync_committee_indices)
|
||||
- [`get_sync_committee`](#get_sync_committee)
|
||||
- [`get_base_reward_per_increment`](#get_base_reward_per_increment)
|
||||
- [`get_base_reward`](#get_base_reward)
|
||||
- [`get_unslashed_participating_indices`](#get_unslashed_participating_indices)
|
||||
- [`get_flag_index_deltas`](#get_flag_index_deltas)
|
||||
- [Modified `get_inactivity_penalty_deltas`](#modified-get_inactivity_penalty_deltas)
|
||||
- [Beacon state mutators](#beacon-state-mutators)
|
||||
- [Modified `slash_validator`](#modified-slash_validator)
|
||||
- [Block processing](#block-processing)
|
||||
- [Modified `process_attestation`](#modified-process_attestation)
|
||||
- [Modified `process_deposit`](#modified-process_deposit)
|
||||
- [Sync committee processing](#sync-committee-processing)
|
||||
- [Epoch processing](#epoch-processing)
|
||||
- [Justification and finalization](#justification-and-finalization)
|
||||
- [Inactivity scores](#inactivity-scores)
|
||||
- [Rewards and penalties](#rewards-and-penalties)
|
||||
- [Slashings](#slashings)
|
||||
- [Participation flags updates](#participation-flags-updates)
|
||||
- [Sync committee updates](#sync-committee-updates)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction

Altair is the first beacon chain hard fork. Its main features are:

* sync committees to support light clients
* incentive accounting reforms to reduce spec complexity
* penalty parameter updates towards their planned maximally punitive values

## Custom types

| Name | SSZ equivalent | Description |
| - | - | - |
| `ParticipationFlags` | `uint8` | a succinct representation of 8 boolean participation flags |

## Constants

### Participation flag indices

| Name | Value |
| - | - |
| `TIMELY_HEAD_FLAG_INDEX` | `0` |
| `TIMELY_SOURCE_FLAG_INDEX` | `1` |
| `TIMELY_TARGET_FLAG_INDEX` | `2` |

### Incentivization weights

| Name | Value |
| - | - |
| `TIMELY_HEAD_WEIGHT` | `12` |
| `TIMELY_SOURCE_WEIGHT` | `12` |
| `TIMELY_TARGET_WEIGHT` | `24` |
| `SYNC_REWARD_WEIGHT` | `8` |
| `WEIGHT_DENOMINATOR` | `64` |
*Note*: The sum of the weight fractions (7/8) plus the proposer inclusion fraction (1/8) equals 1.
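A quick standalone check of that arithmetic, using the weight values from the table above:

```python
from fractions import Fraction

# Weight constants from the incentivization weights table above
TIMELY_HEAD_WEIGHT = 12
TIMELY_SOURCE_WEIGHT = 12
TIMELY_TARGET_WEIGHT = 24
SYNC_REWARD_WEIGHT = 8
WEIGHT_DENOMINATOR = 64

weight_sum = TIMELY_HEAD_WEIGHT + TIMELY_SOURCE_WEIGHT + TIMELY_TARGET_WEIGHT + SYNC_REWARD_WEIGHT
assert Fraction(weight_sum, WEIGHT_DENOMINATOR) == Fraction(7, 8)                       # sum of the weight fractions
assert Fraction(WEIGHT_DENOMINATOR - weight_sum, WEIGHT_DENOMINATOR) == Fraction(1, 8)  # proposer inclusion fraction
```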
### Misc

| Name | Value |
| - | - |
| `G2_POINT_AT_INFINITY` | `BLSSignature(b'\xc0' + b'\x00' * 95)` |
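This value is the compressed serialization of the BLS12-381 G2 point at infinity: 96 bytes, with the compression and infinity flag bits set in the leading byte. A standalone sanity check of that shape:

```python
G2_POINT_AT_INFINITY = b'\xc0' + b'\x00' * 95
assert len(G2_POINT_AT_INFINITY) == 96         # a compressed G2 signature is 96 bytes
assert G2_POINT_AT_INFINITY[0] == 0b1100_0000  # compression flag + infinity flag, remaining bits zero
```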
## Configuration
### Updated penalty values

This patch updates a few configuration values to move penalty parameters toward their final, maximum security values.

*Note*: The spec does *not* override previous configuration values but instead creates new values and replaces usage throughout.

| Name | Value |
| - | - |
| `INACTIVITY_PENALTY_QUOTIENT_ALTAIR` | `uint64(3 * 2**24)` (= 50,331,648) |
| `MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR` | `uint64(2**6)` (= 64) |
| `PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR` | `uint64(2)` |

### Misc

| Name | Value |
| - | - |
| `SYNC_COMMITTEE_SIZE` | `uint64(2**10)` (= 1,024) |
| `SYNC_SUBCOMMITTEE_SIZE` | `uint64(2**6)` (= 64) |
| `INACTIVITY_SCORE_BIAS` | `uint64(4)` |

### Time parameters

| Name | Value | Unit | Duration |
| - | - | :-: | :-: |
| `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` | `Epoch(2**8)` (= 256) | epochs | ~27 hours |
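The "~27 hours" figure follows from the phase 0 mainnet timing constants (`SECONDS_PER_SLOT = 12`, `SLOTS_PER_EPOCH = 32`); a quick standalone check:

```python
SECONDS_PER_SLOT = 12                    # phase 0 mainnet value
SLOTS_PER_EPOCH = 32                     # phase 0 mainnet value
EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 2**8

period_seconds = EPOCHS_PER_SYNC_COMMITTEE_PERIOD * SLOTS_PER_EPOCH * SECONDS_PER_SLOT
assert period_seconds == 98304
print(period_seconds / 3600)             # ≈ 27.3 hours
```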
### Domain types

| Name | Value |
| - | - |
| `DOMAIN_SYNC_COMMITTEE` | `DomainType('0x07000000')` |
| `DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF` | `DomainType('0x08000000')` |
| `DOMAIN_CONTRIBUTION_AND_PROOF` | `DomainType('0x09000000')` |

## Containers

### Modified containers

#### `BeaconBlockBody`
|
||||
|
||||
```python
|
||||
class BeaconBlockBody(Container):
|
||||
randao_reveal: BLSSignature
|
||||
eth1_data: Eth1Data # Eth1 data vote
|
||||
graffiti: Bytes32 # Arbitrary data
|
||||
# Operations
|
||||
proposer_slashings: List[ProposerSlashing, MAX_PROPOSER_SLASHINGS]
|
||||
attester_slashings: List[AttesterSlashing, MAX_ATTESTER_SLASHINGS]
|
||||
attestations: List[Attestation, MAX_ATTESTATIONS]
|
||||
deposits: List[Deposit, MAX_DEPOSITS]
|
||||
voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]
|
||||
# [New in Altair]
|
||||
sync_aggregate: SyncAggregate
|
||||
```
|
||||
|
||||
#### `BeaconState`
|
||||
|
||||
```python
|
||||
class BeaconState(Container):
|
||||
# Versioning
|
||||
genesis_time: uint64
|
||||
genesis_validators_root: Root
|
||||
slot: Slot
|
||||
fork: Fork
|
||||
# History
|
||||
latest_block_header: BeaconBlockHeader
|
||||
block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
|
||||
state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
|
||||
historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT]
|
||||
# Eth1
|
||||
eth1_data: Eth1Data
|
||||
eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH]
|
||||
eth1_deposit_index: uint64
|
||||
# Registry
|
||||
validators: List[Validator, VALIDATOR_REGISTRY_LIMIT]
|
||||
balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT]
|
||||
# Randomness
|
||||
randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR]
|
||||
# Slashings
|
||||
slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] # Per-epoch sums of slashed effective balances
|
||||
# Participation
|
||||
previous_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] # [Modified in Altair]
|
||||
current_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] # [Modified in Altair]
|
||||
# Finality
|
||||
justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH] # Bit set for every recent justified epoch
|
||||
previous_justified_checkpoint: Checkpoint
|
||||
current_justified_checkpoint: Checkpoint
|
||||
finalized_checkpoint: Checkpoint
|
||||
# Inactivity
|
||||
inactivity_scores: List[uint64, VALIDATOR_REGISTRY_LIMIT] # [New in Altair]
|
||||
# Sync
|
||||
current_sync_committee: SyncCommittee # [New in Altair]
|
||||
next_sync_committee: SyncCommittee # [New in Altair]
|
||||
```
|
||||
|
||||
### New containers
|
||||
|
||||
#### `SyncAggregate`
|
||||
|
||||
```python
|
||||
class SyncAggregate(Container):
|
||||
sync_committee_bits: Bitvector[SYNC_COMMITTEE_SIZE]
|
||||
sync_committee_signature: BLSSignature
|
||||
```
|
||||
|
||||
#### `SyncCommittee`
|
||||
|
||||
```python
|
||||
class SyncCommittee(Container):
|
||||
pubkeys: Vector[BLSPubkey, SYNC_COMMITTEE_SIZE]
|
||||
pubkey_aggregates: Vector[BLSPubkey, SYNC_COMMITTEE_SIZE // SYNC_SUBCOMMITTEE_SIZE]
|
||||
```
|
||||
|
||||
## Helper functions
|
||||
|
||||
### `Predicates`
|
||||
|
||||
#### `eth2_fast_aggregate_verify`
|
||||
|
||||
```python
|
||||
def eth2_fast_aggregate_verify(pubkeys: Sequence[BLSPubkey], message: Bytes32, signature: BLSSignature) -> bool:
|
||||
"""
|
||||
Wrapper to ``bls.FastAggregateVerify`` accepting the ``G2_POINT_AT_INFINITY`` signature when ``pubkeys`` is empty.
|
||||
"""
|
||||
if len(pubkeys) == 0 and signature == G2_POINT_AT_INFINITY:
|
||||
return True
|
||||
return bls.FastAggregateVerify(pubkeys, message, signature)
|
||||
```
|
||||
|
||||
### Misc
|
||||
|
||||
#### `get_flag_indices_and_weights`
|
||||
|
||||
```python
|
||||
def get_flag_indices_and_weights() -> Sequence[Tuple[int, int]]:
|
||||
return (
|
||||
(TIMELY_HEAD_FLAG_INDEX, TIMELY_HEAD_WEIGHT),
|
||||
(TIMELY_SOURCE_FLAG_INDEX, TIMELY_SOURCE_WEIGHT),
|
||||
(TIMELY_TARGET_FLAG_INDEX, TIMELY_TARGET_WEIGHT),
|
||||
)
|
||||
```
|
||||
|
||||
#### `add_flag`
|
||||
|
||||
```python
|
||||
def add_flag(flags: ParticipationFlags, flag_index: int) -> ParticipationFlags:
|
||||
flag = ParticipationFlags(2**flag_index)
|
||||
return flags | flag
|
||||
```
|
||||
|
||||
#### `has_flag`
|
||||
|
||||
```python
|
||||
def has_flag(flags: ParticipationFlags, flag_index: int) -> bool:
|
||||
flag = ParticipationFlags(2**flag_index)
|
||||
return flags & flag == flag
|
||||
```
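A brief usage sketch of the two helpers above, showing how the flag indices defined earlier pack into a single participation byte (a plain `int` stands in for the SSZ `uint8` / `ParticipationFlags` type here):

```python
TIMELY_HEAD_FLAG_INDEX = 0
TIMELY_SOURCE_FLAG_INDEX = 1
TIMELY_TARGET_FLAG_INDEX = 2

def add_flag(flags: int, flag_index: int) -> int:
    return flags | 2**flag_index

def has_flag(flags: int, flag_index: int) -> bool:
    flag = 2**flag_index
    return flags & flag == flag

flags = 0b0000_0000
flags = add_flag(flags, TIMELY_SOURCE_FLAG_INDEX)
flags = add_flag(flags, TIMELY_TARGET_FLAG_INDEX)
assert flags == 0b0000_0110
assert has_flag(flags, TIMELY_TARGET_FLAG_INDEX)
assert not has_flag(flags, TIMELY_HEAD_FLAG_INDEX)
```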
### Beacon state accessors
|
||||
|
||||
#### `get_sync_committee_indices`
|
||||
|
||||
```python
|
||||
def get_sync_committee_indices(state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]:
|
||||
"""
|
||||
Return the sequence of sync committee indices (which may include duplicate indices) for a given state and epoch.
|
||||
"""
|
||||
MAX_RANDOM_BYTE = 2**8 - 1
|
||||
base_epoch = Epoch((max(epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD, 1) - 1) * EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
|
||||
active_validator_indices = get_active_validator_indices(state, base_epoch)
|
||||
active_validator_count = uint64(len(active_validator_indices))
|
||||
seed = get_seed(state, base_epoch, DOMAIN_SYNC_COMMITTEE)
|
||||
i = 0
|
||||
sync_committee_indices: List[ValidatorIndex] = []
|
||||
while len(sync_committee_indices) < SYNC_COMMITTEE_SIZE:
|
||||
shuffled_index = compute_shuffled_index(uint64(i % active_validator_count), active_validator_count, seed)
|
||||
candidate_index = active_validator_indices[shuffled_index]
|
||||
random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
|
||||
effective_balance = state.validators[candidate_index].effective_balance
|
||||
if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte: # Sample with replacement
|
||||
sync_committee_indices.append(candidate_index)
|
||||
i += 1
|
||||
return sync_committee_indices
|
||||
```
|
||||
|
||||
#### `get_sync_committee`
|
||||
|
||||
```python
|
||||
def get_sync_committee(state: BeaconState, epoch: Epoch) -> SyncCommittee:
|
||||
"""
|
||||
Return the sync committee for a given state and epoch.
|
||||
"""
|
||||
indices = get_sync_committee_indices(state, epoch)
|
||||
pubkeys = [state.validators[index].pubkey for index in indices]
|
||||
subcommitees = [pubkeys[i:i + SYNC_SUBCOMMITTEE_SIZE] for i in range(0, len(pubkeys), SYNC_SUBCOMMITTEE_SIZE)]
|
||||
pubkey_aggregates = [bls.AggregatePKs(subcommitee) for subcommitee in subcommitees]
|
||||
return SyncCommittee(pubkeys=pubkeys, pubkey_aggregates=pubkey_aggregates)
|
||||
```
|
||||
|
||||
#### `get_base_reward_per_increment`
|
||||
|
||||
```python
|
||||
def get_base_reward_per_increment(state: BeaconState) -> Gwei:
|
||||
return Gwei(EFFECTIVE_BALANCE_INCREMENT * BASE_REWARD_FACTOR // integer_squareroot(get_total_active_balance(state)))
|
||||
```
|
||||
|
||||
#### `get_base_reward`
|
||||
|
||||
*Note*: The function `get_base_reward` is modified with the removal of `BASE_REWARDS_PER_EPOCH`.
|
||||
|
||||
```python
|
||||
def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei:
|
||||
increments = state.validators[index].effective_balance // EFFECTIVE_BALANCE_INCREMENT
|
||||
return Gwei(increments * get_base_reward_per_increment(state))
|
||||
```
|
||||
|
||||
#### `get_unslashed_participating_indices`
|
||||
|
||||
```python
|
||||
def get_unslashed_participating_indices(state: BeaconState, flag_index: int, epoch: Epoch) -> Set[ValidatorIndex]:
|
||||
"""
|
||||
Return the active and unslashed validator indices for the given epoch and flag index.
|
||||
"""
|
||||
assert epoch in (get_previous_epoch(state), get_current_epoch(state))
|
||||
if epoch == get_current_epoch(state):
|
||||
epoch_participation = state.current_epoch_participation
|
||||
else:
|
||||
epoch_participation = state.previous_epoch_participation
|
||||
active_validator_indices = get_active_validator_indices(state, epoch)
|
||||
participating_indices = [i for i in active_validator_indices if has_flag(epoch_participation[i], flag_index)]
|
||||
return set(filter(lambda index: not state.validators[index].slashed, participating_indices))
|
||||
```
|
||||
|
||||
#### `get_flag_index_deltas`
|
||||
|
||||
```python
|
||||
def get_flag_index_deltas(state: BeaconState, flag_index: int, weight: uint64) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
|
||||
"""
|
||||
Return the deltas for a given flag index by scanning through the participation flags.
|
||||
"""
|
||||
rewards = [Gwei(0)] * len(state.validators)
|
||||
penalties = [Gwei(0)] * len(state.validators)
|
||||
unslashed_participating_indices = get_unslashed_participating_indices(state, flag_index, get_previous_epoch(state))
|
||||
increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from balances to avoid uint64 overflow
|
||||
unslashed_participating_increments = get_total_balance(state, unslashed_participating_indices) // increment
|
||||
active_increments = get_total_active_balance(state) // increment
|
||||
for index in get_eligible_validator_indices(state):
|
||||
base_reward = get_base_reward(state, index)
|
||||
if index in unslashed_participating_indices:
|
||||
if is_in_inactivity_leak(state):
|
||||
# This flag reward cancels the inactivity penalty corresponding to the flag index
|
||||
rewards[index] += Gwei(base_reward * weight // WEIGHT_DENOMINATOR)
|
||||
else:
|
||||
reward_numerator = base_reward * weight * unslashed_participating_increments
|
||||
rewards[index] += Gwei(reward_numerator // (active_increments * WEIGHT_DENOMINATOR))
|
||||
else:
|
||||
penalties[index] += Gwei(base_reward * weight // WEIGHT_DENOMINATOR)
|
||||
return rewards, penalties
|
||||
```
|
||||
|
||||
#### Modified `get_inactivity_penalty_deltas`
|
||||
|
||||
*Note*: The function `get_inactivity_penalty_deltas` is modified in the selection of matching target indices
|
||||
and the removal of `BASE_REWARDS_PER_EPOCH`.
|
||||
|
||||
```python
|
||||
def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
|
||||
"""
|
||||
Return the inactivity penalty deltas by considering timely target participation flags and inactivity scores.
|
||||
"""
|
||||
rewards = [Gwei(0) for _ in range(len(state.validators))]
|
||||
penalties = [Gwei(0) for _ in range(len(state.validators))]
|
||||
if is_in_inactivity_leak(state):
|
||||
previous_epoch = get_previous_epoch(state)
|
||||
matching_target_indices = get_unslashed_participating_indices(state, TIMELY_TARGET_FLAG_INDEX, previous_epoch)
|
||||
for index in get_eligible_validator_indices(state):
|
||||
for (_, weight) in get_flag_indices_and_weights():
|
||||
# This inactivity penalty cancels the flag reward corresponding to the flag index
|
||||
penalties[index] += Gwei(get_base_reward(state, index) * weight // WEIGHT_DENOMINATOR)
|
||||
if index not in matching_target_indices:
|
||||
penalty_numerator = state.validators[index].effective_balance * state.inactivity_scores[index]
|
||||
penalty_denominator = INACTIVITY_SCORE_BIAS * INACTIVITY_PENALTY_QUOTIENT_ALTAIR
|
||||
penalties[index] += Gwei(penalty_numerator // penalty_denominator)
|
||||
return rewards, penalties
|
||||
```
|
||||
|
||||
### Beacon state mutators
|
||||
|
||||
#### Modified `slash_validator`
|
||||
|
||||
*Note*: The function `slash_validator` is modified to use `MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR`.
|
||||
|
||||
```python
|
||||
def slash_validator(state: BeaconState,
|
||||
slashed_index: ValidatorIndex,
|
||||
whistleblower_index: ValidatorIndex=None) -> None:
|
||||
"""
|
||||
Slash the validator with index ``slashed_index``.
|
||||
"""
|
||||
epoch = get_current_epoch(state)
|
||||
initiate_validator_exit(state, slashed_index)
|
||||
validator = state.validators[slashed_index]
|
||||
validator.slashed = True
|
||||
validator.withdrawable_epoch = max(validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR))
|
||||
state.slashings[epoch % EPOCHS_PER_SLASHINGS_VECTOR] += validator.effective_balance
|
||||
decrease_balance(state, slashed_index, validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR)
|
||||
|
||||
# Apply proposer and whistleblower rewards
|
||||
proposer_index = get_beacon_proposer_index(state)
|
||||
if whistleblower_index is None:
|
||||
whistleblower_index = proposer_index
|
||||
whistleblower_reward = Gwei(validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT)
|
||||
proposer_reward = Gwei(whistleblower_reward // PROPOSER_REWARD_QUOTIENT)
|
||||
increase_balance(state, proposer_index, proposer_reward)
|
||||
increase_balance(state, whistleblower_index, Gwei(whistleblower_reward - proposer_reward))
|
||||
```
|
||||
|
||||
### Block processing
|
||||
|
||||
```python
|
||||
def process_block(state: BeaconState, block: BeaconBlock) -> None:
|
||||
process_block_header(state, block)
|
||||
process_randao(state, block.body)
|
||||
process_eth1_data(state, block.body)
|
||||
process_operations(state, block.body) # [Modified in Altair]
|
||||
process_sync_committee(state, block.body.sync_aggregate) # [New in Altair]
|
||||
```
|
||||
|
||||
#### Modified `process_attestation`
|
||||
|
||||
*Note*: The function `process_attestation` is modified to do incentive accounting with epoch participation flags.
|
||||
|
||||
```python
|
||||
def process_attestation(state: BeaconState, attestation: Attestation) -> None:
|
||||
data = attestation.data
|
||||
assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state))
|
||||
assert data.target.epoch == compute_epoch_at_slot(data.slot)
|
||||
assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH
|
||||
assert data.index < get_committee_count_per_slot(state, data.target.epoch)
|
||||
|
||||
committee = get_beacon_committee(state, data.slot, data.index)
|
||||
assert len(attestation.aggregation_bits) == len(committee)
|
||||
|
||||
if data.target.epoch == get_current_epoch(state):
|
||||
epoch_participation = state.current_epoch_participation
|
||||
justified_checkpoint = state.current_justified_checkpoint
|
||||
else:
|
||||
epoch_participation = state.previous_epoch_participation
|
||||
justified_checkpoint = state.previous_justified_checkpoint
|
||||
|
||||
# Matching roots
|
||||
is_matching_head = data.beacon_block_root == get_block_root_at_slot(state, data.slot)
|
||||
is_matching_source = data.source == justified_checkpoint
|
||||
is_matching_target = data.target.root == get_block_root(state, data.target.epoch)
|
||||
assert is_matching_source
|
||||
|
||||
# Verify signature
|
||||
assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation))
|
||||
|
||||
# Participation flag indices
|
||||
participation_flag_indices = []
|
||||
if is_matching_head and is_matching_target and state.slot <= data.slot + MIN_ATTESTATION_INCLUSION_DELAY:
|
||||
participation_flag_indices.append(TIMELY_HEAD_FLAG_INDEX)
|
||||
if is_matching_source and state.slot <= data.slot + integer_squareroot(SLOTS_PER_EPOCH):
|
||||
participation_flag_indices.append(TIMELY_SOURCE_FLAG_INDEX)
|
||||
if is_matching_target and state.slot <= data.slot + SLOTS_PER_EPOCH:
|
||||
participation_flag_indices.append(TIMELY_TARGET_FLAG_INDEX)
|
||||
|
||||
# Update epoch participation flags
|
||||
proposer_reward_numerator = 0
|
||||
for index in get_attesting_indices(state, data, attestation.aggregation_bits):
|
||||
for flag_index, weight in get_flag_indices_and_weights():
|
||||
if flag_index in participation_flag_indices and not has_flag(epoch_participation[index], flag_index):
|
||||
epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
|
||||
proposer_reward_numerator += get_base_reward(state, index) * weight
|
||||
|
||||
# Reward proposer
|
||||
proposer_reward = Gwei(proposer_reward_numerator // (WEIGHT_DENOMINATOR * PROPOSER_REWARD_QUOTIENT))
|
||||
increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
|
||||
```
|
||||
|
||||
#### Modified `process_deposit`
|
||||
|
||||
*Note*: The function `process_deposit` is modified to initialize `inactivity_scores`, `previous_epoch_participation`, `current_epoch_participation`.
|
||||
|
||||
```python
|
||||
def process_deposit(state: BeaconState, deposit: Deposit) -> None:
|
||||
# Verify the Merkle branch
|
||||
assert is_valid_merkle_branch(
|
||||
leaf=hash_tree_root(deposit.data),
|
||||
branch=deposit.proof,
|
||||
depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, # Add 1 for the List length mix-in
|
||||
index=state.eth1_deposit_index,
|
||||
root=state.eth1_data.deposit_root,
|
||||
)
|
||||
|
||||
# Deposits must be processed in order
|
||||
state.eth1_deposit_index += 1
|
||||
|
||||
pubkey = deposit.data.pubkey
|
||||
amount = deposit.data.amount
|
||||
validator_pubkeys = [validator.pubkey for validator in state.validators]
|
||||
if pubkey not in validator_pubkeys:
|
||||
# Verify the deposit signature (proof of possession) which is not checked by the deposit contract
|
||||
deposit_message = DepositMessage(
|
||||
pubkey=deposit.data.pubkey,
|
||||
withdrawal_credentials=deposit.data.withdrawal_credentials,
|
||||
amount=deposit.data.amount,
|
||||
)
|
||||
domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks
|
||||
signing_root = compute_signing_root(deposit_message, domain)
|
||||
# Initialize validator if the deposit signature is valid
|
||||
if bls.Verify(pubkey, signing_root, deposit.data.signature):
|
||||
state.validators.append(get_validator_from_deposit(state, deposit))
|
||||
state.balances.append(amount)
|
||||
state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000))
|
||||
state.current_epoch_participation.append(ParticipationFlags(0b0000_0000))
|
||||
state.inactivity_scores.append(0)
|
||||
else:
|
||||
# Increase balance by deposit amount
|
||||
index = ValidatorIndex(validator_pubkeys.index(pubkey))
|
||||
increase_balance(state, index, amount)
|
||||
```
|
||||
|
||||
#### Sync committee processing
|
||||
|
||||
```python
|
||||
def process_sync_committee(state: BeaconState, aggregate: SyncAggregate) -> None:
|
||||
# Verify sync committee aggregate signature signing over the previous slot block root
|
||||
previous_slot = Slot(max(int(state.slot), 1) - 1)
|
||||
committee_indices = get_sync_committee_indices(state, get_current_epoch(state))
|
||||
included_indices = [index for index, bit in zip(committee_indices, aggregate.sync_committee_bits) if bit]
|
||||
committee_pubkeys = state.current_sync_committee.pubkeys
|
||||
included_pubkeys = [pubkey for pubkey, bit in zip(committee_pubkeys, aggregate.sync_committee_bits) if bit]
|
||||
domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, compute_epoch_at_slot(previous_slot))
|
||||
signing_root = compute_signing_root(get_block_root_at_slot(state, previous_slot), domain)
|
||||
assert eth2_fast_aggregate_verify(included_pubkeys, signing_root, aggregate.sync_committee_signature)
|
||||
|
||||
# Compute the maximum sync rewards for the slot
|
||||
total_active_increments = get_total_active_balance(state) // EFFECTIVE_BALANCE_INCREMENT
|
||||
total_base_rewards = Gwei(get_base_reward_per_increment(state) * total_active_increments)
|
||||
max_epoch_rewards = Gwei(total_base_rewards * SYNC_REWARD_WEIGHT // WEIGHT_DENOMINATOR)
|
||||
max_slot_rewards = Gwei(max_epoch_rewards * len(included_indices) // len(committee_indices) // SLOTS_PER_EPOCH)
|
||||
|
||||
# Compute the participant and proposer sync rewards
|
||||
committee_effective_balance = sum([state.validators[index].effective_balance for index in included_indices])
|
||||
committee_effective_balance = max(EFFECTIVE_BALANCE_INCREMENT, committee_effective_balance)
|
||||
for included_index in included_indices:
|
||||
effective_balance = state.validators[included_index].effective_balance
|
||||
inclusion_reward = Gwei(max_slot_rewards * effective_balance // committee_effective_balance)
|
||||
proposer_reward = Gwei(inclusion_reward // PROPOSER_REWARD_QUOTIENT)
|
||||
increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
|
||||
increase_balance(state, included_index, inclusion_reward - proposer_reward)
|
||||
```
|
||||
|
||||
### Epoch processing
|
||||
|
||||
```python
|
||||
def process_epoch(state: BeaconState) -> None:
|
||||
process_justification_and_finalization(state) # [Modified in Altair]
|
||||
process_inactivity_updates(state) # [New in Altair]
|
||||
process_rewards_and_penalties(state) # [Modified in Altair]
|
||||
process_registry_updates(state)
|
||||
process_slashings(state) # [Modified in Altair]
|
||||
process_eth1_data_reset(state)
|
||||
process_effective_balance_updates(state)
|
||||
process_slashings_reset(state)
|
||||
process_randao_mixes_reset(state)
|
||||
process_historical_roots_update(state)
|
||||
process_participation_flag_updates(state) # [New in Altair]
|
||||
process_sync_committee_updates(state) # [New in Altair]
|
||||
```
|
||||
|
||||
#### Justification and finalization
|
||||
|
||||
*Note*: The function `process_justification_and_finalization` is modified with `matching_target_attestations` replaced by `matching_target_indices`.
|
||||
|
||||
```python
|
||||
def process_justification_and_finalization(state: BeaconState) -> None:
|
||||
# Initial FFG checkpoint values have a `0x00` stub for `root`.
|
||||
# Skip FFG updates in the first two epochs to avoid corner cases that might result in modifying this stub.
|
||||
if get_current_epoch(state) <= GENESIS_EPOCH + 1:
|
||||
return
|
||||
previous_epoch = get_previous_epoch(state)
|
||||
current_epoch = get_current_epoch(state)
|
||||
old_previous_justified_checkpoint = state.previous_justified_checkpoint
|
||||
old_current_justified_checkpoint = state.current_justified_checkpoint
|
||||
|
||||
# Process justifications
|
||||
state.previous_justified_checkpoint = state.current_justified_checkpoint
|
||||
state.justification_bits[1:] = state.justification_bits[:JUSTIFICATION_BITS_LENGTH - 1]
|
||||
state.justification_bits[0] = 0b0
|
||||
matching_target_indices = get_unslashed_participating_indices(state, TIMELY_TARGET_FLAG_INDEX, previous_epoch)
|
||||
if get_total_balance(state, matching_target_indices) * 3 >= get_total_active_balance(state) * 2:
|
||||
state.current_justified_checkpoint = Checkpoint(epoch=previous_epoch,
|
||||
root=get_block_root(state, previous_epoch))
|
||||
state.justification_bits[1] = 0b1
|
||||
matching_target_indices = get_unslashed_participating_indices(state, TIMELY_TARGET_FLAG_INDEX, current_epoch)
|
||||
if get_total_balance(state, matching_target_indices) * 3 >= get_total_active_balance(state) * 2:
|
||||
state.current_justified_checkpoint = Checkpoint(epoch=current_epoch,
|
||||
root=get_block_root(state, current_epoch))
|
||||
state.justification_bits[0] = 0b1
|
||||
|
||||
# Process finalizations
|
||||
bits = state.justification_bits
|
||||
# The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th as source
|
||||
if all(bits[1:4]) and old_previous_justified_checkpoint.epoch + 3 == current_epoch:
|
||||
state.finalized_checkpoint = old_previous_justified_checkpoint
|
||||
# The 2nd/3rd most recent epochs are justified, the 2nd using the 3rd as source
|
||||
if all(bits[1:3]) and old_previous_justified_checkpoint.epoch + 2 == current_epoch:
|
||||
state.finalized_checkpoint = old_previous_justified_checkpoint
|
||||
# The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as source
|
||||
if all(bits[0:3]) and old_current_justified_checkpoint.epoch + 2 == current_epoch:
|
||||
state.finalized_checkpoint = old_current_justified_checkpoint
|
||||
# The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source
|
||||
if all(bits[0:2]) and old_current_justified_checkpoint.epoch + 1 == current_epoch:
|
||||
state.finalized_checkpoint = old_current_justified_checkpoint
|
||||
```
|
||||
|
||||
#### Inactivity scores
|
||||
|
||||
*Note*: The function `process_inactivity_updates` is new.
|
||||
|
||||
```python
|
||||
def process_inactivity_updates(state: BeaconState) -> None:
|
||||
for index in get_eligible_validator_indices(state):
|
||||
if index in get_unslashed_participating_indices(state, TIMELY_TARGET_FLAG_INDEX, get_previous_epoch(state)):
|
||||
if state.inactivity_scores[index] > 0:
|
||||
state.inactivity_scores[index] -= 1
|
||||
elif is_in_inactivity_leak(state):
|
||||
state.inactivity_scores[index] += INACTIVITY_SCORE_BIAS
|
||||
```
|
||||
|
||||
#### Rewards and penalties
|
||||
|
||||
*Note*: The function `process_rewards_and_penalties` is modified to support the incentive accounting reforms.
|
||||
|
||||
```python
|
||||
def process_rewards_and_penalties(state: BeaconState) -> None:
|
||||
# No rewards are applied at the end of `GENESIS_EPOCH` because rewards are for work done in the previous epoch
|
||||
if get_current_epoch(state) == GENESIS_EPOCH:
|
||||
return
|
||||
|
||||
flag_indices_and_numerators = get_flag_indices_and_weights()
|
||||
flag_deltas = [get_flag_index_deltas(state, index, numerator) for (index, numerator) in flag_indices_and_numerators]
|
||||
deltas = flag_deltas + [get_inactivity_penalty_deltas(state)]
|
||||
for (rewards, penalties) in deltas:
|
||||
for index in range(len(state.validators)):
|
||||
increase_balance(state, ValidatorIndex(index), rewards[index])
|
||||
decrease_balance(state, ValidatorIndex(index), penalties[index])
|
||||
```
|
||||
|
||||
#### Slashings
|
||||
|
||||
*Note*: The function `process_slashings` is modified to use `PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR`.
|
||||
|
||||
```python
|
||||
def process_slashings(state: BeaconState) -> None:
|
||||
epoch = get_current_epoch(state)
|
||||
total_balance = get_total_active_balance(state)
|
||||
adjusted_total_slashing_balance = min(sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR, total_balance)
|
||||
for index, validator in enumerate(state.validators):
|
||||
if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch:
|
||||
increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from penalty numerator to avoid uint64 overflow
|
||||
penalty_numerator = validator.effective_balance // increment * adjusted_total_slashing_balance
|
||||
penalty = penalty_numerator // total_balance * increment
|
||||
decrease_balance(state, ValidatorIndex(index), penalty)
|
||||
```

#### Participation flags updates

*Note*: The function `process_participation_flag_updates` is new.

```python
def process_participation_flag_updates(state: BeaconState) -> None:
    state.previous_epoch_participation = state.current_epoch_participation
    state.current_epoch_participation = [ParticipationFlags(0b0000_0000) for _ in range(len(state.validators))]
```

#### Sync committee updates

*Note*: The function `process_sync_committee_updates` is new.

```python
def process_sync_committee_updates(state: BeaconState) -> None:
    next_epoch = get_current_epoch(state) + Epoch(1)
    if next_epoch % EPOCHS_PER_SYNC_COMMITTEE_PERIOD == 0:
        state.current_sync_committee = state.next_sync_committee
        state.next_sync_committee = get_sync_committee(state, next_epoch + EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
```
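
For intuition, assuming `EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256` (the provisional Altair value, used here only for illustration), the rotation above fires only at period boundaries:

```python
EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256  # assumed value for this example

# `next_epoch` values at which the epoch transition above rotates the committees:
rotation_boundaries = [e for e in range(1, 1025) if e % EPOCHS_PER_SYNC_COMMITTEE_PERIOD == 0]
assert rotation_boundaries == [256, 512, 768, 1024]
```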
85
specs/altair/fork.md
Normal file
85
specs/altair/fork.md
Normal file
@@ -0,0 +1,85 @@
# Ethereum 2.0 Altair fork

**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents

<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Configuration](#configuration)
- [Fork to Altair](#fork-to-altair)
  - [Fork trigger](#fork-trigger)
  - [Upgrading the state](#upgrading-the-state)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->

## Introduction

This document describes the process of the first upgrade of Ethereum 2.0: the Altair hard fork, introducing light client support and other improvements.

## Configuration

Warning: this configuration is not definitive.

| Name | Value |
| - | - |
| `ALTAIR_FORK_VERSION` | `Version('0x01000000')` |
| `ALTAIR_FORK_SLOT` | `Slot(0)` **TBD** |

## Fork to Altair

### Fork trigger

TBD. Social consensus, along with state conditions such as epoch boundary, finality, deposits, active validator count, etc. may be part of the decision process to trigger the fork. For now we assume the condition will be triggered at slot `ALTAIR_FORK_SLOT`, where `ALTAIR_FORK_SLOT % SLOTS_PER_EPOCH == 0`.

### Upgrading the state

After `process_slots` of Phase 0 finishes, if `state.slot == ALTAIR_FORK_SLOT`, an irregular state change is made to upgrade to Altair.

```python
def upgrade_to_altair(pre: phase0.BeaconState) -> BeaconState:
    epoch = get_current_epoch(pre)
    post = BeaconState(
        # Versioning
        genesis_time=pre.genesis_time,
        genesis_validators_root=pre.genesis_validators_root,
        slot=pre.slot,
        fork=Fork(
            previous_version=pre.fork.current_version,
            current_version=ALTAIR_FORK_VERSION,
            epoch=epoch,
        ),
        # History
        latest_block_header=pre.latest_block_header,
        block_roots=pre.block_roots,
        state_roots=pre.state_roots,
        historical_roots=pre.historical_roots,
        # Eth1
        eth1_data=pre.eth1_data,
        eth1_data_votes=pre.eth1_data_votes,
        eth1_deposit_index=pre.eth1_deposit_index,
        # Registry
        validators=pre.validators,
        balances=pre.balances,
        # Randomness
        randao_mixes=pre.randao_mixes,
        # Slashings
        slashings=pre.slashings,
        # Participation
        previous_epoch_participation=[ParticipationFlags(0b0000_0000) for _ in range(len(pre.validators))],
        current_epoch_participation=[ParticipationFlags(0b0000_0000) for _ in range(len(pre.validators))],
        # Finality
        justification_bits=pre.justification_bits,
        previous_justified_checkpoint=pre.previous_justified_checkpoint,
        current_justified_checkpoint=pre.current_justified_checkpoint,
        finalized_checkpoint=pre.finalized_checkpoint,
        # Inactivity
        inactivity_scores=[0 for _ in range(len(pre.validators))],
    )
    # Fill in sync committees
    post.current_sync_committee = get_sync_committee(post, get_current_epoch(post))
    post.next_sync_committee = get_sync_committee(post, get_current_epoch(post) + EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
    return post
```
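
A minimal sketch of how a client might wire this into its slot processing (illustrative only; the surrounding plumbing and fork tracking are client-specific and not part of this specification):

```python
# Illustrative only: apply the irregular state change once `process_slots` has
# advanced a Phase 0 state exactly to ALTAIR_FORK_SLOT. The return type is loose here.
def maybe_upgrade_to_altair(state: phase0.BeaconState):
    if state.slot == ALTAIR_FORK_SLOT:
        return upgrade_to_altair(state)  # continue with Altair rules afterwards
    return state                         # still Phase 0; keep using Phase 0 rules
```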
221
specs/altair/p2p-interface.md
Normal file
221
specs/altair/p2p-interface.md
Normal file
@@ -0,0 +1,221 @@
|
||||
# Ethereum Altair networking specification
|
||||
|
||||
This document contains the networking specification for Ethereum 2.0 clients added during the Altair deployment.
|
||||
This document should be viewed as additive to the [document from Phase 0](../phase0/p2p-interface.md) and will be referred to as the "Phase 0 document" hereafter.
|
||||
Readers should understand the Phase 0 document and use it as a basis to understand the changes outlined in this document.
|
||||
|
||||
Altair adds new messages, topics and data to the Req-Resp, Gossip and Discovery domain. Some Phase 0 features will be deprecated, but not removed immediately.
|
||||
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Warning](#warning)
|
||||
- [Modifications in Altair](#modifications-in-altair)
|
||||
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
|
||||
- [Topics and messages](#topics-and-messages)
|
||||
- [Global topics](#global-topics)
|
||||
- [`beacon_block`](#beacon_block)
|
||||
- [`sync_committee_contribution_and_proof`](#sync_committee_contribution_and_proof)
|
||||
- [Sync committee subnets](#sync-committee-subnets)
|
||||
- [`sync_committee_{subnet_id}`](#sync_committee_subnet_id)
|
||||
- [Sync committees and aggregation](#sync-committees-and-aggregation)
|
||||
- [Transitioning the gossip](#transitioning-the-gossip)
|
||||
- [The Req/Resp domain](#the-reqresp-domain)
|
||||
- [Req-Resp interaction](#req-resp-interaction)
|
||||
- [`ForkDigest`-context](#forkdigest-context)
|
||||
- [Messages](#messages)
|
||||
- [BeaconBlocksByRange v2](#beaconblocksbyrange-v2)
|
||||
- [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2)
|
||||
- [Transitioning from v1 to v2](#transitioning-from-v1-to-v2)
|
||||
- [The discovery domain: discv5](#the-discovery-domain-discv5)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Warning
|
||||
|
||||
This document is currently illustrative for early Altair testnets and some parts are subject to change.
|
||||
Refer to the note in the [validator guide](./validator.md) for further details.
|
||||
|
||||
# Modifications in Altair
|
||||
|
||||
## The gossip domain: gossipsub
|
||||
|
||||
Gossip meshes are added in Altair to support the consensus activities of the sync committees.
|
||||
Validators use an aggregation scheme to balance the processing and networking load across all of the relevant actors.
|
||||
|
||||
### Topics and messages
|
||||
|
||||
Topics follow the same specification as in the Phase 0 document.
|
||||
New topics are added in Altair to support the sync committees and the beacon block topic is updated with the modified type.
|
||||
|
||||
The specification around the creation, validation, and dissemination of messages has not changed from the Phase 0 document.
|
||||
|
||||
The new topics along with the type of the `data` field of a gossipsub message are given in this table:
|
||||
|
||||
| Name | Message Type |
|
||||
| - | - |
|
||||
| `beacon_block` | `SignedBeaconBlock` (modified) |
|
||||
| `sync_committee_contribution_and_proof` | `SignedContributionAndProof` |
|
||||
| `sync_committee_{subnet_id}` | `SyncCommitteeSignature` |
|
||||
|
||||
Definitions of these new types can be found in the [Altair validator guide](./validator.md#containers).
|
||||
|
||||
Note that the `ForkDigestValue` path segment of the topic separates the old and the new `beacon_block` topics.
|
||||
|
||||
#### Global topics
|
||||
|
||||
Altair changes the type of the global beacon block topic and adds one global topic to propagate partially aggregated sync committee signatures to all potential proposers of beacon blocks.
|
||||
|
||||
##### `beacon_block`
|
||||
|
||||
The existing specification for this topic does not change from the Phase 0 document,
|
||||
but the type of the payload does change to the (modified) `SignedBeaconBlock`.
|
||||
This type changes due to the inclusion of the inner `BeaconBlockBody` that is modified in Altair.
|
||||
|
||||
See the [state transition document](./beacon-chain.md#beaconblockbody) for Altair for further details.
|
||||
|
||||
##### `sync_committee_contribution_and_proof`
|
||||
|
||||
This topic is used to propagate partially aggregated sync committee signatures to be included in future blocks.
|
||||
|
||||
The following validations MUST pass before forwarding the `signed_contribution_and_proof` on the network; define `contribution_and_proof = signed_contribution_and_proof.message` and `contribution = contribution_and_proof.contribution` for convenience:
|
||||
|
||||
- _[IGNORE]_ The contribution's slot is for the current slot, i.e. `contribution.slot == current_slot`.
|
||||
- _[IGNORE]_ The block being signed over (`contribution.beacon_block_root`) has been seen (via both gossip and non-gossip sources).
|
||||
- _[REJECT]_ The subcommittee index is in the allowed range, i.e. `contribution.subcommittee_index < SYNC_COMMITTEE_SUBNET_COUNT`.
|
||||
- _[IGNORE]_ The sync committee contribution is the first valid contribution received for the aggregator with index `contribution_and_proof.aggregator_index` for the slot `contribution.slot`.
|
||||
- _[REJECT]_ The aggregator's validator index is within the current sync committee --
  i.e. `state.validators[contribution_and_proof.aggregator_index].pubkey in state.current_sync_committee.pubkeys`.
|
||||
- _[REJECT]_ The `contribution_and_proof.selection_proof` is a valid signature of the `contribution.slot` by the validator with index `contribution_and_proof.aggregator_index`.
|
||||
- _[REJECT]_ `contribution_and_proof.selection_proof` selects the validator as an aggregator for the slot -- i.e. `is_sync_committee_aggregator(state, contribution.slot, contribution_and_proof.selection_proof)` returns `True`.
|
||||
- _[REJECT]_ The aggregator signature, `signed_contribution_and_proof.signature`, is valid.
|
||||
- _[REJECT]_ The aggregate signature is valid for the message `beacon_block_root` and aggregate pubkey derived from the participation info in `aggregation_bits` for the subcommittee specified by the `subcommittee_index`.
|
||||
|
||||
#### Sync committee subnets
|
||||
|
||||
Sync committee subnets are used to propagate unaggregated sync committee signatures to subsections of the network.
|
||||
|
||||
##### `sync_committee_{subnet_id}`
|
||||
|
||||
The `sync_committee_{subnet_id}` topics are used to propagate unaggregated sync committee signatures to the subnet `subnet_id` to be aggregated before being gossiped to the global `sync_committee_contribution_and_proof` topic.
|
||||
|
||||
The following validations MUST pass before forwarding the `sync_committee_signature` on the network:
|
||||
|
||||
- _[IGNORE]_ The signature's slot is for the current slot, i.e. `sync_committee_signature.slot == current_slot`.
|
||||
- _[IGNORE]_ The block being signed over (`sync_committee_signature.beacon_block_root`) has been seen (via both gossip and non-gossip sources).
|
||||
- _[IGNORE]_ There has been no other valid sync committee signature for the declared `slot` for the validator referenced by `sync_committee_signature.validator_index`.
|
||||
- _[REJECT]_ The validator producing this `sync_committee_signature` is in the current sync committee, i.e. `state.validators[sync_committee_signature.validator_index].pubkey in state.current_sync_committee.pubkeys`.
|
||||
- _[REJECT]_ The `subnet_id` is correct, i.e. `subnet_id in compute_subnets_for_sync_committee(state, sync_committee_signature.validator_index)`.
|
||||
- _[REJECT]_ The `signature` is valid for the message `beacon_block_root` for the validator referenced by `validator_index`.
|
||||
|
||||
#### Sync committees and aggregation
|
||||
|
||||
The aggregation scheme closely follows the design of the attestation aggregation scheme.
|
||||
Sync committee signatures are broadcast into "subnets" defined by a topic.
|
||||
The number of subnets is defined by `SYNC_COMMITTEE_SUBNET_COUNT` in the [Altair validator guide](./validator.md#constants).
|
||||
Sync committee members are divided into "subcommittees" which are then assigned to a subnet for the duration of tenure in the sync committee.
|
||||
Individual validators can be duplicated in the broader sync committee such that they are included multiple times in a given subcommittee or across multiple subcommittees.
|
||||
|
||||
Unaggregated signatures (along with metadata) are sent as `SyncCommitteeSignature`s on the `sync_committee_{subnet_id}` topics.
|
||||
|
||||
Aggregated sync committee signatures are packaged into (signed) `SyncCommitteeContribution` along with proofs and gossiped to the `sync_committee_contribution_and_proof` topic.
|
||||
|
||||
### Transitioning the gossip
|
||||
|
||||
With any fork, the fork version, and thus the `ForkDigestValue`, change.
|
||||
Message types are unique per topic, and so for a smooth transition a node must temporarily subscribe to both the old and new topics.
|
||||
|
||||
The topics that are not removed in a fork are updated with a new `ForkDigestValue`. In advance of the fork, a node SHOULD subscribe to the post-fork variants of the topics.
|
||||
|
||||
Subscriptions are expected to be well-received; all updated nodes should subscribe as well.
|
||||
Topic-meshes can be grafted quickly as the nodes are already connected and exchanging gossip control messages.
|
||||
|
||||
Messages SHOULD NOT be re-broadcast from one fork to the other.
|
||||
A node's behavior before the fork and after the fork are as follows:
|
||||
Pre-fork:
|
||||
- Peers who propagate messages on the post-fork topics MAY be scored negatively proportionally to time till fork,
|
||||
to account for clock discrepancy.
|
||||
- Messages can be IGNORED on the post-fork topics, with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` margin.
|
||||
|
||||
Post-fork:
|
||||
- Peers who propagate messages on the pre-fork topics MUST NOT be scored negatively, as lagging IWANT requests may force them to do so.
|
||||
- Messages on pre and post-fork variants of topics share application-level caches.
|
||||
E.g. an attestation on both the old and new topic is ignored like any duplicate.
|
||||
- Two epochs after the fork, pre-fork topics SHOULD be unsubscribed from. This is well after the configured `seen_ttl`.
|
||||
|
||||
## The Req/Resp domain
|
||||
|
||||
### Req-Resp interaction
|
||||
|
||||
An additional `<context-bytes>` field is introduced to the `response_chunk` as defined in the Phase 0 document:
|
||||
|
||||
```
|
||||
response_chunk ::= <result> | <context-bytes> | <encoding-dependent-header> | <encoded-payload>
|
||||
```
|
||||
|
||||
All Phase 0 methods are compatible: `<context-bytes>` is empty by default.
|
||||
On a non-zero `<result>` with `ErrorMessage` payload, the `<context-bytes>` is also empty.
|
||||
|
||||
In Altair and later forks, `<context-bytes>` functions as a short meta-data,
|
||||
defined per req-resp method, and can parametrize the payload decoder.
|
||||
|
||||
#### `ForkDigest`-context
|
||||
|
||||
Starting with Altair, and in future forks, SSZ type definitions may change.
|
||||
For this common case, we define the `ForkDigest`-context:
|
||||
|
||||
A fixed-width 4 byte `<context-bytes>`, set to the `ForkDigest` matching the chunk:
|
||||
`compute_fork_digest(fork_version, genesis_validators_root)`.
|
||||
|
||||
### Messages
|
||||
|
||||
#### BeaconBlocksByRange v2
|
||||
|
||||
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/`
|
||||
|
||||
Request and Response remain unchanged. A `ForkDigest`-context is used to select the fork namespace of the Response type.
|
||||
|
||||
Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||
|
||||
| `fork_version` | Chunk SSZ type |
|
||||
| `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` |
|
||||
| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
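
As an illustration of the table above, a responder or requester might dispatch on the received context bytes roughly as follows (a sketch only; the helper name and plumbing are not part of this specification):

```python
# Sketch: map <context-bytes> to the SSZ type used to encode/decode a response chunk.
def chunk_ssz_type(context: ForkDigest, genesis_validators_root: Root):
    if context == compute_fork_digest(GENESIS_FORK_VERSION, genesis_validators_root):
        return phase0.SignedBeaconBlock
    if context == compute_fork_digest(ALTAIR_FORK_VERSION, genesis_validators_root):
        return altair.SignedBeaconBlock
    raise ValueError("unknown fork digest context")
```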
|
||||
|
||||
#### BeaconBlocksByRoot v2
|
||||
|
||||
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/`
|
||||
|
||||
Request and Response remain unchanged. A `ForkDigest`-context is used to select the fork namespace of the Response type.
|
||||
|
||||
Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||
|
||||
| `fork_version` | Chunk SSZ type |
|
||||
| `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` |
|
||||
| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
|
||||
|
||||
### Transitioning from v1 to v2
|
||||
|
||||
In advance of the fork, implementations can opt in to both run the v1 and v2 for a smooth transition.
|
||||
This is non-breaking, and is recommended as soon as the fork specification is stable.
|
||||
|
||||
The v1 variants will be deprecated, and implementations should use v2 when available
(as negotiated with peers via LibP2P multistream-select).
|
||||
|
||||
The v1 method MAY be unregistered at the fork boundary.
|
||||
In the event of a request on v1 for an Altair specific payload,
|
||||
the responder MUST return the **InvalidRequest** response code.
|
||||
|
||||
## The discovery domain: discv5
|
||||
|
||||
The `attnets` key of the ENR is used as defined in the Phase 0 document.
|
||||
|
||||
An additional bitfield is added to the ENR under the key `syncnets` to facilitate sync committee subnet discovery.
|
||||
The length of this bitfield is `SYNC_COMMITTEE_SUBNET_COUNT` where each bit corresponds to a distinct `subnet_id` for a specific sync committee subnet.
|
||||
The `i`th bit is set in this bitfield if the validator is currently subscribed to the `sync_committee_{i}` topic.
|
||||
|
||||
See the [validator document](./validator.md#sync-committee-subnet-stability) for further details on how the new bits are used.
|
||||
200
specs/altair/sync-protocol.md
Normal file
200
specs/altair/sync-protocol.md
Normal file
@@ -0,0 +1,200 @@
|
||||
# Minimal Light Client
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Constants](#constants)
|
||||
- [Configuration](#configuration)
|
||||
- [Misc](#misc)
|
||||
- [Time parameters](#time-parameters)
|
||||
- [Containers](#containers)
|
||||
- [`LightClientSnapshot`](#lightclientsnapshot)
|
||||
- [`LightClientUpdate`](#lightclientupdate)
|
||||
- [`LightClientStore`](#lightclientstore)
|
||||
- [Helper functions](#helper-functions)
|
||||
- [`get_subtree_index`](#get_subtree_index)
|
||||
- [Light client state updates](#light-client-state-updates)
|
||||
- [`validate_light_client_update`](#validate_light_client_update)
|
||||
- [`apply_light_client_update`](#apply_light_client_update)
|
||||
- [`process_light_client_update`](#process_light_client_update)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
Eth2 is designed to be light client friendly for constrained environments to
|
||||
access Eth2 with reasonable safety and liveness.
|
||||
Such environments include resource-constrained devices (e.g. phones for trust-minimised wallets)
|
||||
and metered VMs (e.g. blockchain VMs for cross-chain bridges).
|
||||
|
||||
This document suggests a minimal light client design for the beacon chain that
|
||||
uses sync committees introduced in [this beacon chain extension](./beacon-chain.md).
|
||||
|
||||
## Constants
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `FINALIZED_ROOT_INDEX` | `get_generalized_index(BeaconState, 'finalized_checkpoint', 'root')` |
|
||||
| `NEXT_SYNC_COMMITTEE_INDEX` | `get_generalized_index(BeaconState, 'next_sync_committee')` |
|
||||
|
||||
## Configuration
|
||||
|
||||
### Misc
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `MIN_SYNC_COMMITTEE_PARTICIPANTS` | `1` |
|
||||
| `MAX_VALID_LIGHT_CLIENT_UPDATES` | `uint64(2**64 - 1)` |
|
||||
|
||||
### Time parameters
|
||||
|
||||
| Name | Value | Unit | Duration |
|
||||
| - | - | :-: | :-: |
|
||||
| `LIGHT_CLIENT_UPDATE_TIMEOUT` | `Slot(2**13)` | slots | ~27 hours |
|
||||
|
||||
## Containers
|
||||
|
||||
### `LightClientSnapshot`
|
||||
|
||||
```python
|
||||
class LightClientSnapshot(Container):
|
||||
# Beacon block header
|
||||
header: BeaconBlockHeader
|
||||
# Sync committees corresponding to the header
|
||||
current_sync_committee: SyncCommittee
|
||||
next_sync_committee: SyncCommittee
|
||||
```
|
||||
|
||||
### `LightClientUpdate`
|
||||
|
||||
```python
|
||||
class LightClientUpdate(Container):
|
||||
# Update beacon block header
|
||||
header: BeaconBlockHeader
|
||||
# Next sync committee corresponding to the header
|
||||
next_sync_committee: SyncCommittee
|
||||
next_sync_committee_branch: Vector[Bytes32, floorlog2(NEXT_SYNC_COMMITTEE_INDEX)]
|
||||
# Finality proof for the update header
|
||||
finality_header: BeaconBlockHeader
|
||||
finality_branch: Vector[Bytes32, floorlog2(FINALIZED_ROOT_INDEX)]
|
||||
# Sync committee aggregate signature
|
||||
sync_committee_bits: Bitvector[SYNC_COMMITTEE_SIZE]
|
||||
sync_committee_signature: BLSSignature
|
||||
# Fork version for the aggregate signature
|
||||
fork_version: Version
|
||||
```
|
||||
|
||||
### `LightClientStore`
|
||||
|
||||
```python
|
||||
class LightClientStore(Container):
|
||||
snapshot: LightClientSnapshot
|
||||
valid_updates: List[LightClientUpdate, MAX_VALID_LIGHT_CLIENT_UPDATES]
|
||||
```

## Helper functions

### `get_subtree_index`

```python
def get_subtree_index(generalized_index: GeneralizedIndex) -> uint64:
    return uint64(generalized_index % 2**(floorlog2(generalized_index)))
```
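
For example, a generalized index of `105` (binary `1101001`) has `floorlog2(105) == 6`, so its subtree index is `105 % 2**6 == 41` -- the position of the corresponding leaf within its layer:

```python
assert get_subtree_index(GeneralizedIndex(105)) == 41  # 105 = 0b1101001, floorlog2 = 6, 105 % 64 = 41
```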

## Light client state updates

A light client maintains its state in a `store` object of type `LightClientStore` and receives `update` objects of type `LightClientUpdate`. Every `update` triggers `process_light_client_update(store, update, current_slot, genesis_validators_root)` where `current_slot` is the current slot based on some local clock.

#### `validate_light_client_update`

```python
def validate_light_client_update(snapshot: LightClientSnapshot, update: LightClientUpdate,
                                 genesis_validators_root: Root) -> None:
    # Verify update slot is larger than snapshot slot
    assert update.header.slot > snapshot.header.slot

    # Verify update does not skip a sync committee period
    snapshot_period = compute_epoch_at_slot(snapshot.header.slot) // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    update_period = compute_epoch_at_slot(update.header.slot) // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    assert update_period in (snapshot_period, snapshot_period + 1)

    # Verify update header root is the finalized root of the finality header, if specified
    if update.finality_header == BeaconBlockHeader():
        signed_header = update.header
        assert update.finality_branch == [Bytes32() for _ in range(floorlog2(FINALIZED_ROOT_INDEX))]
    else:
        signed_header = update.finality_header
        assert is_valid_merkle_branch(
            leaf=hash_tree_root(update.header),
            branch=update.finality_branch,
            depth=floorlog2(FINALIZED_ROOT_INDEX),
            index=get_subtree_index(FINALIZED_ROOT_INDEX),
            root=update.finality_header.state_root,
        )

    # Verify update next sync committee if the update period incremented
    if update_period == snapshot_period:
        sync_committee = snapshot.current_sync_committee
        assert update.next_sync_committee_branch == [Bytes32() for _ in range(floorlog2(NEXT_SYNC_COMMITTEE_INDEX))]
    else:
        sync_committee = snapshot.next_sync_committee
        assert is_valid_merkle_branch(
            leaf=hash_tree_root(update.next_sync_committee),
            branch=update.next_sync_committee_branch,
            depth=floorlog2(NEXT_SYNC_COMMITTEE_INDEX),
            index=get_subtree_index(NEXT_SYNC_COMMITTEE_INDEX),
            root=update.header.state_root,
        )

    # Verify sync committee has sufficient participants
    assert sum(update.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS

    # Verify sync committee aggregate signature
    participant_pubkeys = [pubkey for (bit, pubkey) in zip(update.sync_committee_bits, sync_committee.pubkeys) if bit]
    domain = compute_domain(DOMAIN_SYNC_COMMITTEE, update.fork_version, genesis_validators_root)
    signing_root = compute_signing_root(signed_header, domain)
    assert bls.FastAggregateVerify(participant_pubkeys, signing_root, update.sync_committee_signature)
```

#### `apply_light_client_update`

```python
def apply_light_client_update(snapshot: LightClientSnapshot, update: LightClientUpdate) -> None:
    snapshot_period = compute_epoch_at_slot(snapshot.header.slot) // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    update_period = compute_epoch_at_slot(update.header.slot) // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    if update_period == snapshot_period + 1:
        snapshot.current_sync_committee = snapshot.next_sync_committee
        snapshot.next_sync_committee = update.next_sync_committee
    snapshot.header = update.header
```

#### `process_light_client_update`

```python
def process_light_client_update(store: LightClientStore, update: LightClientUpdate, current_slot: Slot,
                                genesis_validators_root: Root) -> None:
    validate_light_client_update(store.snapshot, update, genesis_validators_root)
    store.valid_updates.append(update)

    if (
        sum(update.sync_committee_bits) * 3 > len(update.sync_committee_bits) * 2
        and update.finality_header != BeaconBlockHeader()
    ):
        # Apply update if (1) 2/3 quorum is reached and (2) we have a finality proof.
        # Note that (2) means that the current light client design needs finality.
        # It may be changed to a re-organizable light client design. See the on-going issue eth2.0-specs#2182.
        apply_light_client_update(store.snapshot, update)
        store.valid_updates = []
    elif current_slot > store.snapshot.header.slot + LIGHT_CLIENT_UPDATE_TIMEOUT:
        # Forced best update when the update timeout has elapsed
        apply_light_client_update(store.snapshot,
                                  max(store.valid_updates, key=lambda update: sum(update.sync_committee_bits)))
        store.valid_updates = []
```
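
A minimal driver loop tying these pieces together might look as follows (a sketch under the assumption of hypothetical `fetch_updates` and `current_slot_from_clock` helpers; not part of the specification):

```python
def run_light_client(store: LightClientStore, genesis_validators_root: Root) -> None:
    while True:
        for update in fetch_updates():  # hypothetical network source of LightClientUpdate objects
            try:
                process_light_client_update(store, update, current_slot_from_clock(), genesis_validators_root)
            except AssertionError:
                continue  # discard updates that fail validation
```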
|
||||
415
specs/altair/validator.md
Normal file
415
specs/altair/validator.md
Normal file
@@ -0,0 +1,415 @@
|
||||
# Ethereum 2.0 Altair -- Honest Validator
|
||||
|
||||
This is an accompanying document to [Ethereum 2.0 Altair -- The Beacon Chain](./beacon-chain.md), which describes the expected actions of a "validator" participating in the Ethereum 2.0 protocol.
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Warning](#warning)
|
||||
- [Constants](#constants)
|
||||
- [Misc](#misc)
|
||||
- [Containers](#containers)
|
||||
- [`SyncCommitteeSignature`](#synccommitteesignature)
|
||||
- [`SyncCommitteeContribution`](#synccommitteecontribution)
|
||||
- [`ContributionAndProof`](#contributionandproof)
|
||||
- [`SignedContributionAndProof`](#signedcontributionandproof)
|
||||
- [Validator assignments](#validator-assignments)
|
||||
- [Sync Committee](#sync-committee)
|
||||
- [Lookahead](#lookahead)
|
||||
- [Beacon chain responsibilities](#beacon-chain-responsibilities)
|
||||
- [Block proposal](#block-proposal)
|
||||
- [Preparing a `BeaconBlock`](#preparing-a-beaconblock)
|
||||
- [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
|
||||
- [Sync committee](#sync-committee)
|
||||
- [Packaging into a `SignedBeaconBlock`](#packaging-into-a-signedbeaconblock)
|
||||
- [Attesting and attestation aggregation](#attesting-and-attestation-aggregation)
|
||||
- [Sync committees](#sync-committees)
|
||||
- [Sync committee signatures](#sync-committee-signatures)
|
||||
- [Prepare sync committee signature](#prepare-sync-committee-signature)
|
||||
- [Broadcast sync committee signature](#broadcast-sync-committee-signature)
|
||||
- [Sync committee contributions](#sync-committee-contributions)
|
||||
- [Aggregation selection](#aggregation-selection)
|
||||
- [Construct sync committee contribution](#construct-sync-committee-contribution)
|
||||
- [Slot](#slot)
|
||||
- [Beacon block root](#beacon-block-root)
|
||||
- [Subcommittee index](#subcommittee-index)
|
||||
- [Aggregate bits](#aggregate-bits)
|
||||
- [Signature](#signature)
|
||||
- [Broadcast sync committee contribution](#broadcast-sync-committee-contribution)
|
||||
- [Sync committee subnet stability](#sync-committee-subnet-stability)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This document represents the expected behavior of an "honest validator" with respect to Altair of the Ethereum 2.0 protocol.
|
||||
It builds on the [previous document for the behavior of an "honest validator" from Phase 0](../phase0/validator.md) of the Ethereum 2.0 protocol.
|
||||
This previous document is referred to below as the "Phase 0 document".
|
||||
|
||||
Altair introduces a new type of committee: the sync committee. Sync committees are responsible for signing each block of the canonical chain and there exists an efficient algorithm for light clients to sync the chain using the output of the sync committees.
|
||||
See the [sync protocol](./sync-protocol.md) for further details on the light client sync.
|
||||
Under this network upgrade, validators track their participation in this new committee type and produce the relevant signatures as required.
|
||||
Block proposers incorporate the (aggregated) sync committee signatures into each block they produce.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
All terminology, constants, functions, and protocol mechanics defined in the [Altair -- The Beacon Chain](./beacon-chain.md) doc are requisite for this document and used throughout.
|
||||
Please see this document before continuing and use as a reference throughout.
|
||||
|
||||
## Warning
|
||||
|
||||
This document is currently illustrative for early Altair testnets and some parts are subject to change, especially pending implementation and profiling of Altair testnets.
|
||||
|
||||
## Constants
|
||||
|
||||
### Misc
|
||||
|
||||
| Name | Value | Unit |
|
||||
| - | - | :-: |
|
||||
| `TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE` | `2**2` (= 4) | validators |
|
||||
| `SYNC_COMMITTEE_SUBNET_COUNT` | `8` | The number of sync committee subnets used in the gossipsub aggregation protocol. |
|
||||
|
||||
## Containers
|
||||
|
||||
### `SyncCommitteeSignature`
|
||||
|
||||
```python
|
||||
class SyncCommitteeSignature(Container):
|
||||
# Slot to which this contribution pertains
|
||||
slot: Slot
|
||||
# Block root for this signature
|
||||
beacon_block_root: Root
|
||||
# Index of the validator that produced this signature
|
||||
validator_index: ValidatorIndex
|
||||
# Signature by the validator over the block root of `slot`
|
||||
signature: BLSSignature
|
||||
```
|
||||
|
||||
### `SyncCommitteeContribution`
|
||||
|
||||
```python
|
||||
class SyncCommitteeContribution(Container):
|
||||
# Slot to which this contribution pertains
|
||||
slot: Slot
|
||||
# Block root for this contribution
|
||||
beacon_block_root: Root
|
||||
# The subcommittee this contribution pertains to out of the broader sync committee
|
||||
subcommittee_index: uint64
|
||||
# A bit is set if a signature from the validator at the corresponding
|
||||
# index in the subcommittee is present in the aggregate `signature`.
|
||||
aggregation_bits: Bitvector[SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT]
|
||||
# Signature by the validator(s) over the block root of `slot`
|
||||
signature: BLSSignature
|
||||
```
|
||||
|
||||
### `ContributionAndProof`
|
||||
|
||||
```python
|
||||
class ContributionAndProof(Container):
|
||||
aggregator_index: ValidatorIndex
|
||||
contribution: SyncCommitteeContribution
|
||||
selection_proof: BLSSignature
|
||||
```
|
||||
|
||||
### `SignedContributionAndProof`
|
||||
|
||||
```python
|
||||
class SignedContributionAndProof(Container):
|
||||
message: ContributionAndProof
|
||||
signature: BLSSignature
|
||||
```
|
||||
|
||||
## Validator assignments
|
||||
|
||||
A validator determines beacon committee assignments and beacon block proposal duties as defined in the Phase 0 document.
|
||||
|
||||
### Sync Committee
|
||||
|
||||
To determine sync committee assignments, a validator can run the following function: `is_assigned_to_sync_committee(state, epoch, validator_index)` where `epoch` is an epoch number within the current or next sync committee period.
|
||||
This function is a predicate indicating the presence or absence of the validator in the corresponding sync committee for the queried sync committee period.
|
||||
|
||||
```python
def compute_sync_committee_period(epoch: Epoch) -> uint64:
    return epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD

def is_assigned_to_sync_committee(state: BeaconState,
                                  epoch: Epoch,
                                  validator_index: ValidatorIndex) -> bool:
    sync_committee_period = compute_sync_committee_period(epoch)
    current_epoch = get_current_epoch(state)
    current_sync_committee_period = compute_sync_committee_period(current_epoch)
    next_sync_committee_period = current_sync_committee_period + 1
    assert sync_committee_period in (current_sync_committee_period, next_sync_committee_period)

    pubkey = state.validators[validator_index].pubkey
    if sync_committee_period == current_sync_committee_period:
        return pubkey in state.current_sync_committee.pubkeys
    else:  # sync_committee_period == next_sync_committee_period
        return pubkey in state.next_sync_committee.pubkeys
```
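
For example, a validator client could check its duties for both the current and the next period as follows (usage sketch; `state` is assumed to be a recent head state and `validator_index` the local validator's index):

```python
current_epoch = get_current_epoch(state)
next_period_start_epoch = Epoch(
    (compute_sync_committee_period(current_epoch) + 1) * EPOCHS_PER_SYNC_COMMITTEE_PERIOD
)
in_current_period = is_assigned_to_sync_committee(state, current_epoch, validator_index)
in_next_period = is_assigned_to_sync_committee(state, next_period_start_epoch, validator_index)
```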
|
||||
|
||||
### Lookahead
|
||||
|
||||
The sync committee shufflings give validators 1 sync committee period of lookahead which amounts to `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` epochs.
|
||||
At any given `epoch`, the `BeaconState` contains the current `SyncCommittee` and the next `SyncCommittee`.
|
||||
Once every `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` epochs, the next `SyncCommittee` becomes the current `SyncCommittee` and the next committee is computed and stored.
|
||||
|
||||
*Note*: The data required to compute a given committee is not cached in the `BeaconState` after committees are calculated at the period boundaries.
|
||||
This means that calling `get_sync_committee()` in a given `epoch` can return a different result than what was computed during the relevant epoch transition.
|
||||
For this reason, *always* get committee assignments via the fields of the `BeaconState` (`current_sync_committee` and `next_sync_committee`) or use the above reference code.
|
||||
|
||||
A validator should plan for future sync committee assignments by noting which sync committee periods they are selected for participation.
|
||||
Specifically, a validator should:
|
||||
* Upon (re)syncing the chain and upon sync committee period boundaries, check for assignments in the current and next sync committee periods.
|
||||
* If the validator is in the current sync committee period, they can perform the responsibilities below for sync committee rewards.
|
||||
* If the validator is in the next sync committee period, they should wait until the next `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` boundary and then perform the responsibilities throughout that period.
|
||||
|
||||
## Beacon chain responsibilities
|
||||
|
||||
A validator maintains the responsibilities given in the Phase 0 document.
|
||||
|
||||
Block proposals are modified to incorporate the sync committee signatures as detailed below.
|
||||
|
||||
When assigned to a sync committee, validators have a new responsibility to sign beacon block roots during each slot of the sync committee period.
|
||||
These signatures are aggregated and routed to the proposer over gossip for inclusion into a beacon block.
|
||||
Assignments to a particular sync committee are infrequent at normal validator counts; however, an action every slot is required when in the current active sync committee.
|
||||
|
||||
### Block proposal
|
||||
|
||||
Refer to the phase 0 document for the majority of the [block proposal responsibility](../phase0/validator.md#block-proposal).
|
||||
The validator should follow those instructions to prepare a `SignedBeaconBlock` for inclusion into the chain. All changes are additive to phase 0 and noted below.
|
||||
|
||||
#### Preparing a `BeaconBlock`
|
||||
|
||||
No change to [Preparing for a `BeaconBlock`](../phase0/validator.md#preparing-for-a-beaconblock).
|
||||
|
||||
#### Constructing the `BeaconBlockBody`
|
||||
|
||||
Each section of [Constructing the `BeaconBlockBody`](../phase0/validator.md#constructing-the-beaconblockbody) should be followed.
|
||||
After constructing the `BeaconBlockBody` as per that section, the proposer has an additional task to include the sync committee signatures:
|
||||
|
||||
##### Sync committee
|
||||
|
||||
The proposer receives a number of `SyncCommitteeContribution`s (wrapped in `SignedContributionAndProof`s on the wire) from validators in the sync committee who are selected to partially aggregate signatures from independent subcommittees formed by breaking the full sync committee into `SYNC_COMMITTEE_SUBNET_COUNT` pieces (see below for details).
|
||||
|
||||
The proposer collects these contributions for further aggregation when preparing a block.
|
||||
Proposers should select the best contribution seen across all aggregators for each subnet/subcommittee when preparing a block.
|
||||
A contribution with more valid signatures is better than a contribution with fewer signatures.
|
||||
|
||||
Recall `block.body.sync_aggregate.sync_committee_bits` is a `Bitvector` where the `i`th bit is `True` if the corresponding validator in the sync committee has produced a valid signature,
|
||||
and that `block.body.sync_aggregate.sync_committee_signature` is the aggregate BLS signature combining all of the valid signatures.
|
||||
|
||||
Given a collection of the best seen `contributions` (with no repeating `subcommittee_index` values) and the `BeaconBlock` under construction,
|
||||
the proposer processes them as follows:
|
||||
|
||||
```python
def process_sync_committee_contributions(state: BeaconState,
                                         block: BeaconBlock,
                                         contributions: Set[SyncCommitteeContribution]) -> None:
    sync_aggregate = SyncAggregate()
    signatures = []

    for contribution in contributions:
        subcommittee_index = contribution.subcommittee_index
        for index, participated in enumerate(contribution.aggregation_bits):
            if participated:
                participant_index = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT * subcommittee_index + index
                sync_aggregate.sync_committee_bits[participant_index] = True
        signatures.append(contribution.signature)

    sync_aggregate.sync_committee_signature = bls.Aggregate(signatures)

    block.body.sync_aggregate = sync_aggregate
```
|
||||
|
||||
*Note*: The resulting block must pass the validations for the `SyncAggregate` defined in `process_sync_committee` defined in the [state transition document](./beacon-chain.md#sync-committee-processing).
|
||||
In particular, this means `SyncCommitteeContribution`s received from gossip must have a `beacon_block_root` that matches the proposer's local view of the chain.
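
One possible way to keep only the best contribution per subcommittee before calling `process_sync_committee_contributions` (a non-normative sketch; `seen_contributions` is whatever the client has collected from gossip):

```python
def select_best_contributions(seen_contributions: Sequence[SyncCommitteeContribution]) -> Sequence[SyncCommitteeContribution]:
    best: Dict[uint64, SyncCommitteeContribution] = {}
    for contribution in seen_contributions:
        current = best.get(contribution.subcommittee_index)
        if current is None or sum(contribution.aggregation_bits) > sum(current.aggregation_bits):
            best[contribution.subcommittee_index] = contribution
    return list(best.values())
```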
|
||||
|
||||
#### Packaging into a `SignedBeaconBlock`
|
||||
|
||||
No change to [Packaging into a `SignedBeaconBlock`](../phase0/validator.md#packaging-into-a-signedbeaconblock).
|
||||
|
||||
### Attesting and attestation aggregation
|
||||
|
||||
Refer to the phase 0 document for the [attesting](../phase0/validator.md#attesting) and [attestation aggregation](../phase0/validator.md#attestation-aggregation) responsibilities.
|
||||
There is no change compared to the phase 0 document.
|
||||
|
||||
### Sync committees
|
||||
|
||||
Sync committee members employ an aggregation scheme to reduce load on the global proposer channel that is monitored by all potential proposers to be able to include the full output of the sync committee every slot.
|
||||
Sync committee members produce individual signatures on subnets (similar to the attestation subnets) via `SyncCommitteeSignature`s which are then collected by aggregators sampled from the sync subcommittees to produce a `SyncCommitteeContribution` which is gossiped to proposers.
|
||||
This process occurs each slot.
|
||||
|
||||
#### Sync committee signatures
|
||||
|
||||
##### Prepare sync committee signature
|
||||
|
||||
If a validator is in the current sync committee (i.e. `is_assigned_to_sync_committee` above returns `True`), then for every slot in the current sync committee period the validator should prepare a `SyncCommitteeSignature` according to the logic in `get_sync_committee_signature` as soon as they have determined the head block of the current slot.
|
||||
|
||||
This logic is triggered upon the same conditions as when producing an attestation.
|
||||
Meaning, a sync committee member should produce and broadcast a `SyncCommitteeSignature` either when (a) the validator has received a valid block from the expected block proposer for the current `slot` or (b) one-third of the slot has transpired (`SECONDS_PER_SLOT / 3` seconds after the start of the slot) -- whichever comes first.
|
||||
|
||||
`get_sync_committee_signature` assumes `state` is the head state corresponding to processing the block at the current slot as determined by the fork choice (including any empty slots processed with `process_slots`), `validator_index` is the index of the validator in the registry `state.validators` controlled by `privkey`, and `privkey` is the BLS private key for the validator.
|
||||
|
||||
```python
def get_sync_committee_signature(state: BeaconState,
                                 validator_index: ValidatorIndex,
                                 privkey: int) -> SyncCommitteeSignature:
    epoch = get_current_epoch(state)
    domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, epoch)
    signing_root = compute_signing_root(get_block_root_at_slot(state, state.slot), domain)
    signature = bls.Sign(privkey, signing_root)

    return SyncCommitteeSignature(slot=state.slot, validator_index=validator_index, signature=signature)
```
|
||||
|
||||
##### Broadcast sync committee signature
|
||||
|
||||
The validator broadcasts the assembled signature to the assigned subnet, the `sync_committee_{subnet_id}` pubsub topic.
|
||||
|
||||
The `subnet_id` is derived from the position in the sync committee such that the sync committee is divided into "subcommittees".
|
||||
It can be computed via `compute_subnets_for_sync_committee` where `state` is a `BeaconState` during the matching sync committee period.
|
||||
This function returns multiple subnets if a given validator index is included multiple times in a given sync committee across multiple subcommittees.
|
||||
|
||||
```python
def compute_subnets_for_sync_committee(state: BeaconState, validator_index: ValidatorIndex) -> Sequence[uint64]:
    target_pubkey = state.validators[validator_index].pubkey
    sync_committee_indices = [index for index, pubkey in enumerate(state.current_sync_committee.pubkeys) if pubkey == target_pubkey]
    return [
        uint64(index // (SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT))
        for index in sync_committee_indices
    ]
```
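
For example, with an assumed `SYNC_COMMITTEE_SIZE` of 1024 and 8 subnets (128 members per subcommittee), a validator whose pubkey appears at committee positions 5 and 300 is mapped to subnets 0 and 2:

```python
SUBCOMMITTEE_SIZE = 1024 // 8  # assumed sizes for this example only
assert [position // SUBCOMMITTEE_SIZE for position in [5, 300]] == [0, 2]
```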
|
||||
|
||||
*Note*: Subnet assignment does not change during the duration of a validator's assignment to a given sync committee.
|
||||
|
||||
*Note*: If a validator has multiple `subnet_id` results from `compute_subnets_for_sync_committee`, the validator should broadcast a copy of the `sync_committee_signature` on each of the distinct subnets.
|
||||
|
||||
#### Sync committee contributions
|
||||
|
||||
Each slot some sync committee members in each subcommittee are selected to aggregate the `SyncCommitteeSignature`s into a `SyncCommitteeContribution` which is broadcast on a global channel for inclusion into the next block.
|
||||
|
||||
##### Aggregation selection
|
||||
|
||||
A validator is selected to aggregate based on the computation in `is_sync_committee_aggregator` where `state` is a `BeaconState` as supplied to `get_sync_committee_slot_signature` and `signature` is the BLS signature returned by `get_sync_committee_slot_signature`.
|
||||
The signature function takes a `BeaconState` with the relevant sync committees for the queried `slot` (i.e. `state.slot` is within the span covered by the current or next sync committee period), the `subcommittee_index` equal to the `subnet_id`, and the `privkey` is the BLS private key associated with the validator.
|
||||
|
||||
```python
class SyncCommitteeSigningData(Container):
    slot: Slot
    subcommittee_index: uint64
```

```python
def get_sync_committee_slot_signature(state: BeaconState, slot: Slot, subcommittee_index: uint64, privkey: int) -> BLSSignature:
    domain = get_domain(state, DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF, compute_epoch_at_slot(slot))
    signing_data = SyncCommitteeSigningData(
        slot=slot,
        subcommittee_index=subcommittee_index,
    )
    signing_root = compute_signing_root(signing_data, domain)
    return bls.Sign(privkey, signing_root)
```

```python
def is_sync_committee_aggregator(state: BeaconState, signature: BLSSignature) -> bool:
    modulo = max(1, SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT // TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE)
    return bytes_to_uint64(hash(signature)[0:8]) % modulo == 0
```
|
||||
|
||||
*Note*: The set of aggregators generally changes every slot; however, the assignments can be computed ahead of time as soon as the committee is known.
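
With a `SYNC_COMMITTEE_SIZE` of 1024 assumed purely for illustration, each subcommittee has 128 members, so `modulo == 32` and roughly `TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE` (4) members are expected to be selected per subcommittee each slot:

```python
# Illustration with assumed sizes (SYNC_COMMITTEE_SIZE = 1024, 8 subnets, target of 4 aggregators):
assert max(1, 1024 // 8 // 4) == 32   # each member is selected with probability 1/32
assert (1024 // 8) / 32 == 4.0        # ~4 expected aggregators per 128-member subcommittee
```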
|
||||
|
||||
##### Construct sync committee contribution
|
||||
|
||||
If a validator is selected to aggregate the `SyncCommitteeSignature`s produced on a subnet during a given `slot`, they construct an aggregated `SyncCommitteeContribution`.
|
||||
|
||||
Given all of the (valid) collected `sync_committee_signatures: Set[SyncCommitteeSignature]` from the `sync_committee_{subnet_id}` gossip during the selected `slot` with an equivalent `beacon_block_root` to that of the aggregator, the aggregator creates a `contribution: SyncCommitteeContribution` with the following fields:
|
||||
|
||||
###### Slot
|
||||
|
||||
Set `contribution.slot = state.slot` where `state` is the `BeaconState` for the slot in question.
|
||||
|
||||
###### Beacon block root
|
||||
|
||||
Set `contribution.beacon_block_root = beacon_block_root` from the `beacon_block_root` found in the `sync_committee_signatures`.
|
||||
|
||||
###### Subcommittee index
|
||||
|
||||
Set `contribution.subcommittee_index` to the index for the subcommittee index corresponding to the subcommittee assigned to this subnet. This index matches the `subnet_id` used to derive the topic name.
|
||||
|
||||
###### Aggregate bits
|
||||
|
||||
Let `contribution.aggregation_bits` be a `Bitvector[SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT]`, where the `index`th bit is set in the `Bitvector` for each corresponding validator included in this aggregate from the corresponding subcommittee.
|
||||
An aggregator needs to find the index in the sync committee (as returned by `get_sync_committee_indices`) for a given validator referenced by `sync_committee_signature.validator_index` and map the sync committee index to an index in the subcommittee (along with the prior `subcommittee_index`). This index within the subcommittee is the one set in the `Bitvector`.
|
||||
For example, a validator with index `2044` could be at index `15` in the current sync committee. This sync committee index maps to `subcommittee_index` `1` with position `7` in the `Bitvector` for the contribution.
|
||||
Also note that a validator **could be included multiple times** in a given subcommittee such that multiple bits are set for a single `SyncCommitteeSignature`.
|
||||
|
||||
###### Signature
|
||||
|
||||
Set `contribution.signature = aggregate_signature` where `aggregate_signature` is obtained by assembling the appropriate collection of `BLSSignature`s from the set of `sync_committee_signatures` and using the `bls.Aggregate` function to produce an aggregate `BLSSignature`.
|
||||
The collection of input signatures should include one signature per validator who had a bit set in the `aggregation_bits` bitfield, with repeated signatures if one validator maps to multiple indices within the subcommittee.
|
||||
|
||||
##### Broadcast sync committee contribution
|
||||
|
||||
If the validator is selected to aggregate (`is_sync_committee_aggregator`), then they broadcast their best aggregate as a `SignedContributionAndProof` to the global aggregate channel (`sync_committee_contribution_and_proof` topic) two-thirds of the way through the `slot` -- that is, `SECONDS_PER_SLOT * 2 / 3` seconds after the start of `slot`.
|
||||
|
||||
Selection proofs are provided in `ContributionAndProof` to prove to the gossip channel that the validator has been selected as an aggregator.
|
||||
|
||||
`ContributionAndProof` messages are signed by the aggregator and broadcast inside of `SignedContributionAndProof` objects to prevent a class of DoS attacks and message forgeries.
|
||||
|
||||
First, `contribution_and_proof = get_contribution_and_proof(state, validator_index, contribution, privkey)` is constructed.
|
||||
|
||||
```python
def get_contribution_and_proof(state: BeaconState,
                               aggregator_index: ValidatorIndex,
                               contribution: SyncCommitteeContribution,
                               privkey: int) -> ContributionAndProof:
    selection_proof = get_sync_committee_slot_signature(
        state,
        contribution.slot,
        contribution.subcommittee_index,
        privkey,
    )
    return ContributionAndProof(
        aggregator_index=aggregator_index,
        contribution=contribution,
        selection_proof=selection_proof,
    )
```
|
||||
|
||||
Then `signed_contribution_and_proof = SignedContributionAndProof(message=contribution_and_proof, signature=signature)` is constructed and broadcast, where `signature` is obtained from:
|
||||
|
||||
```python
def get_contribution_and_proof_signature(state: BeaconState,
                                         contribution_and_proof: ContributionAndProof,
                                         privkey: int) -> BLSSignature:
    contribution = contribution_and_proof.contribution
    domain = get_domain(state, DOMAIN_CONTRIBUTION_AND_PROOF, compute_epoch_at_slot(contribution.slot))
    signing_root = compute_signing_root(contribution_and_proof, domain)
    return bls.Sign(privkey, signing_root)
```
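
Putting the two helpers together, the broadcast step might look like this (sketch only; `broadcast_to_sync_contribution_topic` is hypothetical client plumbing):

```python
contribution_and_proof = get_contribution_and_proof(state, validator_index, contribution, privkey)
signature = get_contribution_and_proof_signature(state, contribution_and_proof, privkey)
signed_contribution_and_proof = SignedContributionAndProof(
    message=contribution_and_proof,
    signature=signature,
)
broadcast_to_sync_contribution_topic(signed_contribution_and_proof)  # hypothetical network call
```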
|
||||
|
||||
## Sync committee subnet stability
|
||||
|
||||
The sync committee subnets need special care to ensure stability given the relatively low number of validators involved in the sync committee at any particular time.
|
||||
To provide this stability, a validator must do the following:
|
||||
|
||||
* Maintain advertisement of the subnet the validator is assigned to within the sync committee in their node's ENR as soon as they have joined the subnet.
|
||||
Subnet assignments are known `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` epochs in advance and can be computed with `compute_subnets_for_sync_committee` defined above.
|
||||
ENR advertisement is indicated by setting the appropriate bit(s) of the bitfield found under the `syncnets` key in the ENR corresponding to the derived `subnet_id`(s).
|
||||
Any bits modified for the sync committee responsibilities are unset in the ENR after any validators have left the sync committee.
|
||||
|
||||
*Note*: The first sync committee from phase 0 to the Altair fork will not be known until the fork happens which implies subnet assignments are not known until then.
|
||||
Early sync committee members should listen for topic subscriptions from peers and employ discovery via the ENR advertisements near the fork boundary to form initial subnets.
|
||||
Some early sync committee rewards may be missed while the initial subnets form.
|
||||
|
||||
* To join a sync committee subnet, select a random number of epochs before the end of the current sync committee period between 1 and `SYNC_COMMITTEE_SUBNET_COUNT`, inclusive.
|
||||
Validators should join their member subnet at the beginning of the epoch they have randomly selected.
|
||||
For example, if the next sync committee period starts at epoch `853,248` and the validator randomly selects an offset of `3`, they should join the subnet at the beginning of epoch `853,245`.
|
||||
Validators should leverage the lookahead period on sync committee assignments so that they can join the appropriate subnets ahead of their assigned sync committee period.
|
||||
@@ -58,7 +58,7 @@
|
||||
- [Crypto](#crypto)
|
||||
- [`hash`](#hash)
|
||||
- [`hash_tree_root`](#hash_tree_root)
|
||||
- [BLS Signatures](#bls-signatures)
|
||||
- [BLS signatures](#bls-signatures)
|
||||
- [Predicates](#predicates)
|
||||
- [`is_active_validator`](#is_active_validator)
|
||||
- [`is_eligible_for_activation_queue`](#is_eligible_for_activation_queue)
|
||||
@@ -114,7 +114,12 @@
|
||||
- [`process_rewards_and_penalties`](#process_rewards_and_penalties)
|
||||
- [Registry updates](#registry-updates)
|
||||
- [Slashings](#slashings)
|
||||
- [Final updates](#final-updates)
|
||||
- [Eth1 data votes updates](#eth1-data-votes-updates)
|
||||
- [Effective balances updates](#effective-balances-updates)
|
||||
- [Slashings balances updates](#slashings-balances-updates)
|
||||
- [Randao mixes updates](#randao-mixes-updates)
|
||||
- [Historical roots updates](#historical-roots-updates)
|
||||
- [Participation records rotation](#participation-records-rotation)
|
||||
- [Block processing](#block-processing)
|
||||
- [Block header](#block-header)
|
||||
- [RANDAO](#randao)
|
||||
@@ -252,12 +257,12 @@ The following values are (non-configurable) constants used throughout the specif
|
||||
| `WHISTLEBLOWER_REWARD_QUOTIENT` | `uint64(2**9)` (= 512) |
|
||||
| `PROPOSER_REWARD_QUOTIENT` | `uint64(2**3)` (= 8) |
|
||||
| `INACTIVITY_PENALTY_QUOTIENT` | `uint64(2**26)` (= 67,108,864) |
|
||||
| `MIN_SLASHING_PENALTY_QUOTIENT` | `uint64(2**7)` (=128) |
|
||||
| `MIN_SLASHING_PENALTY_QUOTIENT` | `uint64(2**7)` (= 128) |
|
||||
| `PROPORTIONAL_SLASHING_MULTIPLIER` | `uint64(1)` |
|
||||
|
||||
- The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**13` epochs (about 36 days) is the time it takes the inactivity penalty to reduce the balance of non-participating validators to about `1/sqrt(e) ~= 60.6%`. Indeed, the balance retained by offline validators after `n` epochs is about `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)`; so after `INVERSE_SQRT_E_DROP_TIME` epochs, it is roughly `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`. Note this value will be upgraded to `2**24` after Phase 0 mainnet stabilizes to provide a faster recovery in the event of an inactivity leak.
|
||||
|
||||
- The `PROPORTIONAL_SLASHING_MULTIPLIER` is set to `1` at initial mainnet launch, resulting in one-third of the minimum accountable safety margin in the event of a finality attack. After Phase 0 mainnet stablizes, this value will be upgraded to `3` to provide the maximal minimum accoutable safety margin.
|
||||
- The `PROPORTIONAL_SLASHING_MULTIPLIER` is set to `1` at initial mainnet launch, resulting in one-third of the minimum accountable safety margin in the event of a finality attack. After Phase 0 mainnet stabilizes, this value will be upgraded to `3` to provide the maximal minimum accountable safety margin.
|
||||
|
||||
### Max operations per block
|
||||
|
||||
@@ -607,17 +612,17 @@ def bytes_to_uint64(data: bytes) -> uint64:
|
||||
|
||||
`def hash_tree_root(object: SSZSerializable) -> Root` is a function for hashing objects into a single root by utilizing a hash tree structure, as defined in the [SSZ spec](../../ssz/simple-serialize.md#merkleization).
|
||||
|
||||
#### BLS Signatures
|
||||
#### BLS signatures
|
||||
|
||||
Eth2 makes use of BLS signatures as specified in the [IETF draft BLS specification draft-irtf-cfrg-bls-signature-04](https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04). Specifically, eth2 uses the `BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_` ciphersuite which implements the following interfaces:
|
||||
The [IETF BLS signature draft standard v4](https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04) with ciphersuite `BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_` defines the following functions:
|
||||
|
||||
- `def Sign(SK: int, message: Bytes) -> BLSSignature`
|
||||
- `def Verify(PK: BLSPubkey, message: Bytes, signature: BLSSignature) -> bool`
|
||||
- `def Sign(privkey: int, message: Bytes) -> BLSSignature`
|
||||
- `def Verify(pubkey: BLSPubkey, message: Bytes, signature: BLSSignature) -> bool`
|
||||
- `def Aggregate(signatures: Sequence[BLSSignature]) -> BLSSignature`
|
||||
- `def FastAggregateVerify(PKs: Sequence[BLSPubkey], message: Bytes, signature: BLSSignature) -> bool`
|
||||
- `def AggregateVerify(PKs: Sequence[BLSPubkey], messages: Sequence[Bytes], signature: BLSSignature) -> bool`
|
||||
- `def FastAggregateVerify(pubkeys: Sequence[BLSPubkey], message: Bytes, signature: BLSSignature) -> bool`
|
||||
- `def AggregateVerify(pubkeys: Sequence[BLSPubkey], messages: Sequence[Bytes], signature: BLSSignature) -> bool`
|
||||
|
||||
Within these specifications, BLS signatures are treated as a module for notational clarity, thus to verify a signature `bls.Verify(...)` is used.
|
||||
The above functions are accessed through the `bls` module, e.g. `bls.Verify`.
|
||||
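For experimentation outside the spec, the `py_ecc` package (which the pyspec itself uses for BLS) exposes this ciphersuite under `G2ProofOfPossession`; a minimal sketch, assuming `py_ecc` is installed:

```python
from py_ecc.bls import G2ProofOfPossession as bls

privkeys = [1, 2, 3]
pubkeys = [bls.SkToPk(sk) for sk in privkeys]

# One common message signed by every key -> FastAggregateVerify.
message = b"\x12" * 32
aggregate = bls.Aggregate([bls.Sign(sk, message) for sk in privkeys])
assert bls.FastAggregateVerify(pubkeys, message, aggregate)

# One distinct message per key -> AggregateVerify.
messages = [bytes([i + 1]) * 32 for i in range(len(privkeys))]
aggregate = bls.Aggregate([bls.Sign(sk, msg) for sk, msg in zip(privkeys, messages)])
assert bls.AggregateVerify(pubkeys, messages, aggregate)
```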
|
||||
### Predicates
|
||||
|
||||
@@ -1257,7 +1262,12 @@ def process_epoch(state: BeaconState) -> None:
|
||||
process_rewards_and_penalties(state)
|
||||
process_registry_updates(state)
|
||||
process_slashings(state)
|
||||
process_final_updates(state)
|
||||
process_eth1_data_reset(state)
|
||||
process_effective_balance_updates(state)
|
||||
process_slashings_reset(state)
|
||||
process_randao_mixes_reset(state)
|
||||
process_historical_roots_update(state)
|
||||
process_participation_record_updates(state)
|
||||
```
|
||||
|
||||
#### Helper functions
|
||||
@@ -1564,15 +1574,19 @@ def process_slashings(state: BeaconState) -> None:
|
||||
decrease_balance(state, ValidatorIndex(index), penalty)
|
||||
```
|
||||
|
||||
#### Final updates
|
||||
|
||||
#### Eth1 data votes updates
|
||||
```python
|
||||
def process_final_updates(state: BeaconState) -> None:
|
||||
current_epoch = get_current_epoch(state)
|
||||
next_epoch = Epoch(current_epoch + 1)
|
||||
def process_eth1_data_reset(state: BeaconState) -> None:
|
||||
next_epoch = Epoch(get_current_epoch(state) + 1)
|
||||
# Reset eth1 data votes
|
||||
if next_epoch % EPOCHS_PER_ETH1_VOTING_PERIOD == 0:
|
||||
state.eth1_data_votes = []
|
||||
```
|
||||
|
||||
#### Effective balances updates
|
||||
|
||||
```python
|
||||
def process_effective_balance_updates(state: BeaconState) -> None:
|
||||
# Update effective balances with hysteresis
|
||||
for index, validator in enumerate(state.validators):
|
||||
balance = state.balances[index]
|
||||
@@ -1584,14 +1598,41 @@ def process_final_updates(state: BeaconState) -> None:
|
||||
or validator.effective_balance + UPWARD_THRESHOLD < balance
|
||||
):
|
||||
validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
|
||||
```
|
||||
|
||||
#### Slashings balances updates
|
||||
|
||||
```python
|
||||
def process_slashings_reset(state: BeaconState) -> None:
|
||||
next_epoch = Epoch(get_current_epoch(state) + 1)
|
||||
# Reset slashings
|
||||
state.slashings[next_epoch % EPOCHS_PER_SLASHINGS_VECTOR] = Gwei(0)
|
||||
```
|
||||
|
||||
#### Randao mixes updates
|
||||
|
||||
```python
|
||||
def process_randao_mixes_reset(state: BeaconState) -> None:
|
||||
current_epoch = get_current_epoch(state)
|
||||
next_epoch = Epoch(current_epoch + 1)
|
||||
# Set randao mix
|
||||
state.randao_mixes[next_epoch % EPOCHS_PER_HISTORICAL_VECTOR] = get_randao_mix(state, current_epoch)
|
||||
```
|
||||
|
||||
#### Historical roots updates
|
||||
```python
|
||||
def process_historical_roots_update(state: BeaconState) -> None:
|
||||
# Set historical root accumulator
|
||||
next_epoch = Epoch(get_current_epoch(state) + 1)
|
||||
if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0:
|
||||
historical_batch = HistoricalBatch(block_roots=state.block_roots, state_roots=state.state_roots)
|
||||
state.historical_roots.append(hash_tree_root(historical_batch))
|
||||
```
|
||||
|
||||
#### Participation records rotation
|
||||
|
||||
```python
|
||||
def process_participation_record_updates(state: BeaconState) -> None:
|
||||
# Rotate current/previous epoch attestations
|
||||
state.previous_epoch_attestations = state.current_epoch_attestations
|
||||
state.current_epoch_attestations = []
|
||||
|
||||
@@ -101,7 +101,7 @@ It consists of four main sections:
|
||||
- [Compression/Encoding](#compressionencoding)
|
||||
- [Why are we using SSZ for encoding?](#why-are-we-using-ssz-for-encoding)
|
||||
- [Why are we compressing, and at which layers?](#why-are-we-compressing-and-at-which-layers)
|
||||
- [Why are using Snappy for compression?](#why-are-using-snappy-for-compression)
|
||||
- [Why are we using Snappy for compression?](#why-are-we-using-snappy-for-compression)
|
||||
- [Can I get access to unencrypted bytes on the wire for debugging purposes?](#can-i-get-access-to-unencrypted-bytes-on-the-wire-for-debugging-purposes)
|
||||
- [What are SSZ type size bounds?](#what-are-ssz-type-size-bounds)
|
||||
- [libp2p implementations matrix](#libp2p-implementations-matrix)
|
||||
@@ -292,7 +292,7 @@ If one or more validations fail while processing the items in order, return eith
|
||||
There are two primary global topics used to propagate beacon blocks (`beacon_block`)
|
||||
and aggregate attestations (`beacon_aggregate_and_proof`) to all nodes on the network.
|
||||
|
||||
There are three additional global topics are used to propagate lower frequency validator messages
|
||||
There are three additional global topics that are used to propagate lower frequency validator messages
|
||||
(`voluntary_exit`, `proposer_slashing`, and `attester_slashing`).
|
||||
|
||||
##### `beacon_block`
|
||||
@@ -313,6 +313,7 @@ The following validations MUST pass before forwarding the `signed_beacon_block`
|
||||
(via both gossip and non-gossip sources)
|
||||
(a client MAY queue blocks for processing once the parent block is retrieved).
|
||||
- _[REJECT]_ The block's parent (defined by `block.parent_root`) passes validation.
|
||||
- _[REJECT]_ The block is from a higher slot than its parent.
|
||||
- _[REJECT]_ The current `finalized_checkpoint` is an ancestor of `block` -- i.e.
|
||||
`get_ancestor(store, block.parent_root, compute_start_slot_at_epoch(store.finalized_checkpoint.epoch))
|
||||
== store.finalized_checkpoint.root`
|
||||
@@ -334,8 +335,6 @@ The following validations MUST pass before forwarding the `signed_aggregate_and_
|
||||
(a client MAY queue future aggregates for processing at the appropriate slot).
|
||||
- _[REJECT]_ The aggregate attestation's epoch matches its target -- i.e. `aggregate.data.target.epoch ==
|
||||
compute_epoch_at_slot(aggregate.data.slot)`
|
||||
- _[IGNORE]_ The valid aggregate attestation defined by `hash_tree_root(aggregate)` has _not_ already been seen
|
||||
(via aggregate gossip, within a verified block, or through the creation of an equivalent aggregate locally).
|
||||
- _[IGNORE]_ The `aggregate` is the first valid aggregate received for the aggregator
|
||||
with index `aggregate_and_proof.aggregator_index` for the epoch `aggregate.data.target.epoch`.
|
||||
- _[REJECT]_ The attestation has participants --
|
||||
@@ -419,7 +418,7 @@ The following validations MUST pass before forwarding the `attestation` on the s
|
||||
- _[REJECT]_ The signature of `attestation` is valid.
|
||||
- _[IGNORE]_ The block being voted for (`attestation.data.beacon_block_root`) has been seen
|
||||
(via both gossip and non-gossip sources)
|
||||
(a client MAY queue aggregates for processing once block is retrieved).
|
||||
(a client MAY queue attestations for processing once block is retrieved).
|
||||
- _[REJECT]_ The block being voted for (`attestation.data.beacon_block_root`) passes validation.
|
||||
- _[REJECT]_ The attestation's target block is an ancestor of the block named in the LMD vote -- i.e.
|
||||
`get_ancestor(store, attestation.data.beacon_block_root, compute_start_slot_at_epoch(attestation.data.target.epoch)) == attestation.data.target.root`
|
||||
@@ -937,7 +936,7 @@ where the fields of `ENRForkID` are defined as
|
||||
* `next_fork_epoch` is the epoch at which the next fork is planned and the `current_fork_version` will be updated.
|
||||
If no future fork is planned, set `next_fork_epoch = FAR_FUTURE_EPOCH` to signal this fact
|
||||
|
||||
*Note*: `fork_digest` is composed of values that are not not known until the genesis block/state are available.
|
||||
*Note*: `fork_digest` is composed of values that are not known until the genesis block/state are available.
|
||||
Due to this, clients SHOULD NOT form ENRs and begin peer discovery until genesis values are known.
|
||||
One notable exception to this rule is the distribution of bootnode ENRs prior to genesis.
|
||||
In this case, bootnode ENRs SHOULD be initially distributed with `eth2` field set as
|
||||
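The hunk above is cut off by the diff context. For orientation only, a pre-genesis `ENRForkID` is typically populated along the following lines; the zeroed `genesis_validators_root` stub, the plain dataclass stand-in for the SSZ container, and the assumed import path `eth2spec.phase0.spec` are all part of this sketch, not of the spec text itself.

```python
from dataclasses import dataclass
from eth2spec.phase0 import spec


@dataclass
class ENRForkID:  # stand-in for the SSZ container defined in this document
    fork_digest: bytes
    next_fork_version: bytes
    next_fork_epoch: int


# Before genesis the genesis_validators_root is unknown, so a zeroed stub is used.
stub_genesis_validators_root = spec.Root()

enr_fork_id = ENRForkID(
    fork_digest=spec.compute_fork_digest(spec.GENESIS_FORK_VERSION, stub_genesis_validators_root),
    next_fork_version=spec.GENESIS_FORK_VERSION,
    next_fork_epoch=spec.FAR_FUTURE_EPOCH,  # no future fork planned
)
```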
@@ -1223,7 +1222,7 @@ the node's fork choice prevents integration of these messages into the actual co
|
||||
Depending on the number of validators, it may be more efficient to group shard subnets and might provide better stability for the gossipsub channel.
|
||||
The exact grouping will be dependent on more involved network tests.
|
||||
This constant allows for more flexibility in setting up the network topology for attestation aggregation (as aggregation should happen on each subnet).
|
||||
The value is currently set to to be equal `MAX_COMMITTEES_PER_SLOT` if/until network tests indicate otherwise.
|
||||
The value is currently set to be equal to `MAX_COMMITTEES_PER_SLOT` if/until network tests indicate otherwise.
|
||||
|
||||
### Why are attestations limited to be broadcast on gossip channels within `SLOTS_PER_EPOCH` slots?
|
||||
|
||||
@@ -1369,7 +1368,7 @@ Thus, it may happen that we need to transmit an empty list - there are several w
|
||||
|
||||
Semantically, it is not an error that a block is missing during a slot making option 2 unnatural.
|
||||
|
||||
Option 1 allows allows the responder to signal "no block", but this information may be wrong - for example in the case of a malicious node.
|
||||
Option 1 allows the responder to signal "no block", but this information may be wrong - for example in the case of a malicious node.
|
||||
|
||||
Under option 0, there is no way for a client to distinguish between a slot without a block and an incomplete response,
|
||||
but given that it already must contain logic to handle the uncertainty of a malicious peer, option 0 was chosen.
|
||||
@@ -1495,7 +1494,7 @@ This looks different depending on the interaction layer:
|
||||
implementers are encouraged to encapsulate the encoding and compression logic behind
|
||||
MessageReader and MessageWriter components/strategies that can be layered on top of the raw byte streams.
|
||||
|
||||
### Why are using Snappy for compression?
|
||||
### Why are we using Snappy for compression?
|
||||
|
||||
Snappy is used in Ethereum 1.0. It is well maintained by Google, has good benchmarks,
|
||||
and can calculate the size of the uncompressed object without inflating it in memory.
|
||||
|
||||
@@ -356,7 +356,9 @@ def get_eth1_vote(state: BeaconState, eth1_chain: Sequence[Eth1Block]) -> Eth1Da
|
||||
valid_votes = [vote for vote in state.eth1_data_votes if vote in votes_to_consider]
|
||||
|
||||
# Default vote on latest eth1 block data in the period range unless eth1 chain is not live
|
||||
default_vote = votes_to_consider[len(votes_to_consider) - 1] if any(votes_to_consider) else state.eth1_data
|
||||
# Non-substantive casting for linter
|
||||
state_eth1_data: Eth1Data = state.eth1_data
|
||||
default_vote = votes_to_consider[len(votes_to_consider) - 1] if any(votes_to_consider) else state_eth1_data
|
||||
|
||||
return max(
|
||||
valid_votes,
|
||||
@@ -645,5 +647,5 @@ A validator client should be considered standalone and should consider the beaco
|
||||
1) Private keys -- private keys should be protected from being exported accidentally or by an attacker.
|
||||
2) Slashing -- before a validator client signs a message it should validate the data, check it against a local slashing database (do not sign a slashable attestation or block) and update its internal slashing database with the newly signed object.
|
||||
3) Recovered validator -- Recovering a validator from a private key will result in an empty local slashing db. Best practice is to import (from a trusted source) that validator's attestation history. See [EIP 3076](https://github.com/ethereum/EIPs/pull/3076/files) for a standard slashing interchange format.
|
||||
4) Far future signing requests -- A validator client can be requested to sign a far into the future attestation, resulting in a valid non-slashable request. If the validator client signs this message, it will result in it blocking itself from attesting any other attestation until the beacon-chain reaches that far into the future epoch. This will result in an inactivity leak and potential ejection due to low balance.
|
||||
4) Far future signing requests -- A validator client can be requested to sign a far into the future attestation, resulting in a valid non-slashable request. If the validator client signs this message, it will result in it blocking itself from attesting any other attestation until the beacon-chain reaches that far into the future epoch. This will result in an inactivity penalty and potential ejection due to low balance.
|
||||
A validator client should prevent itself from signing such requests by: a) keeping a local time clock if possible and following best practices to stop time-server attacks, and b) refusing to sign, by default, any message that has a large (>6h) gap from the current slashing protection database, indicating a time "jump" or a long offline event. The administrator can manually override this protection to restart the validator after a genuine long offline event. A minimal sketch of such a pre-signing check is given below.
|
||||
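The sketch below is not the EIP-3076 interchange format, only an illustration of the kind of local check described above; the record type, the helper name, and the `MAX_FUTURE_TARGET_GAP` value are assumptions.

```python
from dataclasses import dataclass
from typing import List

MAX_FUTURE_TARGET_GAP = 56  # roughly 6 hours worth of epochs on mainnet; illustrative


@dataclass
class SignedAttestationRecord:
    source_epoch: int
    target_epoch: int


def is_safe_to_sign_attestation(history: List[SignedAttestationRecord],
                                source_epoch: int, target_epoch: int) -> bool:
    """Refuse anything slashable against the local history, plus far-future requests."""
    if history:
        latest_target = max(record.target_epoch for record in history)
        if target_epoch > latest_target + MAX_FUTURE_TARGET_GAP:
            return False  # suspicious time "jump"; require a manual operator override
    for record in history:
        if target_epoch == record.target_epoch:
            return False  # double vote
        if source_epoch < record.source_epoch and target_epoch > record.target_epoch:
            return False  # would surround a previously signed vote
        if source_epoch > record.source_epoch and target_epoch < record.target_epoch:
            return False  # would be surrounded by a previously signed vote
    return True
```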
|
||||
@@ -8,13 +8,17 @@
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Custom Types](#custom-types)
|
||||
- [Constants](#constants)
|
||||
- [Configuration](#configuration)
|
||||
- [Weak Subjectivity Checkpoint](#weak-subjectivity-checkpoint)
|
||||
- [Weak Subjectivity Period](#weak-subjectivity-period)
|
||||
- [Calculating the Weak Subjectivity Period](#calculating-the-weak-subjectivity-period)
|
||||
- [`compute_weak_subjectivity_period`](#compute_weak_subjectivity_period)
|
||||
- [Weak Subjectivity Sync](#weak-subjectivity-sync)
|
||||
- [Weak Subjectivity Sync Procedure](#weak-subjectivity-sync-procedure)
|
||||
- [Checking for Stale Weak Subjectivity Checkpoint](#checking-for-stale-weak-subjectivity-checkpoint)
|
||||
- [`is_within_weak_subjectivity_period`](#is_within_weak_subjectivity_period)
|
||||
- [Distributing Weak Subjectivity Checkpoints](#distributing-weak-subjectivity-checkpoints)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
@@ -34,15 +38,27 @@ For more information about weak subjectivity and why it is required, please refe
|
||||
This document uses data structures, constants, functions, and terminology from
|
||||
[Phase 0 -- The Beacon Chain](./beacon-chain.md) and [Phase 0 -- Beacon Chain Fork Choice](./fork-choice.md).
|
||||
|
||||
## Custom Types
|
||||
|
||||
| Name | SSZ Equivalent | Description |
|
||||
|---|---|---|
|
||||
| `Ether` | `uint64` | an amount in Ether |
|
||||
|
||||
## Constants
|
||||
|
||||
| Name | Value |
|
||||
|----------------|--------------|
|
||||
| Name | Value |
|
||||
|---|---|
|
||||
| `ETH_TO_GWEI` | `uint64(10**9)` |
|
||||
|
||||
## Configuration
|
||||
|
||||
| Name | Value |
|
||||
|---|---|
|
||||
| `SAFETY_DECAY` | `uint64(10)` |
|
||||
|
||||
## Weak Subjectivity Checkpoint
|
||||
|
||||
Any `Checkpoint` can used be a Weak Subjectivity Checkpoint.
|
||||
Any `Checkpoint` object can be used as a Weak Subjectivity Checkpoint.
|
||||
These Weak Subjectivity Checkpoints are distributed by providers,
|
||||
downloaded by users and/or distributed as a part of clients, and used as input while syncing a client.
|
||||
|
||||
@@ -59,38 +75,64 @@ a safety margin of at least `1/3 - SAFETY_DECAY/100`.
|
||||
|
||||
### Calculating the Weak Subjectivity Period
|
||||
|
||||
*Note*: `compute_weak_subjectivity_period()` is planned to be updated when a more accurate calculation is made.
|
||||
A detailed analysis of the calculation of the weak subjectivity period is made in [this report](https://github.com/runtimeverification/beacon-chain-verification/blob/master/weak-subjectivity/weak-subjectivity-analysis.pdf).
|
||||
|
||||
*Note*: The expressions in the report use fractions, whereas eth2.0-specs uses only `uint64` arithmetic. The expressions have been simplified to avoid computing fractions, and more details can be found [here](https://www.overleaf.com/read/wgjzjdjpvpsd).
|
||||
|
||||
*Note*: The calculations here use `Ether` instead of `Gwei`, because the large magnitude of balances in `Gwei` can cause an overflow while computing using `uint64` arithmetic operations. Using `Ether` reduces the magnitude of the multiplicative factors by an order of `ETH_TO_GWEI` (`= 10**9`) and avoids the risk of overflow in `uint64`.
|
||||
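A back-of-the-envelope comparison shows why: with `Gwei` magnitudes, one of the intermediate products in the formula below already approaches the `uint64` bound. The validator count and average balance used here are illustrative assumptions.

```python
UINT64_MAX = 2**64 - 1
ETH_TO_GWEI = 10**9
SAFETY_DECAY = 10

N = 1_048_576                      # active validators (illustrative)
t_ether = 32                       # average effective balance, in Ether
t_gwei = t_ether * ETH_TO_GWEI     # the same balance, in Gwei

# Intermediate product N * t * (200 + 12 * D) from the expression below:
print(N * t_ether * (200 + 12 * SAFETY_DECAY))   # ~1.1e10 -- comfortably inside uint64
print(N * t_gwei * (200 + 12 * SAFETY_DECAY))    # ~1.1e19 -- already near the limit
print(UINT64_MAX)                                # ~1.8e19
```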
|
||||
#### `compute_weak_subjectivity_period`
|
||||
|
||||
```python
|
||||
def compute_weak_subjectivity_period(state: BeaconState) -> uint64:
|
||||
weak_subjectivity_period = MIN_VALIDATOR_WITHDRAWABILITY_DELAY
|
||||
validator_count = len(get_active_validator_indices(state, get_current_epoch(state)))
|
||||
if validator_count >= MIN_PER_EPOCH_CHURN_LIMIT * CHURN_LIMIT_QUOTIENT:
|
||||
weak_subjectivity_period += SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100)
|
||||
"""
|
||||
Returns the weak subjectivity period for the current ``state``.
|
||||
This computation takes into account the effect of:
|
||||
- validator set churn (bounded by ``get_validator_churn_limit()`` per epoch), and
|
||||
- validator balance top-ups (bounded by ``MAX_DEPOSITS * SLOTS_PER_EPOCH`` per epoch).
|
||||
A detailed calculation can be found at:
|
||||
https://github.com/runtimeverification/beacon-chain-verification/blob/master/weak-subjectivity/weak-subjectivity-analysis.pdf
|
||||
"""
|
||||
ws_period = MIN_VALIDATOR_WITHDRAWABILITY_DELAY
|
||||
N = len(get_active_validator_indices(state, get_current_epoch(state)))
|
||||
t = get_total_active_balance(state) // N // ETH_TO_GWEI
|
||||
T = MAX_EFFECTIVE_BALANCE // ETH_TO_GWEI
|
||||
delta = get_validator_churn_limit(state)
|
||||
Delta = MAX_DEPOSITS * SLOTS_PER_EPOCH
|
||||
D = SAFETY_DECAY
|
||||
|
||||
if T * (200 + 3 * D) < t * (200 + 12 * D):
|
||||
epochs_for_validator_set_churn = (
|
||||
N * (t * (200 + 12 * D) - T * (200 + 3 * D)) // (600 * delta * (2 * t + T))
|
||||
)
|
||||
epochs_for_balance_top_ups = (
|
||||
N * (200 + 3 * D) // (600 * Delta)
|
||||
)
|
||||
ws_period += max(epochs_for_validator_set_churn, epochs_for_balance_top_ups)
|
||||
else:
|
||||
weak_subjectivity_period += SAFETY_DECAY * validator_count // (2 * 100 * MIN_PER_EPOCH_CHURN_LIMIT)
|
||||
return weak_subjectivity_period
|
||||
ws_period += (
|
||||
3 * N * D * t // (200 * Delta * (T - t))
|
||||
)
|
||||
|
||||
return ws_period
|
||||
```
|
||||
|
||||
*Details about the calculation*:
|
||||
- `100` appears in the denominator to get the actual percentage ratio from `SAFETY_DECAY`
|
||||
- For more information about other terms in this equation, refer to
|
||||
[Weak Subjectivity in Eth2.0](https://notes.ethereum.org/@adiasg/weak-subjectvity-eth2)
|
||||
A brief reference for what these values look like in practice ([reference script](https://gist.github.com/adiasg/3aceab409b36aa9a9d9156c1baa3c248)):
|
||||
|
||||
A brief reference for what these values look like in practice (a worked example reproducing one row follows the table):
|
||||
|
||||
| `validator_count` | `weak_subjectivity_period` |
|
||||
| ---- | ---- |
|
||||
| 1024 | 268 |
|
||||
| 2048 | 281 |
|
||||
| 4096 | 307 |
|
||||
| 8192 | 358 |
|
||||
| 16384 | 460 |
|
||||
| 32768 | 665 |
|
||||
| 65536 | 1075 |
|
||||
| 131072 | 1894 |
|
||||
| 262144 | 3532 |
|
||||
| 524288 | 3532 |
|
||||
| Safety Decay | Avg. Val. Balance (ETH) | Val. Count | Weak Sub. Period (Epochs) |
|
||||
| ---- | ---- | ---- | ---- |
|
||||
| 10 | 28 | 32768 | 504 |
|
||||
| 10 | 28 | 65536 | 752 |
|
||||
| 10 | 28 | 131072 | 1248 |
|
||||
| 10 | 28 | 262144 | 2241 |
|
||||
| 10 | 28 | 524288 | 2241 |
|
||||
| 10 | 28 | 1048576 | 2241 |
|
||||
| 10 | 32 | 32768 | 665 |
|
||||
| 10 | 32 | 65536 | 1075 |
|
||||
| 10 | 32 | 131072 | 1894 |
|
||||
| 10 | 32 | 262144 | 3532 |
|
||||
| 10 | 32 | 524288 | 3532 |
|
||||
| 10 | 32 | 1048576 | 3532 |
|
||||
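The rows above can be reproduced from `compute_weak_subjectivity_period` with a few lines of Python. The sketch below hard-codes mainnet constants as assumptions and recomputes the `Safety Decay = 10`, `32 ETH`, `32768`-validator row.

```python
# Mainnet constants, hard-coded for this standalone example.
MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256
MIN_PER_EPOCH_CHURN_LIMIT = 4
CHURN_LIMIT_QUOTIENT = 2**16
MAX_DEPOSITS = 16
SLOTS_PER_EPOCH = 32

D = 10       # SAFETY_DECAY
t = 32       # average effective balance, in Ether
T = 32       # MAX_EFFECTIVE_BALANCE, in Ether
N = 32768    # active validator count

delta = max(MIN_PER_EPOCH_CHURN_LIMIT, N // CHURN_LIMIT_QUOTIENT)  # validator churn limit
Delta = MAX_DEPOSITS * SLOTS_PER_EPOCH                             # balance top-ups per epoch

ws_period = MIN_VALIDATOR_WITHDRAWABILITY_DELAY
if T * (200 + 3 * D) < t * (200 + 12 * D):
    epochs_for_validator_set_churn = (
        N * (t * (200 + 12 * D) - T * (200 + 3 * D)) // (600 * delta * (2 * t + T))
    )
    epochs_for_balance_top_ups = N * (200 + 3 * D) // (600 * Delta)
    ws_period += max(epochs_for_validator_set_churn, epochs_for_balance_top_ups)
else:
    ws_period += 3 * N * D * t // (200 * Delta * (T - t))

print(ws_period)  # 665, matching the corresponding table row
```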
|
||||
## Weak Subjectivity Sync
|
||||
|
||||
@@ -101,22 +143,28 @@ Clients should allow users to input a Weak Subjectivity Checkpoint at startup, a
|
||||
1. Input a Weak Subjectivity Checkpoint as a CLI parameter in `block_root:epoch_number` format,
|
||||
where `block_root` (an "0x" prefixed 32-byte hex string) and `epoch_number` (an integer) represent a valid `Checkpoint`.
|
||||
Example of the format (a parsing sketch appears after this procedure):
|
||||
|
||||
```
|
||||
0x8584188b86a9296932785cc2827b925f9deebacce6d72ad8d53171fa046b43d9:9544
|
||||
```
|
||||
2. - *IF* `epoch_number > store.finalized_checkpoint.epoch`,
|
||||
then *ASSERT* during block sync that block with root `block_root` is in the sync path at epoch `epoch_number`.
|
||||
Emit descriptive critical error if this assert fails, then exit client process.
|
||||
|
||||
2. Check the weak subjectivity requirements:
|
||||
- *IF* `epoch_number > store.finalized_checkpoint.epoch`,
|
||||
then *ASSERT* during block sync that block with root `block_root` is in the sync path at epoch `epoch_number`.
|
||||
Emit descriptive critical error if this assert fails, then exit client process.
|
||||
- *IF* `epoch_number <= store.finalized_checkpoint.epoch`,
|
||||
then *ASSERT* that the block in the canonical chain at epoch `epoch_number` has root `block_root`.
|
||||
Emit descriptive critical error if this assert fails, then exit client process.
|
||||
then *ASSERT* that the block in the canonical chain at epoch `epoch_number` has root `block_root`.
|
||||
Emit descriptive critical error if this assert fails, then exit client process.
|
||||
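A minimal sketch of parsing the `block_root:epoch_number` CLI format from step 1 into its two components (the helper name is illustrative):

```python
from typing import Tuple


def parse_ws_checkpoint(value: str) -> Tuple[bytes, int]:
    """Split a `block_root:epoch_number` string into (root bytes, epoch)."""
    block_root_hex, epoch_str = value.split(":")
    if block_root_hex.startswith("0x"):
        block_root_hex = block_root_hex[2:]
    block_root = bytes.fromhex(block_root_hex)
    assert len(block_root) == 32
    return block_root, int(epoch_str)


root, epoch = parse_ws_checkpoint(
    "0x8584188b86a9296932785cc2827b925f9deebacce6d72ad8d53171fa046b43d9:9544"
)
assert epoch == 9544
```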
|
||||
### Checking for Stale Weak Subjectivity Checkpoint
|
||||
|
||||
Clients may choose to validate that the input Weak Subjectivity Checkpoint is not stale at the time of startup.
|
||||
To support this mechanism, the client needs to take the state at the Weak Subjectivity Checkpoint as
|
||||
a CLI parameter input (or fetch the state associated with the input Weak Subjectivity Checkpoint from some source).
|
||||
The check can be implemented in the following way:
|
||||
|
||||
#### `is_within_weak_subjectivity_period`
|
||||
|
||||
```python
|
||||
def is_within_weak_subjectivity_period(store: Store, ws_state: BeaconState, ws_checkpoint: Checkpoint) -> bool:
|
||||
# Clients may choose to validate the input state against the input Weak Subjectivity Checkpoint
|
||||
@@ -130,4 +178,5 @@ def is_within_weak_subjectivity_period(store: Store, ws_state: BeaconState, ws_c
|
||||
```
|
||||
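The diff context elides most of the function body. For orientation, the check has roughly the following shape; treat this as a sketch and refer to the full `weak-subjectivity.md` for the normative definition.

```python
def is_within_weak_subjectivity_period(store: Store, ws_state: BeaconState, ws_checkpoint: Checkpoint) -> bool:
    # Clients may choose to validate the input state against the input Weak Subjectivity Checkpoint
    assert ws_state.latest_block_header.state_root == ws_checkpoint.root
    assert compute_epoch_at_slot(ws_state.slot) == ws_checkpoint.epoch

    ws_period = compute_weak_subjectivity_period(ws_state)
    ws_state_epoch = compute_epoch_at_slot(ws_state.slot)
    current_epoch = compute_epoch_at_slot(get_current_slot(store))
    return current_epoch <= ws_state_epoch + ws_period
```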
|
||||
## Distributing Weak Subjectivity Checkpoints
|
||||
|
||||
This section will be updated soon.
|
||||
|
||||
@@ -1054,11 +1054,16 @@ def process_epoch(state: BeaconState) -> None:
|
||||
process_justification_and_finalization(state)
|
||||
process_rewards_and_penalties(state)
|
||||
process_registry_updates(state)
|
||||
process_reveal_deadlines(state)
|
||||
process_challenge_deadlines(state)
|
||||
process_reveal_deadlines(state) # Phase 1
|
||||
process_challenge_deadlines(state) # Phase 1
|
||||
process_slashings(state)
|
||||
process_final_updates(state) # phase 0 final updates
|
||||
process_phase_1_final_updates(state)
|
||||
process_eth1_data_reset(state)
|
||||
process_effective_balance_updates(state)
|
||||
process_slashings_reset(state)
|
||||
process_randao_mixes_reset(state)
|
||||
process_historical_roots_update(state)
|
||||
process_participation_record_updates(state)
|
||||
process_phase_1_final_updates(state) # Phase 1
|
||||
```
|
||||
|
||||
#### Phase 1 final updates
|
||||
|
||||
@@ -67,7 +67,7 @@ A validator is an entity that participates in the consensus of the Ethereum 2.0
|
||||
|
||||
This document is an extension of the [Phase 0 -- Validator](../phase0/validator.md). All behaviors and definitions defined in the Phase 0 doc carry over unless explicitly noted or overridden.
|
||||
|
||||
All terminology, constants, functions, and protocol mechanics defined in the [Phase 1 -- The Beacon Chain](./beacon-chain.md) and [Phase 1 -- Custody Game](./custody-game.md) docs are requisite for this document and used throughout. Please see the Phase 1 docs before continuing and use as a reference throughout.
|
||||
All terminology, constants, functions, and protocol mechanics defined in the [Phase 1 -- The Beacon Chain](./beacon-chain.md) and [Phase 1 -- Custody Game](./custody-game.md) docs are requisite for this document and used throughout. Please see the Phase 1 docs before continuing and use them as a reference throughout.
|
||||
|
||||
## Constants
|
||||
|
||||
@@ -352,7 +352,7 @@ Aggregation selection and the core of this duty are largely unchanged from Phase
|
||||
|
||||
Note the timing of when to broadcast aggregates is altered in Phase 1+.
|
||||
|
||||
If the validator is selected to aggregate (`is_aggregator`), then they broadcast their best aggregate as a `SignedAggregateAndProof` to the global aggregate channel (`beacon_aggregate_and_proof`) three-fourths of the way through the `slot`-that is, `SECONDS_PER_SLOT * 3 / 4` seconds after the start of `slot`.
|
||||
If the validator is selected to aggregate (`is_aggregator`), then they broadcast their best aggregate as a `SignedAggregateAndProof` to the global aggregate channel (`beacon_aggregate_and_proof`) three-fourths of the way through the `slot` -- that is, `SECONDS_PER_SLOT * 3 / 4` seconds after the start of `slot`.
|
||||
|
||||
##### `AggregateAndProof`
|
||||
|
||||
|
||||
@@ -246,16 +246,4 @@ We similarly define "summary types" and "expansion types". For example, [`Beacon
|
||||
|
||||
## Implementations
|
||||
|
||||
| Language | Project | Maintainer | Implementation |
|
||||
|-|-|-|-|
|
||||
| Python | Ethereum 2.0 | Ethereum Foundation | [https://github.com/ethereum/py-ssz](https://github.com/ethereum/py-ssz) |
|
||||
| Rust | Lighthouse | Sigma Prime | [https://github.com/sigp/lighthouse/tree/master/consensus/ssz](https://github.com/sigp/lighthouse/tree/master/consensus/ssz) |
|
||||
| Nim | Nimbus | Status | [https://github.com/status-im/nim-beacon-chain/blob/master/beacon_chain/ssz.nim](https://github.com/status-im/nim-beacon-chain/blob/master/beacon_chain/ssz.nim) |
|
||||
| Rust | Shasper | ParityTech | [https://github.com/paritytech/shasper/tree/master/utils/ssz](https://github.com/paritytech/shasper/tree/master/utils/ssz) |
|
||||
| TypeScript | Lodestar | ChainSafe Systems | [https://github.com/ChainSafe/ssz-js](https://github.com/ChainSafe/ssz) |
|
||||
| Java | Cava | ConsenSys | [https://www.github.com/ConsenSys/cava/tree/master/ssz](https://www.github.com/ConsenSys/cava/tree/master/ssz) |
|
||||
| Go | Prysm | Prysmatic Labs | [https://github.com/prysmaticlabs/go-ssz](https://github.com/prysmaticlabs/go-ssz) |
|
||||
| Swift | Yeeth | Dean Eigenmann | [https://github.com/yeeth/SimpleSerialize.swift](https://github.com/yeeth/SimpleSerialize.swift) |
|
||||
| C# | | Jordan Andrews | [https://github.com/codingupastorm/csharp-ssz](https://github.com/codingupastorm/csharp-ssz) |
|
||||
| C# | Cortex | Sly Gryphon | [https://www.nuget.org/packages/Cortex.SimpleSerialize](https://www.nuget.org/packages/Cortex.SimpleSerialize) |
|
||||
| C++ | | Jiyun Kim | [https://github.com/NAKsir-melody/cpp_ssz](https://github.com/NAKsir-melody/cpp_ssz) |
|
||||
See https://github.com/ethereum/eth2.0-specs/issues/2138 for a list of current known implementations.
|
||||
|
||||
@@ -1,40 +0,0 @@
|
||||
from inspect import getmembers, isfunction
|
||||
from typing import Any, Iterable
|
||||
|
||||
from gen_base.gen_typing import TestCase
|
||||
|
||||
|
||||
def generate_from_tests(runner_name: str, handler_name: str, src: Any,
|
||||
fork_name: str, bls_active: bool = True) -> Iterable[TestCase]:
|
||||
"""
|
||||
Generate a list of test cases by running tests from the given src in generator-mode.
|
||||
:param runner_name: to categorize the test in general as.
|
||||
:param handler_name: to categorize the test specialization as.
|
||||
:param src: to retrieve tests from (discovered using inspect.getmembers).
|
||||
:param fork_name: to run tests against particular phase and/or fork.
|
||||
(if multiple forks are applicable, indicate the last fork)
|
||||
:param bls_active: optional, to override BLS switch preference. Defaults to True.
|
||||
:return: an iterable of test cases.
|
||||
"""
|
||||
fn_names = [
|
||||
name for (name, _) in getmembers(src, isfunction)
|
||||
if name.startswith('test_')
|
||||
]
|
||||
print("generating test vectors from tests source: %s" % src.__name__)
|
||||
for name in fn_names:
|
||||
tfn = getattr(src, name)
|
||||
|
||||
# strip off the `test_`
|
||||
case_name = name
|
||||
if case_name.startswith('test_'):
|
||||
case_name = case_name[5:]
|
||||
|
||||
yield TestCase(
|
||||
fork_name=fork_name,
|
||||
runner_name=runner_name,
|
||||
handler_name=handler_name,
|
||||
suite_name='pyspec_tests',
|
||||
case_name=case_name,
|
||||
# TODO: with_all_phases and other per-phase tooling, should be replaced with per-fork equivalent.
|
||||
case_fn=lambda: tfn(generator_mode=True, phase=fork_name, bls_active=bls_active)
|
||||
)
|
||||
@@ -1,3 +0,0 @@
|
||||
ruamel.yaml==0.16.5
|
||||
eth-utils==1.6.0
|
||||
pytest>=4.4
|
||||
@@ -1,11 +0,0 @@
|
||||
from distutils.core import setup
|
||||
|
||||
setup(
|
||||
name='gen_helpers',
|
||||
packages=['gen_base', 'gen_from_tests'],
|
||||
install_requires=[
|
||||
"ruamel.yaml==0.16.5",
|
||||
"eth-utils==1.6.0",
|
||||
"pytest>=4.4",
|
||||
]
|
||||
)
|
||||
@@ -27,7 +27,7 @@ python setup.py pyspec --spec-fork=phase0 --md-doc-paths="specs/phase0/beacon-ch
|
||||
|
||||
After installing, you can install the optional dependencies for testing and linting.
|
||||
With makefile: `make install_test`.
|
||||
Or manually: run `pip install .[testing]` and `pip install .[linting]`.
|
||||
Or manually: run `pip install .[test]` and `pip install .[lint]`.
|
||||
|
||||
These tests are not intended for client-consumption.
|
||||
These tests are testing the spec itself, to verify consistency and provide feedback on modifications of the spec.
|
||||
|
||||
@@ -1 +1 @@
|
||||
1.0.1
|
||||
1.1.0-alpha.1
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
A util to quickly write new test suite generators with.
|
||||
|
||||
See [Generators documentation](../../generators/README.md) for integration details.
|
||||
See [Generators documentation](../../../../generators/README.md) for integration details.
|
||||
|
||||
Options:
|
||||
|
||||
@@ -43,8 +43,8 @@ The yielding pattern is:
|
||||
3 value style: `yield <key name> <kind name> <value>`.
|
||||
|
||||
Test part output kinds:
|
||||
- `ssz`: value is expected to be a `bytes`, and the raw data is written to a `<key name>.ssz` file.
|
||||
- `data`: value is expected to be any python object that can be dumped as YAML. Output is written to `<key name>.yaml`
|
||||
- `ssz`: value is expected to be a `bytes`, and the raw data is written to a `<key name>.ssz_snappy` file.
|
||||
- `data`: value is expected to be any Python object that can be dumped as YAML. Output is written to `<key name>.yaml`
|
||||
- `meta`: these key-value pairs are collected into a dict, and then collectively written to a metadata
|
||||
file named `meta.yaml`, if anything is yielded with the `meta` kind (when nothing is, no `meta.yaml` is written). An illustrative sketch of the yielding pattern follows.
|
||||
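As an illustration of the yielding pattern, a hypothetical test might produce all three kinds like this (names and values are made up for the example):

```python
def test_example(spec, state):
    yield 'pre', state                            # 2-value style; the kind is chosen by the framework
    yield 'description', 'meta', 'example case'   # collected into meta.yaml
    yield 'serialized', 'ssz', b'\x00' * 4        # raw bytes written to serialized.ssz_snappy
    yield 'post', 'data', {'ok': True}            # dumped as YAML to post.yaml
```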
|
||||
@@ -1,3 +1,5 @@
|
||||
import os
|
||||
import shutil
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
import sys
|
||||
@@ -8,11 +10,13 @@ from ruamel.yaml import (
|
||||
YAML,
|
||||
)
|
||||
|
||||
from gen_base.gen_typing import TestProvider
|
||||
from snappy import compress
|
||||
|
||||
from eth2spec.test import context
|
||||
from eth2spec.test.exceptions import SkippedTest
|
||||
|
||||
from .gen_typing import TestProvider
|
||||
|
||||
|
||||
# Flag that the runner does NOT run test via pytest
|
||||
context.is_pytest = False
|
||||
@@ -100,8 +104,11 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
|
||||
yaml = YAML(pure=True)
|
||||
yaml.default_flow_style = None
|
||||
|
||||
log_file = Path(output_dir) / 'testgen_error_log.txt'
|
||||
|
||||
print(f"Generating tests into {output_dir}")
|
||||
print(f"Reading configs from {args.configs_path}")
|
||||
print(f'Error log file: {log_file}')
|
||||
|
||||
configs = args.config_list
|
||||
if configs is None:
|
||||
@@ -119,18 +126,32 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
|
||||
|
||||
print(f"generating tests with config '{config_name}' ...")
|
||||
for test_case in tprov.make_cases():
|
||||
case_dir = Path(output_dir) / Path(config_name) / Path(test_case.fork_name) \
|
||||
/ Path(test_case.runner_name) / Path(test_case.handler_name) \
|
||||
/ Path(test_case.suite_name) / Path(test_case.case_name)
|
||||
case_dir = (
|
||||
Path(output_dir) / Path(config_name) / Path(test_case.fork_name)
|
||||
/ Path(test_case.runner_name) / Path(test_case.handler_name)
|
||||
/ Path(test_case.suite_name) / Path(test_case.case_name)
|
||||
)
|
||||
incomplete_tag_file = case_dir / "INCOMPLETE"
|
||||
|
||||
if case_dir.exists():
|
||||
if not args.force:
|
||||
if not args.force and not incomplete_tag_file.exists():
|
||||
print(f'Skipping already existing test: {case_dir}')
|
||||
continue
|
||||
print(f'Warning, output directory {case_dir} already exist,'
|
||||
f' old files are not deleted but will be overwritten when a new version is produced')
|
||||
else:
|
||||
print(f'Warning, output directory {case_dir} already exists,'
|
||||
f' old files will be deleted and it will generate test vector files with the latest version')
|
||||
# Clear the existing case_dir folder
|
||||
shutil.rmtree(case_dir)
|
||||
|
||||
print(f'Generating test: {case_dir}')
|
||||
|
||||
written_part = False
|
||||
|
||||
# Add `INCOMPLETE` tag file to indicate that the test generation has not completed.
|
||||
case_dir.mkdir(parents=True, exist_ok=True)
|
||||
with incomplete_tag_file.open("w") as f:
|
||||
f.write("\n")
|
||||
|
||||
try:
|
||||
def output_part(out_kind: str, name: str, fn: Callable[[Path, ], None]):
|
||||
# make sure the test case directory is created before any test part is written.
|
||||
@@ -140,7 +161,6 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
|
||||
except IOError as e:
|
||||
sys.exit(f'Error when dumping test "{case_dir}", part "{name}", kind "{out_kind}": {e}')
|
||||
|
||||
written_part = False
|
||||
meta = dict()
|
||||
|
||||
try:
|
||||
@@ -154,6 +174,7 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
|
||||
output_part("ssz", name, dump_ssz_fn(data, name, file_mode))
|
||||
except SkippedTest as e:
|
||||
print(e)
|
||||
shutil.rmtree(case_dir)
|
||||
continue
|
||||
|
||||
# Once all meta data is collected (if any), write it to a meta data file.
|
||||
@@ -163,10 +184,22 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
|
||||
|
||||
if not written_part:
|
||||
print(f"test case {case_dir} did not produce any test case parts")
|
||||
|
||||
except Exception as e:
|
||||
print(f"ERROR: failed to generate vector(s) for test {case_dir}: {e}")
|
||||
traceback.print_exc()
|
||||
# Write to log file
|
||||
with log_file.open("a+") as f:
|
||||
f.write(f"ERROR: failed to generate vector(s) for test {case_dir}: {e}")
|
||||
traceback.print_exc(file=f)
|
||||
f.write('\n')
|
||||
else:
|
||||
# If no written_part, the only file was incomplete_tag_file. Clear the existing case_dir folder.
|
||||
if not written_part:
|
||||
shutil.rmtree(case_dir)
|
||||
else:
|
||||
# Only remove `INCOMPLETE` tag file
|
||||
os.remove(incomplete_tag_file)
|
||||
|
||||
print(f"completed {generator_name}")
|
||||
|
||||
|
||||
@@ -180,7 +213,8 @@ def dump_yaml_fn(data: Any, name: str, file_mode: str, yaml_encoder: YAML):
|
||||
|
||||
def dump_ssz_fn(data: AnyStr, name: str, file_mode: str):
|
||||
def dump(case_path: Path):
|
||||
out_path = case_path / Path(name + '.ssz')
|
||||
out_path = case_path / Path(name + '.ssz_snappy')
|
||||
compressed = compress(data)
|
||||
with out_path.open(file_mode + 'b') as f: # write in raw binary mode
|
||||
f.write(data)
|
||||
f.write(compressed)
|
||||
return dump
|
||||
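Since vectors are now written snappy-compressed, a consumer has to decompress before SSZ-decoding. A minimal round-trip sketch with the same `python-snappy` package used above (the byte string stands in for real SSZ data):

```python
from snappy import compress, decompress

raw = b"\x01\x02\x03\x04"        # stand-in for SSZ-encoded bytes
blob = compress(raw)             # what dump_ssz_fn writes to <name>.ssz_snappy

# Reading a vector back:
assert decompress(blob) == raw   # then hand the bytes to an SSZ decoder
```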
105
tests/core/pyspec/eth2spec/gen_helpers/gen_from_tests/gen.py
Normal file
105
tests/core/pyspec/eth2spec/gen_helpers/gen_from_tests/gen.py
Normal file
@@ -0,0 +1,105 @@
|
||||
from importlib import reload, import_module
|
||||
from inspect import getmembers, isfunction
|
||||
from typing import Any, Callable, Dict, Iterable, Optional
|
||||
|
||||
from eth2spec.config import config_util
|
||||
from eth2spec.utils import bls
|
||||
from eth2spec.test.context import ALL_CONFIGS, TESTGEN_FORKS, SpecForkName, ConfigName
|
||||
|
||||
from eth2spec.gen_helpers.gen_base import gen_runner
|
||||
from eth2spec.gen_helpers.gen_base.gen_typing import TestCase, TestProvider
|
||||
|
||||
|
||||
def generate_from_tests(runner_name: str, handler_name: str, src: Any,
|
||||
fork_name: SpecForkName, bls_active: bool = True,
|
||||
phase: Optional[str]=None) -> Iterable[TestCase]:
|
||||
"""
|
||||
Generate a list of test cases by running tests from the given src in generator-mode.
|
||||
:param runner_name: to categorize the test in general as.
|
||||
:param handler_name: to categorize the test specialization as.
|
||||
:param src: to retrieve tests from (discovered using inspect.getmembers).
|
||||
:param fork_name: the folder name for these tests.
|
||||
(if multiple forks are applicable, indicate the last fork)
|
||||
:param bls_active: optional, to override BLS switch preference. Defaults to True.
|
||||
:param phase: optional, to run tests against a particular spec version. Default to `fork_name` value.
|
||||
:return: an iterable of test cases.
|
||||
"""
|
||||
fn_names = [
|
||||
name for (name, _) in getmembers(src, isfunction)
|
||||
if name.startswith('test_')
|
||||
]
|
||||
|
||||
if phase is None:
|
||||
phase = fork_name
|
||||
|
||||
print("generating test vectors from tests source: %s" % src.__name__)
|
||||
for name in fn_names:
|
||||
tfn = getattr(src, name)
|
||||
|
||||
# strip off the `test_`
|
||||
case_name = name
|
||||
if case_name.startswith('test_'):
|
||||
case_name = case_name[5:]
|
||||
|
||||
yield TestCase(
|
||||
fork_name=fork_name,
|
||||
runner_name=runner_name,
|
||||
handler_name=handler_name,
|
||||
suite_name='pyspec_tests',
|
||||
case_name=case_name,
|
||||
# TODO: with_all_phases and other per-phase tooling, should be replaced with per-fork equivalent.
|
||||
case_fn=lambda: tfn(generator_mode=True, phase=phase, bls_active=bls_active)
|
||||
)
|
||||
|
||||
|
||||
def get_provider(create_provider_fn: Callable[[SpecForkName, str, str, ConfigName], TestProvider],
|
||||
config_name: ConfigName,
|
||||
fork_name: SpecForkName,
|
||||
all_mods: Dict[str, Dict[str, str]]) -> Iterable[TestProvider]:
|
||||
for key, mod_name in all_mods[fork_name].items():
|
||||
yield create_provider_fn(
|
||||
fork_name=fork_name,
|
||||
handler_name=key,
|
||||
tests_src_mod_name=mod_name,
|
||||
config_name=config_name,
|
||||
)
|
||||
|
||||
|
||||
def get_create_provider_fn(
|
||||
runner_name: str, config_name: ConfigName, specs: Iterable[Any]
|
||||
) -> Callable[[SpecForkName, str, str, ConfigName], TestProvider]:
|
||||
def prepare_fn(configs_path: str) -> str:
|
||||
config_util.prepare_config(configs_path, config_name)
|
||||
for spec in specs:
|
||||
reload(spec)
|
||||
bls.use_milagro()
|
||||
return config_name
|
||||
|
||||
def create_provider(fork_name: SpecForkName, handler_name: str,
|
||||
tests_src_mod_name: str, config_name: ConfigName) -> TestProvider:
|
||||
def cases_fn() -> Iterable[TestCase]:
|
||||
tests_src = import_module(tests_src_mod_name)
|
||||
return generate_from_tests(
|
||||
runner_name=runner_name,
|
||||
handler_name=handler_name,
|
||||
src=tests_src,
|
||||
fork_name=fork_name,
|
||||
)
|
||||
|
||||
return TestProvider(prepare=prepare_fn, make_cases=cases_fn)
|
||||
return create_provider
|
||||
|
||||
|
||||
def run_state_test_generators(runner_name: str, specs: Iterable[Any], all_mods: Dict[str, Dict[str, str]]) -> None:
|
||||
"""
|
||||
Generate all available state tests of `TESTGEN_FORKS` forks of `ALL_CONFIGS` configs of the given runner.
|
||||
"""
|
||||
for config_name in ALL_CONFIGS:
|
||||
for fork_name in TESTGEN_FORKS:
|
||||
if fork_name in all_mods:
|
||||
gen_runner.run_generator(runner_name, get_provider(
|
||||
create_provider_fn=get_create_provider_fn(runner_name, config_name, specs),
|
||||
config_name=config_name,
|
||||
fork_name=fork_name,
|
||||
all_mods=all_mods,
|
||||
))
|
||||
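For context, a generator entry point is expected to wire these helpers up roughly as follows. This is a hypothetical sketch: the module paths, the handler map, and the runner name are assumptions for illustration, not taken from an actual generator.

```python
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair
from eth2spec.test.context import PHASE0, ALTAIR
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators

specs = (spec_phase0, spec_altair)

if __name__ == "__main__":
    # handler name -> module containing the `test_*` functions to run in generator mode
    phase_0_mods = {
        'example_handler': 'eth2spec.test.phase0.epoch_processing.test_example',  # hypothetical module
    }
    all_mods = {
        PHASE0: phase_0_mods,
        ALTAIR: phase_0_mods,  # forks may reuse a module map when the same tests apply
    }
    run_state_test_generators(runner_name='epoch_processing', specs=specs, all_mods=all_mods)
```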
0
tests/core/pyspec/eth2spec/test/altair/__init__.py
Normal file
0
tests/core/pyspec/eth2spec/test/altair/__init__.py
Normal file
@@ -0,0 +1,357 @@
|
||||
from collections import Counter
|
||||
import random
|
||||
from eth2spec.test.helpers.block import (
|
||||
build_empty_block_for_next_slot,
|
||||
)
|
||||
from eth2spec.test.helpers.block_processing import run_block_processing_to
|
||||
from eth2spec.test.helpers.state import (
|
||||
state_transition_and_sign_block,
|
||||
transition_to,
|
||||
)
|
||||
from eth2spec.test.helpers.sync_committee import (
|
||||
compute_aggregate_sync_committee_signature,
|
||||
)
|
||||
from eth2spec.test.context import (
|
||||
PHASE0, PHASE1,
|
||||
MAINNET, MINIMAL,
|
||||
expect_assertion_error,
|
||||
with_all_phases_except,
|
||||
with_configs,
|
||||
spec_state_test,
|
||||
always_bls,
|
||||
)
|
||||
from eth2spec.utils.hash_function import hash
|
||||
|
||||
|
||||
def run_sync_committee_processing(spec, state, block, expect_exception=False):
|
||||
"""
|
||||
Processes everything up to the sync committee work, then runs the sync committee work in isolation, and
|
||||
produces a pre-state and post-state (None if exception) specifically for sync-committee processing changes.
|
||||
"""
|
||||
# process up to the sync committee work
|
||||
call = run_block_processing_to(spec, state, block, 'process_sync_committee')
|
||||
yield 'pre', state
|
||||
yield 'sync_aggregate', block.body.sync_aggregate
|
||||
if expect_exception:
|
||||
expect_assertion_error(lambda: call(state, block))
|
||||
yield 'post', None
|
||||
else:
|
||||
call(state, block)
|
||||
yield 'post', state
|
||||
|
||||
|
||||
def get_committee_indices(spec, state, duplicates=False):
|
||||
"""
|
||||
This utility function allows the caller to ensure there are or are not
|
||||
duplicate validator indices in the returned committee based on
|
||||
the boolean ``duplicates``.
|
||||
"""
|
||||
state = state.copy()
|
||||
current_epoch = spec.get_current_epoch(state)
|
||||
randao_index = current_epoch % spec.EPOCHS_PER_HISTORICAL_VECTOR
|
||||
while True:
|
||||
committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
|
||||
if duplicates:
|
||||
if len(committee) != len(set(committee)):
|
||||
return committee
|
||||
else:
|
||||
if len(committee) == len(set(committee)):
|
||||
return committee
|
||||
state.randao_mixes[randao_index] = hash(state.randao_mixes[randao_index])
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_invalid_signature_missing_participant(spec, state):
|
||||
committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
|
||||
rng = random.Random(2020)
|
||||
random_participant = rng.choice(committee)
|
||||
|
||||
yield 'pre', state
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
# Exclude one participant whose signature was included.
|
||||
block.body.sync_aggregate = spec.SyncAggregate(
|
||||
sync_committee_bits=[index != random_participant for index in committee],
|
||||
sync_committee_signature=compute_aggregate_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
block.slot - 1,
|
||||
committee, # full committee signs
|
||||
)
|
||||
)
|
||||
yield from run_sync_committee_processing(spec, state, block, expect_exception=True)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_invalid_signature_extra_participant(spec, state):
|
||||
committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
|
||||
rng = random.Random(3030)
|
||||
random_participant = rng.choice(committee)
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
# Exclude one signature even though the block claims the entire committee participated.
|
||||
block.body.sync_aggregate = spec.SyncAggregate(
|
||||
sync_committee_bits=[True] * len(committee),
|
||||
sync_committee_signature=compute_aggregate_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
block.slot - 1,
|
||||
[index for index in committee if index != random_participant],
|
||||
)
|
||||
)
|
||||
|
||||
yield from run_sync_committee_processing(spec, state, block, expect_exception=True)
|
||||
|
||||
|
||||
def compute_sync_committee_inclusion_reward(spec, state, participant_index, committee, committee_bits):
|
||||
total_active_increments = spec.get_total_active_balance(state) // spec.EFFECTIVE_BALANCE_INCREMENT
|
||||
total_base_rewards = spec.Gwei(spec.get_base_reward_per_increment(state) * total_active_increments)
|
||||
max_epoch_rewards = spec.Gwei(total_base_rewards * spec.SYNC_REWARD_WEIGHT // spec.WEIGHT_DENOMINATOR)
|
||||
included_indices = [index for index, bit in zip(committee, committee_bits) if bit]
|
||||
max_slot_rewards = spec.Gwei(max_epoch_rewards * len(included_indices) // len(committee) // spec.SLOTS_PER_EPOCH)
|
||||
|
||||
# Compute the participant and proposer sync rewards
|
||||
committee_effective_balance = sum([state.validators[index].effective_balance for index in included_indices])
|
||||
committee_effective_balance = max(spec.EFFECTIVE_BALANCE_INCREMENT, committee_effective_balance)
|
||||
effective_balance = state.validators[participant_index].effective_balance
|
||||
return spec.Gwei(max_slot_rewards * effective_balance // committee_effective_balance)
|
||||
|
||||
|
||||
def compute_sync_committee_participant_reward(spec, state, participant_index, committee, committee_bits):
|
||||
included_indices = [index for index, bit in zip(committee, committee_bits) if bit]
|
||||
multiplicities = Counter(included_indices)
|
||||
|
||||
inclusion_reward = compute_sync_committee_inclusion_reward(
|
||||
spec, state, participant_index, committee, committee_bits,
|
||||
)
|
||||
proposer_reward = spec.Gwei(inclusion_reward // spec.PROPOSER_REWARD_QUOTIENT)
|
||||
return spec.Gwei((inclusion_reward - proposer_reward) * multiplicities[participant_index])
|
||||
|
||||
|
||||
def compute_sync_committee_proposer_reward(spec, state, committee, committee_bits):
|
||||
proposer_reward = 0
|
||||
for index, bit in zip(committee, committee_bits):
|
||||
if not bit:
|
||||
continue
|
||||
inclusion_reward = compute_sync_committee_inclusion_reward(
|
||||
spec, state, index, committee, committee_bits,
|
||||
)
|
||||
proposer_reward += spec.Gwei(inclusion_reward // spec.PROPOSER_REWARD_QUOTIENT)
|
||||
return proposer_reward
|
||||
|
||||
|
||||
def validate_sync_committee_rewards(spec, pre_state, post_state, committee, committee_bits, proposer_index):
|
||||
for index in range(len(post_state.validators)):
|
||||
reward = 0
|
||||
if index in committee:
|
||||
reward += compute_sync_committee_participant_reward(
|
||||
spec,
|
||||
pre_state,
|
||||
index,
|
||||
committee,
|
||||
committee_bits,
|
||||
)
|
||||
|
||||
if proposer_index == index:
|
||||
reward += compute_sync_committee_proposer_reward(
|
||||
spec,
|
||||
pre_state,
|
||||
committee,
|
||||
committee_bits,
|
||||
)
|
||||
|
||||
assert post_state.balances[index] == pre_state.balances[index] + reward
|
||||
|
||||
|
||||
def run_successful_sync_committee_test(spec, state, committee, committee_bits):
|
||||
yield 'pre', state
|
||||
|
||||
pre_state = state.copy()
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
block.body.sync_aggregate = spec.SyncAggregate(
|
||||
sync_committee_bits=committee_bits,
|
||||
sync_committee_signature=compute_aggregate_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
block.slot - 1,
|
||||
[index for index, bit in zip(committee, committee_bits) if bit],
|
||||
)
|
||||
)
|
||||
|
||||
yield from run_sync_committee_processing(spec, state, block)
|
||||
|
||||
validate_sync_committee_rewards(
|
||||
spec,
|
||||
pre_state,
|
||||
state,
|
||||
committee,
|
||||
committee_bits,
|
||||
block.proposer_index,
|
||||
)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0, PHASE1])
|
||||
@with_configs([MINIMAL], reason="to create nonduplicate committee")
|
||||
@spec_state_test
|
||||
def test_sync_committee_rewards_nonduplicate_committee(spec, state):
|
||||
committee = get_committee_indices(spec, state, duplicates=False)
|
||||
committee_size = len(committee)
|
||||
committee_bits = [True] * committee_size
|
||||
active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
|
||||
|
||||
# Preconditions of this test case
|
||||
assert active_validator_count >= spec.SYNC_COMMITTEE_SIZE
|
||||
assert committee_size == len(set(committee))
|
||||
|
||||
yield from run_successful_sync_committee_test(spec, state, committee, committee_bits)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_sync_committee_rewards_not_full_participants(spec, state):
|
||||
committee = get_committee_indices(spec, state, duplicates=False)
|
||||
rng = random.Random(1010)
|
||||
committee_bits = [rng.choice([True, False]) for _ in committee]
|
||||
|
||||
yield from run_successful_sync_committee_test(spec, state, committee, committee_bits)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0, PHASE1])
|
||||
@with_configs([MAINNET], reason="to create duplicate committee")
|
||||
@spec_state_test
|
||||
def test_sync_committee_rewards_duplicate_committee(spec, state):
|
||||
committee = get_committee_indices(spec, state, duplicates=True)
|
||||
committee_size = len(committee)
|
||||
committee_bits = [True] * committee_size
|
||||
active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
|
||||
|
||||
# Preconditions of this test case
|
||||
assert active_validator_count < spec.SYNC_COMMITTEE_SIZE
|
||||
assert committee_size > len(set(committee))
|
||||
|
||||
yield from run_successful_sync_committee_test(spec, state, committee, committee_bits)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_invalid_signature_past_block(spec, state):
|
||||
committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
|
||||
|
||||
blocks = []
|
||||
for _ in range(2):
|
||||
# NOTE: need to transition twice to move beyond the degenerate case at genesis
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
# Valid sync committee signature here...
|
||||
block.body.sync_aggregate = spec.SyncAggregate(
|
||||
sync_committee_bits=[True] * len(committee),
|
||||
sync_committee_signature=compute_aggregate_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
block.slot - 1,
|
||||
committee,
|
||||
)
|
||||
)
|
||||
|
||||
        signed_block = state_transition_and_sign_block(spec, state, block)
        blocks.append(signed_block)

    invalid_block = build_empty_block_for_next_slot(spec, state)
    # Invalid signature from a slot other than the previous
    invalid_block.body.sync_aggregate = spec.SyncAggregate(
        sync_committee_bits=[True] * len(committee),
        sync_committee_signature=compute_aggregate_sync_committee_signature(
            spec,
            state,
            invalid_block.slot - 2,
            committee,
        )
    )

    yield from run_sync_committee_processing(spec, state, invalid_block, expect_exception=True)


@with_all_phases_except([PHASE0, PHASE1])
@with_configs([MINIMAL], reason="to produce different committee sets")
@spec_state_test
@always_bls
def test_invalid_signature_previous_committee(spec, state):
    # NOTE: the `state` provided is at genesis and the process to select
    # sync committees currently returns the same committee for the first and second
    # periods at genesis.
    # To get a distinct committee so we can generate an "old" signature, we need to advance
    # 2 EPOCHS_PER_SYNC_COMMITTEE_PERIOD periods.
    current_epoch = spec.get_current_epoch(state)
    old_sync_committee = state.next_sync_committee

    epoch_in_future_sync_committee_period = current_epoch + 2 * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    slot_in_future_sync_committee_period = epoch_in_future_sync_committee_period * spec.SLOTS_PER_EPOCH
    transition_to(spec, state, slot_in_future_sync_committee_period)

    # Use the previous sync committee to produce the signature.
    pubkeys = [validator.pubkey for validator in state.validators]
    # Ensure that the pubkey sets are different.
    assert set(old_sync_committee.pubkeys) != set(state.current_sync_committee.pubkeys)
    committee = [pubkeys.index(pubkey) for pubkey in old_sync_committee.pubkeys]

    block = build_empty_block_for_next_slot(spec, state)
    block.body.sync_aggregate = spec.SyncAggregate(
        sync_committee_bits=[True] * len(committee),
        sync_committee_signature=compute_aggregate_sync_committee_signature(
            spec,
            state,
            block.slot - 1,
            committee,
        )
    )

    yield from run_sync_committee_processing(spec, state, block, expect_exception=True)


@with_all_phases_except([PHASE0, PHASE1])
@spec_state_test
@always_bls
@with_configs([MINIMAL], reason="too slow")
def test_valid_signature_future_committee(spec, state):
    # NOTE: the `state` provided is at genesis and the process to select
    # sync committees currently returns the same committee for the first and second
    # periods at genesis.
    # To get a distinct committee so we can generate an "old" signature, we need to advance
    # 2 EPOCHS_PER_SYNC_COMMITTEE_PERIOD periods.
    current_epoch = spec.get_current_epoch(state)
    old_current_sync_committee = state.current_sync_committee
    old_next_sync_committee = state.next_sync_committee

    epoch_in_future_sync_committee_period = current_epoch + 2 * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    slot_in_future_sync_committee_period = epoch_in_future_sync_committee_period * spec.SLOTS_PER_EPOCH
    transition_to(spec, state, slot_in_future_sync_committee_period)

    sync_committee = state.current_sync_committee

    expected_sync_committee = spec.get_sync_committee(state, epoch_in_future_sync_committee_period)

    assert sync_committee == expected_sync_committee
    assert sync_committee != old_current_sync_committee
    assert sync_committee != old_next_sync_committee

    pubkeys = [validator.pubkey for validator in state.validators]
    committee_indices = [pubkeys.index(pubkey) for pubkey in sync_committee.pubkeys]

    block = build_empty_block_for_next_slot(spec, state)
    block.body.sync_aggregate = spec.SyncAggregate(
        sync_committee_bits=[True] * len(committee_indices),
        sync_committee_signature=compute_aggregate_sync_committee_signature(
            spec,
            state,
            block.slot - 1,
            committee_indices,
        )
    )

    yield from run_sync_committee_processing(spec, state, block)
@@ -0,0 +1,40 @@
from eth2spec.test.context import (
    PHASE0, PHASE1,
    MINIMAL,
    spec_state_test,
    with_all_phases_except,
    with_configs,
)
from eth2spec.test.helpers.state import transition_to
from eth2spec.test.helpers.epoch_processing import (
    run_epoch_processing_with,
)


@with_all_phases_except([PHASE0, PHASE1])
@spec_state_test
@with_configs([MINIMAL], reason="too slow")
def test_sync_committees_progress(spec, state):
    current_epoch = spec.get_current_epoch(state)
    # NOTE: if not in the genesis epoch, period math below needs to be
    # adjusted relative to the current epoch
    assert current_epoch == 0

    first_sync_committee = state.current_sync_committee
    second_sync_committee = state.next_sync_committee

    slot_at_end_of_current_period = spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH - 1
    transition_to(spec, state, slot_at_end_of_current_period)

    # Ensure assignments have not changed:
    assert state.current_sync_committee == first_sync_committee
    assert state.next_sync_committee == second_sync_committee

    yield from run_epoch_processing_with(spec, state, 'process_sync_committee_updates')

    # Can compute the third committee having computed final balances in the last epoch
    # of this `EPOCHS_PER_SYNC_COMMITTEE_PERIOD`
    third_sync_committee = spec.get_sync_committee(state, 2 * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD)

    assert state.current_sync_committee == second_sync_committee
    assert state.next_sync_committee == third_sync_committee
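Editorial aside: the period arithmetic these tests rely on (one sync committee per EPOCHS_PER_SYNC_COMMITTEE_PERIOD epochs) can be illustrated with a small standalone sketch. The constant values below are placeholders in the minimal-config style, not the canonical configuration.

# Standalone sketch of the sync-committee period math used in the tests above.
# EPOCHS_PER_SYNC_COMMITTEE_PERIOD and SLOTS_PER_EPOCH are placeholder values here.
EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 8
SLOTS_PER_EPOCH = 8


def sync_committee_period(epoch: int) -> int:
    # Committees rotate once per EPOCHS_PER_SYNC_COMMITTEE_PERIOD epochs.
    return epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD


def last_slot_of_period(period: int) -> int:
    # Mirrors `slot_at_end_of_current_period` in test_sync_committees_progress.
    return (period + 1) * EPOCHS_PER_SYNC_COMMITTEE_PERIOD * SLOTS_PER_EPOCH - 1


assert sync_committee_period(0) == sync_committee_period(EPOCHS_PER_SYNC_COMMITTEE_PERIOD - 1)
assert sync_committee_period(EPOCHS_PER_SYNC_COMMITTEE_PERIOD) == 1
assert last_slot_of_period(0) == EPOCHS_PER_SYNC_COMMITTEE_PERIOD * SLOTS_PER_EPOCH - 1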
117
tests/core/pyspec/eth2spec/test/altair/fork/test_fork.py
Normal file
@@ -0,0 +1,117 @@
from eth2spec.test.context import (
    PHASE0, ALTAIR,
    MINIMAL,
    with_phases,
    with_custom_state,
    with_configs,
    spec_test, with_state,
    low_balances, misc_balances, large_validator_set,
)
from eth2spec.test.utils import with_meta_tags
from eth2spec.test.helpers.state import (
    next_epoch,
    next_epoch_via_block,
)


ALTAIR_FORK_TEST_META_TAGS = {
    'fork': 'altair',
}


def run_fork_test(post_spec, pre_state):
    yield 'pre', pre_state

    post_state = post_spec.upgrade_to_altair(pre_state)

    # Stable fields
    stable_fields = [
        'genesis_time', 'genesis_validators_root', 'slot',
        # History
        'latest_block_header', 'block_roots', 'state_roots', 'historical_roots',
        # Eth1
        'eth1_data', 'eth1_data_votes', 'eth1_deposit_index',
        # Registry
        'validators', 'balances',
        # Randomness
        'randao_mixes',
        # Slashings
        'slashings',
        # Finality
        'justification_bits', 'previous_justified_checkpoint', 'current_justified_checkpoint', 'finalized_checkpoint',
    ]
    for field in stable_fields:
        assert getattr(pre_state, field) == getattr(post_state, field)

    # Modified fields
    modified_fields = ['fork']
    for field in modified_fields:
        assert getattr(pre_state, field) != getattr(post_state, field)

    assert pre_state.fork.current_version == post_state.fork.previous_version
    assert post_state.fork.current_version == post_spec.ALTAIR_FORK_VERSION
    assert post_state.fork.epoch == post_spec.get_current_epoch(post_state)

    yield 'post', post_state


@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@spec_test
@with_state
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_fork_base_state(spec, phases, state):
    yield from run_fork_test(phases[ALTAIR], state)


@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@spec_test
@with_state
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_fork_next_epoch(spec, phases, state):
    next_epoch(spec, state)
    yield from run_fork_test(phases[ALTAIR], state)


@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@spec_test
@with_state
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_fork_next_epoch_with_block(spec, phases, state):
    next_epoch_via_block(spec, state)
    yield from run_fork_test(phases[ALTAIR], state)


@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@spec_test
@with_state
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_fork_many_next_epoch(spec, phases, state):
    for _ in range(3):
        next_epoch(spec, state)
    yield from run_fork_test(phases[ALTAIR], state)


@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@spec_test
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_fork_random_low_balances(spec, phases, state):
    yield from run_fork_test(phases[ALTAIR], state)


@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@spec_test
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_fork_random_misc_balances(spec, phases, state):
    yield from run_fork_test(phases[ALTAIR], state)


@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@with_configs([MINIMAL],
              reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated")
@with_custom_state(balances_fn=large_validator_set, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@spec_test
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_fork_random_large_validator_set(spec, phases, state):
    yield from run_fork_test(phases[ALTAIR], state)
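Editorial aside: the stable/modified field split used by run_fork_test generalizes to a small helper. The sketch below is standalone and uses SimpleNamespace stand-ins rather than real BeaconState objects.

# Minimal standalone sketch of the "stable vs. modified fields" check pattern
# used by run_fork_test; the states here are SimpleNamespace stand-ins.
from types import SimpleNamespace


def check_fork_fields(pre, post, stable_fields, modified_fields):
    for field in stable_fields:
        assert getattr(pre, field) == getattr(post, field), f"{field} should be unchanged"
    for field in modified_fields:
        assert getattr(pre, field) != getattr(post, field), f"{field} should be changed"


pre = SimpleNamespace(slot=7, validators=["a", "b"], fork="phase0")
post = SimpleNamespace(slot=7, validators=["a", "b"], fork="altair")
check_fork_fields(pre, post, stable_fields=["slot", "validators"], modified_fields=["fork"])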
101
tests/core/pyspec/eth2spec/test/altair/sanity/test_blocks.py
Normal file
@@ -0,0 +1,101 @@
import random
from eth2spec.test.helpers.state import (
    state_transition_and_sign_block,
    next_epoch,
    next_epoch_via_block,
)
from eth2spec.test.helpers.block import (
    build_empty_block_for_next_slot,
    build_empty_block,
)
from eth2spec.test.helpers.sync_committee import (
    compute_aggregate_sync_committee_signature,
)
from eth2spec.test.context import (
    PHASE0, PHASE1,
    with_all_phases_except,
    spec_state_test,
)


def run_sync_committee_sanity_test(spec, state, fraction_full=1.0):
    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
    participants = random.sample(committee, int(len(committee) * fraction_full))

    yield 'pre', state

    block = build_empty_block_for_next_slot(spec, state)
    block.body.sync_aggregate = spec.SyncAggregate(
        sync_committee_bits=[index in participants for index in committee],
        sync_committee_signature=compute_aggregate_sync_committee_signature(
            spec,
            state,
            block.slot - 1,
            participants,
        )
    )
    signed_block = state_transition_and_sign_block(spec, state, block)

    yield 'blocks', [signed_block]
    yield 'post', state


@with_all_phases_except([PHASE0, PHASE1])
@spec_state_test
def test_full_sync_committee_committee(spec, state):
    next_epoch(spec, state)
    yield from run_sync_committee_sanity_test(spec, state, fraction_full=1.0)


@with_all_phases_except([PHASE0, PHASE1])
@spec_state_test
def test_half_sync_committee_committee(spec, state):
    next_epoch(spec, state)
    yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.5)


@with_all_phases_except([PHASE0, PHASE1])
@spec_state_test
def test_empty_sync_committee_committee(spec, state):
    next_epoch(spec, state)
    yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.0)


@with_all_phases_except([PHASE0, PHASE1])
@spec_state_test
def test_full_sync_committee_committee_genesis(spec, state):
    yield from run_sync_committee_sanity_test(spec, state, fraction_full=1.0)


@with_all_phases_except([PHASE0, PHASE1])
@spec_state_test
def test_half_sync_committee_committee_genesis(spec, state):
    yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.5)


@with_all_phases_except([PHASE0, PHASE1])
@spec_state_test
def test_empty_sync_committee_committee_genesis(spec, state):
    yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.0)


@with_all_phases_except([PHASE0, PHASE1])
@spec_state_test
def test_inactivity_scores(spec, state):
    for _ in range(spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY + 2):
        next_epoch_via_block(spec, state)

    assert spec.is_in_inactivity_leak(state)
    previous_inactivity_scores = state.inactivity_scores.copy()

    yield 'pre', state

    # Block transition to next epoch
    block = build_empty_block(spec, state, slot=state.slot + spec.SLOTS_PER_EPOCH)
    signed_block = state_transition_and_sign_block(spec, state, block)

    yield 'blocks', [signed_block]
    yield 'post', state

    for pre, post in zip(previous_inactivity_scores, state.inactivity_scores):
        assert post == pre + spec.INACTIVITY_SCORE_BIAS
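Editorial aside: the expectation checked at the end of test_inactivity_scores can be modeled standalone. This is a placeholder model of the bookkeeping, not the spec function, and the bias constant is illustrative.

# While the chain is in an inactivity leak, a validator that did not participate has its
# inactivity score bumped by INACTIVITY_SCORE_BIAS each epoch. Placeholder constant below.
INACTIVITY_SCORE_BIAS = 4


def bump_scores(scores, participated):
    return [
        score if participated[i] else score + INACTIVITY_SCORE_BIAS
        for i, score in enumerate(scores)
    ]


scores = [0, 0, 12]
assert bump_scores(scores, participated=[False, True, False]) == [4, 0, 16]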
@@ -0,0 +1,35 @@
from eth2spec.test.context import (
    spec_state_test,
    with_phases,
    ALTAIR,
)
from eth2spec.test.helpers.merkle import build_proof


@with_phases([ALTAIR])
@spec_state_test
def test_next_sync_committee_tree(spec, state):
    state.next_sync_committee: object = spec.SyncCommittee(
        pubkeys=[state.validators[i] for i in range(spec.SYNC_COMMITTEE_SIZE)]
    )
    next_sync_committee_branch = build_proof(state.get_backing(), spec.NEXT_SYNC_COMMITTEE_INDEX)
    assert spec.is_valid_merkle_branch(
        leaf=state.next_sync_committee.hash_tree_root(),
        branch=next_sync_committee_branch,
        depth=spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX),
        index=spec.get_subtree_index(spec.NEXT_SYNC_COMMITTEE_INDEX),
        root=state.hash_tree_root(),
    )


@with_phases([ALTAIR])
@spec_state_test
def test_finality_root_tree(spec, state):
    finality_branch = build_proof(state.get_backing(), spec.FINALIZED_ROOT_INDEX)
    assert spec.is_valid_merkle_branch(
        leaf=state.finalized_checkpoint.root,
        branch=finality_branch,
        depth=spec.floorlog2(spec.FINALIZED_ROOT_INDEX),
        index=spec.get_subtree_index(spec.FINALIZED_ROOT_INDEX),
        root=state.hash_tree_root(),
    )
@@ -0,0 +1,216 @@
from eth2spec.test.context import (
    ALTAIR,
    MINIMAL,
    spec_state_test,
    with_configs,
    with_phases,
)
from eth2spec.test.helpers.attestations import next_epoch_with_attestations
from eth2spec.test.helpers.block import (
    build_empty_block,
    build_empty_block_for_next_slot,
)
from eth2spec.test.helpers.state import (
    next_slots,
    state_transition_and_sign_block,
)
from eth2spec.test.helpers.sync_committee import (
    compute_aggregate_sync_committee_signature,
)
from eth2spec.test.helpers.merkle import build_proof


@with_phases([ALTAIR])
@spec_state_test
def test_process_light_client_update_not_updated(spec, state):
    pre_snapshot = spec.LightClientSnapshot(
        header=spec.BeaconBlockHeader(),
        current_sync_committee=state.current_sync_committee,
        next_sync_committee=state.next_sync_committee,
    )
    store = spec.LightClientStore(
        snapshot=pre_snapshot,
        valid_updates=[]
    )

    # Block at slot 1 doesn't increase sync committee period, so it won't update snapshot
    block = build_empty_block_for_next_slot(spec, state)
    signed_block = state_transition_and_sign_block(spec, state, block)
    block_header = spec.BeaconBlockHeader(
        slot=signed_block.message.slot,
        proposer_index=signed_block.message.proposer_index,
        parent_root=signed_block.message.parent_root,
        state_root=signed_block.message.state_root,
        body_root=signed_block.message.body.hash_tree_root(),
    )
    # Sync committee signing the header
    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
    sync_committee_bits = [True] * len(committee)
    sync_committee_signature = compute_aggregate_sync_committee_signature(
        spec,
        state,
        block.slot,
        committee,
    )
    next_sync_committee_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX))]

    # Ensure that finality checkpoint is genesis
    assert state.finalized_checkpoint.epoch == 0
    # Finality is unchanged
    finality_header = spec.BeaconBlockHeader()
    finality_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.FINALIZED_ROOT_INDEX))]

    update = spec.LightClientUpdate(
        header=block_header,
        next_sync_committee=state.next_sync_committee,
        next_sync_committee_branch=next_sync_committee_branch,
        finality_header=finality_header,
        finality_branch=finality_branch,
        sync_committee_bits=sync_committee_bits,
        sync_committee_signature=sync_committee_signature,
        fork_version=state.fork.current_version,
    )

    spec.process_light_client_update(store, update, state.slot, state.genesis_validators_root)

    assert len(store.valid_updates) == 1
    assert store.valid_updates[0] == update
    assert store.snapshot == pre_snapshot


@with_phases([ALTAIR])
@spec_state_test
@with_configs([MINIMAL], reason="too slow")
def test_process_light_client_update_timeout(spec, state):
    pre_snapshot = spec.LightClientSnapshot(
        header=spec.BeaconBlockHeader(),
        current_sync_committee=state.current_sync_committee,
        next_sync_committee=state.next_sync_committee,
    )
    store = spec.LightClientStore(
        snapshot=pre_snapshot,
        valid_updates=[]
    )

    # Forward to next sync committee period
    next_slots(spec, state, spec.SLOTS_PER_EPOCH * (spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD))
    snapshot_period = spec.compute_epoch_at_slot(pre_snapshot.header.slot) // spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    update_period = spec.compute_epoch_at_slot(state.slot) // spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    assert snapshot_period + 1 == update_period

    block = build_empty_block_for_next_slot(spec, state)
    signed_block = state_transition_and_sign_block(spec, state, block)
    block_header = spec.BeaconBlockHeader(
        slot=signed_block.message.slot,
        proposer_index=signed_block.message.proposer_index,
        parent_root=signed_block.message.parent_root,
        state_root=signed_block.message.state_root,
        body_root=signed_block.message.body.hash_tree_root(),
    )

    # Sync committee signing the finalized_block_header
    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
    sync_committee_bits = [True] * len(committee)
    sync_committee_signature = compute_aggregate_sync_committee_signature(
        spec,
        state,
        block_header.slot,
        committee,
        block_root=spec.Root(block_header.hash_tree_root()),
    )

    # Sync committee is updated
    next_sync_committee_branch = build_proof(state.get_backing(), spec.NEXT_SYNC_COMMITTEE_INDEX)
    # Finality is unchanged
    finality_header = spec.BeaconBlockHeader()
    finality_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.FINALIZED_ROOT_INDEX))]

    update = spec.LightClientUpdate(
        header=block_header,
        next_sync_committee=state.next_sync_committee,
        next_sync_committee_branch=next_sync_committee_branch,
        finality_header=finality_header,
        finality_branch=finality_branch,
        sync_committee_bits=sync_committee_bits,
        sync_committee_signature=sync_committee_signature,
        fork_version=state.fork.current_version,
    )

    spec.process_light_client_update(store, update, state.slot, state.genesis_validators_root)

    # snapshot has been updated
    assert len(store.valid_updates) == 0
    assert store.snapshot.header == update.header


@with_phases([ALTAIR])
@spec_state_test
@with_configs([MINIMAL], reason="too slow")
def test_process_light_client_update_finality_updated(spec, state):
    pre_snapshot = spec.LightClientSnapshot(
        header=spec.BeaconBlockHeader(),
        current_sync_committee=state.current_sync_committee,
        next_sync_committee=state.next_sync_committee,
    )
    store = spec.LightClientStore(
        snapshot=pre_snapshot,
        valid_updates=[]
    )

    # Change finality
    blocks = []
    next_slots(spec, state, spec.SLOTS_PER_EPOCH * 2)
    for epoch in range(3):
        prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, True, True)
        blocks += new_blocks
    # Ensure that finality checkpoint has changed
    assert state.finalized_checkpoint.epoch == 3
    # Ensure that it's same period
    snapshot_period = spec.compute_epoch_at_slot(pre_snapshot.header.slot) // spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    update_period = spec.compute_epoch_at_slot(state.slot) // spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    assert snapshot_period == update_period

    # Updated sync_committee and finality
    next_sync_committee_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX))]
    finalized_block_header = blocks[spec.SLOTS_PER_EPOCH - 1].message
    assert finalized_block_header.slot == spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)
    assert finalized_block_header.hash_tree_root() == state.finalized_checkpoint.root
    finality_branch = build_proof(state.get_backing(), spec.FINALIZED_ROOT_INDEX)

    # Build block header
    block = build_empty_block(spec, state)
    block_header = spec.BeaconBlockHeader(
        slot=block.slot,
        proposer_index=block.proposer_index,
        parent_root=block.parent_root,
        state_root=state.hash_tree_root(),
        body_root=block.body.hash_tree_root(),
    )

    # Sync committee signing the finalized_block_header
    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
    sync_committee_bits = [True] * len(committee)
    sync_committee_signature = compute_aggregate_sync_committee_signature(
        spec,
        state,
        block_header.slot,
        committee,
        block_root=spec.Root(block_header.hash_tree_root()),
    )

    update = spec.LightClientUpdate(
        header=finalized_block_header,
        next_sync_committee=state.next_sync_committee,
        next_sync_committee_branch=next_sync_committee_branch,
        finality_header=block_header,  # block_header is the signed header
        finality_branch=finality_branch,
        sync_committee_bits=sync_committee_bits,
        sync_committee_signature=sync_committee_signature,
        fork_version=state.fork.current_version,
    )

    spec.process_light_client_update(store, update, state.slot, state.genesis_validators_root)

    # snapshot has been updated
    assert len(store.valid_updates) == 0
    assert store.snapshot.header == update.header
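Editorial aside: the "unchanged" branches above are lists of zero hashes of length floorlog2(INDEX). A standalone sketch of why that length is right, with an illustrative placeholder generalized index rather than the spec constant:

# A generalized index at depth d satisfies 2**d <= index < 2**(d+1), and a Merkle proof
# for it needs exactly d sibling hashes.


def floorlog2(value: int) -> int:
    return value.bit_length() - 1


HYPOTHETICAL_FINALIZED_ROOT_INDEX = 105  # placeholder generalized index, not the spec constant
empty_branch = [b"\x00" * 32 for _ in range(floorlog2(HYPOTHETICAL_FINALIZED_ROOT_INDEX))]
assert len(empty_branch) == 6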
@@ -2,6 +2,7 @@ import pytest

from eth2spec.phase0 import spec as spec_phase0
from eth2spec.phase1 import spec as spec_phase1
from eth2spec.altair import spec as spec_altair
from eth2spec.utils import bls

from .exceptions import SkippedTest
@@ -19,6 +20,7 @@ from importlib import reload
def reload_specs():
    reload(spec_phase0)
    reload(spec_phase1)
    reload(spec_altair)


# Some of the Spec module functionality is exposed here to deal with phase-specific changes.
@@ -28,11 +30,17 @@ ConfigName = NewType("ConfigName", str)

PHASE0 = SpecForkName('phase0')
PHASE1 = SpecForkName('phase1')
ALL_PHASES = (PHASE0, PHASE1)
ALTAIR = SpecForkName('altair')

ALL_PHASES = (PHASE0, PHASE1, ALTAIR)

MAINNET = ConfigName('mainnet')
MINIMAL = ConfigName('minimal')

ALL_CONFIGS = (MINIMAL, MAINNET)

# The forks that output to the test vectors.
TESTGEN_FORKS = (PHASE0, ALTAIR)

# TODO: currently phases are defined as python modules.
# It would be better if they would be more well-defined interfaces for stronger typing.
@@ -47,14 +55,18 @@ class SpecPhase0(Spec):


class SpecPhase1(Spec):
    def upgrade_to_phase1(self, state: spec_phase0.BeaconState) -> spec_phase1.BeaconState:
        ...
        ...


class SpecAltair(Spec):
    ...


# add transfer, bridge, etc. as the spec evolves
class SpecForks(TypedDict, total=False):
    PHASE0: SpecPhase0
    PHASE1: SpecPhase1
    ALTAIR: SpecAltair


def _prepare_state(balances_fn: Callable[[Any], Sequence[int]], threshold_fn: Callable[[Any], int],
@@ -70,6 +82,8 @@ def _prepare_state(balances_fn: Callable[[Any], Sequence[int]], threshold_fn: Ca
        # TODO: instead of upgrading a test phase0 genesis state we can also write a phase1 state helper.
        # Decide based on performance/consistency results later.
        state = phases[PHASE1].upgrade_to_phase1(state)
    elif spec.fork == ALTAIR:
        state = phases[ALTAIR].upgrade_to_altair(state)

    return state

@@ -326,23 +340,28 @@ def with_phases(phases, other_phases=None):

            available_phases = set(run_phases)
            if other_phases is not None:
                available_phases += set(other_phases)
                available_phases |= set(other_phases)

            # TODO: test state is dependent on phase0 but is immediately transitioned to phase1.
            # A new state-creation helper for phase 1 may be in place, and then phase1+ tests can run without phase0
            available_phases.add(PHASE0)

            # Populate all phases for multi-phase tests
            phase_dir = {}
            if PHASE0 in available_phases:
                phase_dir[PHASE0] = spec_phase0
            if PHASE1 in available_phases:
                phase_dir[PHASE1] = spec_phase1
            if ALTAIR in available_phases:
                phase_dir[ALTAIR] = spec_altair

            # return is ignored whenever multiple phases are ran. If
            if PHASE0 in run_phases:
                ret = fn(spec=spec_phase0, phases=phase_dir, *args, **kw)
            if PHASE1 in run_phases:
                ret = fn(spec=spec_phase1, phases=phase_dir, *args, **kw)
            if ALTAIR in run_phases:
                ret = fn(spec=spec_altair, phases=phase_dir, *args, **kw)
            return ret
        return wrapper
    return decorator
@@ -376,3 +395,11 @@ def only_full_crosslink(fn):
            return None
        return fn(*args, spec=spec, state=state, **kw)
    return wrapper


def is_post_altair(spec):
    if spec.fork in [PHASE0, PHASE1]:
        # TODO: PHASE1 fork is temporarily parallel to ALTAIR.
        # Will make PHASE1 fork inherit ALTAIR later.
        return False
    return True

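Editorial aside: the fork-gating pattern that is_post_altair enables lets one helper body branch on the active fork instead of being duplicated per fork. A standalone sketch with illustrative stand-ins for the spec objects and constant values:

from types import SimpleNamespace

PRE_ALTAIR_FORKS = {"phase0", "phase1"}


def is_post_altair(spec) -> bool:
    return spec.fork not in PRE_ALTAIR_FORKS


def slashing_quotient(spec) -> int:
    # Mirrors get_min_slashing_penalty_quotient further below in this diff.
    if is_post_altair(spec):
        return spec.MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR
    return spec.MIN_SLASHING_PENALTY_QUOTIENT


# Placeholder quotient values, chosen only for the example.
phase0_spec = SimpleNamespace(fork="phase0", MIN_SLASHING_PENALTY_QUOTIENT=128)
altair_spec = SimpleNamespace(fork="altair", MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR=64)
assert slashing_quotient(phase0_spec) == 128
assert slashing_quotient(altair_spec) == 64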
@@ -2,7 +2,7 @@ from lru import LRU

from typing import List

from eth2spec.test.context import expect_assertion_error, PHASE1
from eth2spec.test.context import expect_assertion_error, PHASE1, is_post_altair
from eth2spec.test.helpers.state import state_transition_and_sign_block, next_epoch, next_slot
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
from eth2spec.test.helpers.shard_transitions import get_shard_transition_of_committee
@@ -30,17 +30,22 @@ def run_attestation_processing(spec, state, attestation, valid=True):
        yield 'post', None
        return

    current_epoch_count = len(state.current_epoch_attestations)
    previous_epoch_count = len(state.previous_epoch_attestations)
    if not is_post_altair(spec):
        current_epoch_count = len(state.current_epoch_attestations)
        previous_epoch_count = len(state.previous_epoch_attestations)

    # process attestation
    spec.process_attestation(state, attestation)

    # Make sure the attestation has been processed
    if attestation.data.target.epoch == spec.get_current_epoch(state):
        assert len(state.current_epoch_attestations) == current_epoch_count + 1
    if not is_post_altair(spec):
        if attestation.data.target.epoch == spec.get_current_epoch(state):
            assert len(state.current_epoch_attestations) == current_epoch_count + 1
        else:
            assert len(state.previous_epoch_attestations) == previous_epoch_count + 1
    else:
        assert len(state.previous_epoch_attestations) == previous_epoch_count + 1
        # After accounting reform, there are cases when processing an attestation does not result in any flag updates
        pass

    # yield post-state
    yield 'post', state
@@ -315,7 +320,8 @@ def prepare_state_with_attestations(spec, state, participation_fn=None):
        next_slot(spec, state)

    assert state.slot == next_epoch_start_slot + spec.MIN_ATTESTATION_INCLUSION_DELAY
    assert len(state.previous_epoch_attestations) == len(attestations)
    if not is_post_altair(spec):
        assert len(state.previous_epoch_attestations) == len(attestations)

    return attestations


@@ -1,3 +1,4 @@
from eth2spec.test.context import is_post_altair
from eth2spec.test.helpers.keys import privkeys
from eth2spec.utils import bls
from eth2spec.utils.bls import only_with_bls
@@ -89,6 +90,10 @@ def build_empty_block(spec, state, slot=None):
    empty_block.proposer_index = spec.get_beacon_proposer_index(state)
    empty_block.body.eth1_data.deposit_count = state.eth1_deposit_index
    empty_block.parent_root = parent_block_root

    if is_post_altair(spec):
        empty_block.body.sync_aggregate.sync_committee_signature = spec.G2_POINT_AT_INFINITY

    apply_randao_reveal(spec, state, empty_block)
    return empty_block


60
tests/core/pyspec/eth2spec/test/helpers/block_processing.py
Normal file
@@ -0,0 +1,60 @@
def for_ops(state, operations, fn) -> None:
    for operation in operations:
        fn(state, operation)


def get_process_calls(spec):
    return {
        # PHASE0
        'process_block_header':
            lambda state, block: spec.process_block_header(state, block),
        'process_randao':
            lambda state, block: spec.process_randao(state, block.body),
        'process_eth1_data':
            lambda state, block: spec.process_eth1_data(state, block.body),
        'process_proposer_slashing':
            lambda state, block: for_ops(state, block.body.proposer_slashings, spec.process_proposer_slashing),
        'process_attester_slashing':
            lambda state, block: for_ops(state, block.body.attester_slashings, spec.process_attester_slashing),
        'process_attestation':
            lambda state, block: for_ops(state, block.body.attestations, spec.process_attestation),
        'process_deposit':
            lambda state, block: for_ops(state, block.body.deposits, spec.process_deposit),
        'process_voluntary_exit':
            lambda state, block: for_ops(state, block.body.voluntary_exits, spec.process_voluntary_exit),
        # Altair
        'process_sync_committee':
            lambda state, block: spec.process_sync_committee(state, block.body.sync_aggregate),
        # PHASE1
        'process_custody_game_operations':
            lambda state, block: spec.process_custody_game_operations(state, block.body),
        'process_shard_transitions':
            lambda state, block: spec.process_shard_transitions(
                state, block.body.shard_transitions, block.body.attestations),
    }


def run_block_processing_to(spec, state, block, process_name: str):
    """
    Processes to the block transition, up to, but not including, the sub-transition named ``process_name``.
    Returns a Callable[[state, block], None] for the remaining ``process_name`` transition.

    Tests should create full blocks to ensure a valid state transition, even if the operation itself is isolated.
    (e.g. latest_header in the beacon state is up-to-date in a sync-committee test).

    A test prepares a pre-state by calling this function, outputs the pre-state,
    and it can then proceed to run the returned callable, and output a post-state.
    """
    # transition state to slot before block state transition
    if state.slot < block.slot:
        spec.process_slots(state, block.slot)

    # process components of block transition
    for name, call in get_process_calls(spec).items():
        if name == process_name:
            return call
        # only run when present. Later phases introduce more to the block-processing.
        if hasattr(spec, name):
            call(state, block)
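Editorial aside: the "run up to a named sub-transition, return the rest" dispatch that run_block_processing_to implements can be modeled standalone. The state and steps below are toy stand-ins, not pyspec objects.

def run_processing_to(state, steps, stop_at):
    # Apply named steps in order, stop just before the requested one,
    # and hand the caller a callable for that step.
    for name, call in steps.items():
        if name == stop_at:
            return call
        call(state)
    raise KeyError(stop_at)


toy_state = {"log": []}
toy_steps = {
    "process_header": lambda s: s["log"].append("header"),
    "process_randao": lambda s: s["log"].append("randao"),
    "process_sync_committee": lambda s: s["log"].append("sync"),
}

remaining = run_processing_to(toy_state, toy_steps, stop_at="process_sync_committee")
assert toy_state["log"] == ["header", "randao"]  # earlier steps already applied
remaining(toy_state)  # the isolated step under test
assert toy_state["log"][-1] == "sync"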
@@ -1,7 +1,7 @@
from eth2spec.test.helpers.keys import privkeys
from eth2spec.test.helpers.merkle import build_proof
from eth2spec.utils import bls
from eth2spec.utils.ssz.ssz_typing import Bitlist, ByteVector, ByteList
from remerkleable.tree import gindex_bit_iter

BYTES_PER_CHUNK = 32

@@ -116,26 +116,6 @@ def custody_chunkify(spec, x):
    return [ByteVector[spec.BYTES_PER_CUSTODY_CHUNK](c) for c in chunks]


def build_proof(anchor, leaf_index):
    if leaf_index <= 1:
        return []  # Nothing to prove / invalid index
    node = anchor
    proof = []
    # Walk down, top to bottom to the leaf
    bit_iter, _ = gindex_bit_iter(leaf_index)
    for bit in bit_iter:
        # Always take the opposite hand for the proof.
        # 1 = right as leaf, thus get left
        if bit:
            proof.append(node.get_left().merkle_root())
            node = node.get_right()
        else:
            proof.append(node.get_right().merkle_root())
            node = node.get_left()

    return list(reversed(proof))


def get_valid_custody_chunk_response(spec, state, chunk_challenge, challenge_index,
                                     block_length_or_custody_data,
                                     invalid_chunk_data=False):

@@ -1,14 +1,29 @@

process_calls = [
    'process_justification_and_finalization',
    'process_rewards_and_penalties',
    'process_registry_updates',
    'process_reveal_deadlines',
    'process_challenge_deadlines',
    'process_slashings',
    'process_final_updates',
    'after_process_final_updates',
]
from eth2spec.test.context import is_post_altair


def get_process_calls(spec):
    return [
        # PHASE0
        'process_justification_and_finalization',
        'process_rewards_and_penalties',
        'process_registry_updates',
        'process_reveal_deadlines',
        'process_challenge_deadlines',
        'process_slashings',
        'process_eth1_data_reset',
        'process_effective_balance_updates',
        'process_slashings_reset',
        'process_randao_mixes_reset',
        'process_historical_roots_update',
        # Altair replaced `process_participation_record_updates` with `process_participation_flag_updates`
        'process_participation_flag_updates' if is_post_altair(spec) else (
            'process_participation_record_updates'
        ),
        'process_sync_committee_updates',
        # PHASE1
        'process_phase_1_final_updates',
    ]


def run_epoch_processing_to(spec, state, process_name: str):
@@ -25,7 +40,7 @@ def run_epoch_processing_to(spec, state, process_name: str):
        spec.process_slot(state)

    # process components of epoch transition before final-updates
    for name in process_calls:
    for name in get_process_calls(spec):
        if name == process_name:
            break
        # only run when present. Later phases introduce more to the epoch-processing.
@@ -1,3 +1,5 @@
from eth_utils import encode_hex

from eth2spec.phase0 import spec as phase0_spec


@@ -18,7 +20,23 @@ def add_block_to_store(spec, store, signed_block):
    spec.on_block(store, signed_block)


def add_attestation_to_store(spec, store, attestation):
def tick_and_run_on_block(spec, store, signed_block, test_steps=None):
    if test_steps is None:
        test_steps = []

    pre_state = store.block_states[signed_block.message.parent_root]
    block_time = pre_state.genesis_time + signed_block.message.slot * spec.SECONDS_PER_SLOT

    if store.time < block_time:
        on_tick_and_append_step(spec, store, block_time, test_steps)

    yield from run_on_block(spec, store, signed_block, test_steps)


def tick_and_run_on_attestation(spec, store, attestation, test_steps=None):
    if test_steps is None:
        test_steps = []

    parent_block = store.blocks[attestation.data.beacon_block_root]
    pre_state = store.block_states[spec.hash_tree_root(parent_block)]
    block_time = pre_state.genesis_time + parent_block.slot * spec.SECONDS_PER_SLOT
@@ -26,12 +44,72 @@ def add_attestation_to_store(spec, store, attestation):

    if store.time < next_epoch_time:
        spec.on_tick(store, next_epoch_time)
        test_steps.append({'tick': int(next_epoch_time)})

    spec.on_attestation(store, attestation)
    yield get_attestation_file_name(attestation), attestation
    test_steps.append({'attestation': get_attestation_file_name(attestation)})


def get_genesis_forkchoice_store(spec, genesis_state):
    store, _ = get_genesis_forkchoice_store_and_block(spec, genesis_state)
    return store


def get_genesis_forkchoice_store_and_block(spec, genesis_state):
    assert genesis_state.slot == spec.GENESIS_SLOT
    # The genesis block must be a Phase 0 `BeaconBlock`
    genesis_block = phase0_spec.BeaconBlock(state_root=genesis_state.hash_tree_root())
    return spec.get_forkchoice_store(genesis_state, genesis_block)
    return spec.get_forkchoice_store(genesis_state, genesis_block), genesis_block


def get_block_file_name(block):
    return f"block_{encode_hex(block.hash_tree_root())}"


def get_attestation_file_name(attestation):
    return f"attestation_{encode_hex(attestation.hash_tree_root())}"


def on_tick_and_append_step(spec, store, time, test_steps):
    spec.on_tick(store, time)
    test_steps.append({'tick': int(time)})


def run_on_block(spec, store, signed_block, test_steps, valid=True):
    if not valid:
        try:
            spec.on_block(store, signed_block)

        except AssertionError:
            return
        else:
            assert False

    spec.on_block(store, signed_block)
    yield get_block_file_name(signed_block), signed_block
    test_steps.append({'block': get_block_file_name(signed_block)})

    # An on_block step implies receiving block's attestations
    for attestation in signed_block.message.body.attestations:
        spec.on_attestation(store, attestation)

    assert store.blocks[signed_block.message.hash_tree_root()] == signed_block.message
    test_steps.append({
        'checks': {
            'time': int(store.time),
            'head': get_formatted_head_output(spec, store),
            'justified_checkpoint_root': encode_hex(store.justified_checkpoint.root),
            'finalized_checkpoint_root': encode_hex(store.finalized_checkpoint.root),
            'best_justified_checkpoint': encode_hex(store.best_justified_checkpoint.root),
        }
    })


def get_formatted_head_output(spec, store):
    head = spec.get_head(store)
    slot = store.blocks[head].slot
    return {
        'slot': int(slot),
        'root': encode_hex(head),
    }

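Editorial aside: the step list these fork-choice helpers accumulate for test vectors is just a flat list of dicts, each either a tick, a named input file, or a checks snapshot. A standalone sketch with placeholder file names and values:

test_steps = []
test_steps.append({'tick': 12})
test_steps.append({'block': 'block_0xabc...'})
test_steps.append({'attestation': 'attestation_0xdef...'})
test_steps.append({
    'checks': {
        'time': 12,
        'head': {'slot': 1, 'root': '0xabc...'},
    }
})

# A consumer replays the steps in order, dispatching on the single key of each dict.
for step in test_steps:
    (kind, value), = step.items()
    assert kind in ('tick', 'block', 'attestation', 'checks')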
21
tests/core/pyspec/eth2spec/test/helpers/merkle.py
Normal file
@@ -0,0 +1,21 @@
from remerkleable.tree import gindex_bit_iter


def build_proof(anchor, leaf_index):
    if leaf_index <= 1:
        return []  # Nothing to prove / invalid index
    node = anchor
    proof = []
    # Walk down, top to bottom to the leaf
    bit_iter, _ = gindex_bit_iter(leaf_index)
    for bit in bit_iter:
        # Always take the opposite hand for the proof.
        # 1 = right as leaf, thus get left
        if bit:
            proof.append(node.get_left().merkle_root())
            node = node.get_right()
        else:
            proof.append(node.get_right().merkle_root())
            node = node.get_left()

    return list(reversed(proof))
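Editorial aside: a standalone sketch of the proof/verification pair that build_proof is used with above. It hashes a small power-of-two leaf set with sha256; the generalized-index convention (root = 1, left child = 2i, right child = 2i + 1) matches the SSZ merkleization layout, but everything here is a toy model rather than the pyspec implementation.

from hashlib import sha256


def hash_pair(left: bytes, right: bytes) -> bytes:
    return sha256(left + right).digest()


def subtree_root(leaves):
    if len(leaves) == 1:
        return leaves[0]
    half = len(leaves) // 2
    return hash_pair(subtree_root(leaves[:half]), subtree_root(leaves[half:]))


def build_proof(leaves, leaf_gindex):
    # Walk top-down using the bits of the generalized index (skipping the leading 1 bit),
    # collecting the sibling of the branch taken at each level; reverse for bottom-up order.
    depth = leaf_gindex.bit_length() - 1
    proof = []
    node = leaves
    for level in range(depth - 1, -1, -1):
        bit = (leaf_gindex >> level) & 1
        half = len(node) // 2
        left, right = node[:half], node[half:]
        proof.append(subtree_root(left if bit else right))
        node = right if bit else left
    return list(reversed(proof))


def verify_branch(leaf, branch, leaf_gindex, root):
    node = leaf
    for i, sibling in enumerate(branch):
        if (leaf_gindex >> i) & 1:
            node = hash_pair(sibling, node)
        else:
            node = hash_pair(node, sibling)
    return node == root


leaves = [bytes([i]) * 32 for i in range(8)]  # 8 leaves -> generalized indices 8..15
gindex = 8 + 5                                # the 6th leaf
proof = build_proof(leaves, gindex)
assert verify_branch(leaves[5], proof, gindex, subtree_root(leaves))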
@@ -1,8 +1,16 @@
from eth2spec.test.context import is_post_altair
from eth2spec.test.helpers.block_header import sign_block_header
from eth2spec.test.helpers.keys import pubkey_to_privkey
from eth2spec.test.helpers.state import get_balance


def get_min_slashing_penalty_quotient(spec):
    if is_post_altair(spec):
        return spec.MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR
    else:
        return spec.MIN_SLASHING_PENALTY_QUOTIENT


def check_proposer_slashing_effect(spec, pre_state, state, slashed_index):
    slashed_validator = state.validators[slashed_index]
    assert slashed_validator.slashed
@@ -10,7 +18,7 @@ def check_proposer_slashing_effect(spec, pre_state, state, slashed_index):
    assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH

    proposer_index = spec.get_beacon_proposer_index(state)
    slash_penalty = state.validators[slashed_index].effective_balance // spec.MIN_SLASHING_PENALTY_QUOTIENT
    slash_penalty = state.validators[slashed_index].effective_balance // get_min_slashing_penalty_quotient(spec)
    whistleblower_reward = state.validators[slashed_index].effective_balance // spec.WHISTLEBLOWER_REWARD_QUOTIENT
    if proposer_index != slashed_index:
        # slashed validator lost initial slash penalty

@@ -2,6 +2,7 @@ from random import Random
from lru import LRU

from eth2spec.phase0 import spec as spec_phase0
from eth2spec.test.context import is_post_altair
from eth2spec.test.helpers.attestations import cached_prepare_state_with_attestations
from eth2spec.test.helpers.deposits import mock_deposit
from eth2spec.test.helpers.state import next_epoch
@@ -26,6 +27,26 @@ def has_enough_for_reward(spec, state, index):
    )


def has_enough_for_leak_penalty(spec, state, index):
    """
    Check if effective_balance and state of leak is high enough for a leak penalty.

    At very low balances / leak values, it is possible for a validator to have a positive effective_balance
    and be in a leak, but have zero leak penalty.
    """

    if is_post_altair(spec):
        return (
            state.validators[index].effective_balance * state.inactivity_scores[index]
            > spec.INACTIVITY_SCORE_BIAS * spec.INACTIVITY_PENALTY_QUOTIENT_ALTAIR
        )
    else:
        return (
            state.validators[index].effective_balance * spec.get_finality_delay(state)
            > spec.INACTIVITY_PENALTY_QUOTIENT
        )


def run_deltas(spec, state):
    """
    Run all deltas functions yielding:
@@ -33,35 +54,58 @@ def run_deltas(spec, state):
      - source deltas ('source_deltas')
      - target deltas ('target_deltas')
      - head deltas ('head_deltas')
      - inclusion delay deltas ('inclusion_delay_deltas')
      - not if is_post_altair(spec)
          - inclusion delay deltas ('inclusion_delay_deltas')
      - inactivity penalty deltas ('inactivity_penalty_deltas')
    """
    yield 'pre', state

    if is_post_altair(spec):
        def get_source_deltas(state):
            return spec.get_flag_index_deltas(state, spec.TIMELY_SOURCE_FLAG_INDEX, spec.TIMELY_SOURCE_WEIGHT)

        def get_head_deltas(state):
            return spec.get_flag_index_deltas(state, spec.TIMELY_HEAD_FLAG_INDEX, spec.TIMELY_HEAD_WEIGHT)

        def get_target_deltas(state):
            return spec.get_flag_index_deltas(state, spec.TIMELY_TARGET_FLAG_INDEX, spec.TIMELY_TARGET_WEIGHT)

    yield from run_attestation_component_deltas(
        spec,
        state,
        spec.get_source_deltas,
        spec.get_source_deltas if not is_post_altair(spec) else get_source_deltas,
        spec.get_matching_source_attestations,
        'source_deltas',
    )
    yield from run_attestation_component_deltas(
        spec,
        state,
        spec.get_target_deltas,
        spec.get_target_deltas if not is_post_altair(spec) else get_target_deltas,
        spec.get_matching_target_attestations,
        'target_deltas',
    )
    yield from run_attestation_component_deltas(
        spec,
        state,
        spec.get_head_deltas,
        spec.get_head_deltas if not is_post_altair(spec) else get_head_deltas,
        spec.get_matching_head_attestations,
        'head_deltas',
    )
    yield from run_get_inclusion_delay_deltas(spec, state)
    if not is_post_altair(spec):
        yield from run_get_inclusion_delay_deltas(spec, state)
    yield from run_get_inactivity_penalty_deltas(spec, state)


def deltas_name_to_flag_index(spec, deltas_name):
    if 'source' in deltas_name:
        return spec.TIMELY_SOURCE_FLAG_INDEX
    elif 'head' in deltas_name:
        return spec.TIMELY_HEAD_FLAG_INDEX
    elif 'target' in deltas_name:
        return spec.TIMELY_TARGET_FLAG_INDEX
    raise ValueError("Wrong deltas_name %s" % deltas_name)


def run_attestation_component_deltas(spec, state, component_delta_fn, matching_att_fn, deltas_name):
    """
    Run ``component_delta_fn``, yielding:
@@ -71,8 +115,14 @@ def run_attestation_component_deltas(spec, state, component_delta_fn, matching_a

    yield deltas_name, Deltas(rewards=rewards, penalties=penalties)

    matching_attestations = matching_att_fn(state, spec.get_previous_epoch(state))
    matching_indices = spec.get_unslashed_attesting_indices(state, matching_attestations)
    if not is_post_altair(spec):
        matching_attestations = matching_att_fn(state, spec.get_previous_epoch(state))
        matching_indices = spec.get_unslashed_attesting_indices(state, matching_attestations)
    else:
        matching_indices = spec.get_unslashed_participating_indices(
            state, deltas_name_to_flag_index(spec, deltas_name), spec.get_previous_epoch(state)
        )

    eligible_indices = spec.get_eligible_validator_indices(state)
    for index in range(len(state.validators)):
        if index not in eligible_indices:
@@ -101,6 +151,12 @@ def run_get_inclusion_delay_deltas(spec, state):
    Run ``get_inclusion_delay_deltas``, yielding:
      - inclusion delay deltas ('inclusion_delay_deltas')
    """
    if is_post_altair(spec):
        # No inclusion_delay_deltas
        yield 'inclusion_delay_deltas', Deltas(rewards=[0] * len(state.validators),
                                               penalties=[0] * len(state.validators))
        return

    rewards, penalties = spec.get_inclusion_delay_deltas(state)

    yield 'inclusion_delay_deltas', Deltas(rewards=rewards, penalties=penalties)
@@ -148,8 +204,13 @@ def run_get_inactivity_penalty_deltas(spec, state):

    yield 'inactivity_penalty_deltas', Deltas(rewards=rewards, penalties=penalties)

    matching_attestations = spec.get_matching_target_attestations(state, spec.get_previous_epoch(state))
    matching_attesting_indices = spec.get_unslashed_attesting_indices(state, matching_attestations)
    if not is_post_altair(spec):
        matching_attestations = spec.get_matching_target_attestations(state, spec.get_previous_epoch(state))
        matching_attesting_indices = spec.get_unslashed_attesting_indices(state, matching_attestations)
    else:
        matching_attesting_indices = spec.get_unslashed_participating_indices(
            state, spec.TIMELY_TARGET_FLAG_INDEX, spec.get_previous_epoch(state)
        )

    eligible_indices = spec.get_eligible_validator_indices(state)
    for index in range(len(state.validators)):
@@ -159,11 +220,20 @@ def run_get_inactivity_penalty_deltas(spec, state):
            continue

        if spec.is_in_inactivity_leak(state):
            # Compute base_penalty
            base_reward = spec.get_base_reward(state, index)
            base_penalty = spec.BASE_REWARDS_PER_EPOCH * base_reward - spec.get_proposer_reward(state, index)
            if not is_post_altair(spec):
                cancel_base_rewards_per_epoch = spec.BASE_REWARDS_PER_EPOCH
                base_penalty = cancel_base_rewards_per_epoch * base_reward - spec.get_proposer_reward(state, index)
            else:
                base_penalty = sum(
                    base_reward * numerator // spec.WEIGHT_DENOMINATOR
                    for (_, numerator) in spec.get_flag_indices_and_weights()
                )

            if not has_enough_for_reward(spec, state, index):
                assert penalties[index] == 0
            elif index in matching_attesting_indices:
            elif index in matching_attesting_indices or not has_enough_for_leak_penalty(spec, state, index):
                assert penalties[index] == base_penalty
            else:
                assert penalties[index] > base_penalty
@@ -173,7 +243,8 @@ def run_get_inactivity_penalty_deltas(spec, state):

def transition_state_to_leak(spec, state, epochs=None):
    if epochs is None:
        epochs = spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY
        # +1 to trigger inactivity_score transitions
        epochs = spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY + 1
    assert epochs >= spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY

    for _ in range(epochs):
@@ -262,8 +333,13 @@ def run_test_full_all_correct(spec, state):
def run_test_full_but_partial_participation(spec, state, rng=Random(5522)):
    cached_prepare_state_with_attestations(spec, state)

    for a in state.previous_epoch_attestations:
        a.aggregation_bits = [rng.choice([True, False]) for _ in a.aggregation_bits]
    if not is_post_altair(spec):
        for a in state.previous_epoch_attestations:
            a.aggregation_bits = [rng.choice([True, False]) for _ in a.aggregation_bits]
    else:
        for index in range(len(state.validators)):
            if rng.choice([True, False]):
                state.previous_epoch_participation[index] = spec.ParticipationFlags(0b0000_0000)

    yield from run_deltas(spec, state)

@@ -272,8 +348,12 @@ def run_test_partial(spec, state, fraction_filled):
    cached_prepare_state_with_attestations(spec, state)

    # Remove portion of attestations
    num_attestations = int(len(state.previous_epoch_attestations) * fraction_filled)
    state.previous_epoch_attestations = state.previous_epoch_attestations[:num_attestations]
    if not is_post_altair(spec):
        num_attestations = int(len(state.previous_epoch_attestations) * fraction_filled)
        state.previous_epoch_attestations = state.previous_epoch_attestations[:num_attestations]
    else:
        for index in range(int(len(state.validators) * fraction_filled)):
            state.previous_epoch_participation[index] = spec.ParticipationFlags(0b0000_0000)

    yield from run_deltas(spec, state)

@@ -328,13 +408,18 @@ def run_test_some_very_low_effective_balances_that_attested(spec, state):
def run_test_some_very_low_effective_balances_that_did_not_attest(spec, state):
    cached_prepare_state_with_attestations(spec, state)

    # Remove attestation
    attestation = state.previous_epoch_attestations[0]
    state.previous_epoch_attestations = state.previous_epoch_attestations[1:]
    # Set removed indices effective balance to very low amount
    indices = spec.get_unslashed_attesting_indices(state, [attestation])
    for i, index in enumerate(indices):
        state.validators[index].effective_balance = i
    if not is_post_altair(spec):
        # Remove attestation
        attestation = state.previous_epoch_attestations[0]
        state.previous_epoch_attestations = state.previous_epoch_attestations[1:]
        # Set removed indices effective balance to very low amount
        indices = spec.get_unslashed_attesting_indices(state, [attestation])
        for i, index in enumerate(indices):
            state.validators[index].effective_balance = i
    else:
        index = 0
        state.validators[index].effective_balance = 1
        state.previous_epoch_participation[index] = spec.ParticipationFlags(0b0000_0000)

    yield from run_deltas(spec, state)

@@ -442,16 +527,43 @@ def run_test_full_random(spec, state, rng=Random(8020)):

    cached_prepare_state_with_attestations(spec, state)

    for pending_attestation in state.previous_epoch_attestations:
        # ~1/3 have bad target
        if rng.randint(0, 2) == 0:
            pending_attestation.data.target.root = b'\x55' * 32
        # ~1/3 have bad head
        if rng.randint(0, 2) == 0:
            pending_attestation.data.beacon_block_root = b'\x66' * 32
        # ~50% participation
        pending_attestation.aggregation_bits = [rng.choice([True, False]) for _ in pending_attestation.aggregation_bits]
        # Random inclusion delay
        pending_attestation.inclusion_delay = rng.randint(1, spec.SLOTS_PER_EPOCH)
    if not is_post_altair(spec):
        for pending_attestation in state.previous_epoch_attestations:
            # ~1/3 have bad target
            if rng.randint(0, 2) == 0:
                pending_attestation.data.target.root = b'\x55' * 32
            # ~1/3 have bad head
            if rng.randint(0, 2) == 0:
                pending_attestation.data.beacon_block_root = b'\x66' * 32
            # ~50% participation
            pending_attestation.aggregation_bits = [rng.choice([True, False])
                                                    for _ in pending_attestation.aggregation_bits]
            # Random inclusion delay
            pending_attestation.inclusion_delay = rng.randint(1, spec.SLOTS_PER_EPOCH)
    else:
        for index in range(len(state.validators)):
            # ~1/3 have bad head or bad target or not timely enough
            is_timely_correct_head = rng.randint(0, 2) != 0
            flags = state.previous_epoch_participation[index]

            def set_flag(index, value):
                nonlocal flags
                flag = spec.ParticipationFlags(2**index)
                if value:
                    flags |= flag
                else:
                    flags &= 0xff ^ flag

            set_flag(spec.TIMELY_HEAD_FLAG_INDEX, is_timely_correct_head)
            if is_timely_correct_head:
                # If timely head, then must be timely target
                set_flag(spec.TIMELY_TARGET_FLAG_INDEX, True)
                # If timely head, then must be timely source
                set_flag(spec.TIMELY_SOURCE_FLAG_INDEX, True)
            else:
                # ~50% of remaining have bad target or not timely enough
                set_flag(spec.TIMELY_TARGET_FLAG_INDEX, rng.choice([True, False]))
                # ~50% of remaining have bad source or not timely enough
                set_flag(spec.TIMELY_SOURCE_FLAG_INDEX, rng.choice([True, False]))
            state.previous_epoch_participation[index] = flags
    yield from run_deltas(spec, state)

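Editorial aside: the participation-flag bit arithmetic used by set_flag above is plain bit manipulation on one byte per validator. A standalone sketch with illustrative flag indices:

TIMELY_SOURCE_FLAG_INDEX = 0
TIMELY_TARGET_FLAG_INDEX = 1
TIMELY_HEAD_FLAG_INDEX = 2


def add_flag(flags: int, flag_index: int) -> int:
    return flags | (1 << flag_index)


def has_flag(flags: int, flag_index: int) -> bool:
    return (flags >> flag_index) & 1 == 1


flags = 0b0000_0000
flags = add_flag(flags, TIMELY_TARGET_FLAG_INDEX)
assert has_flag(flags, TIMELY_TARGET_FLAG_INDEX)
assert not has_flag(flags, TIMELY_HEAD_FLAG_INDEX)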
35
tests/core/pyspec/eth2spec/test/helpers/sync_committee.py
Normal file
@@ -0,0 +1,35 @@
from eth2spec.test.helpers.keys import privkeys
from eth2spec.test.helpers.block import (
    build_empty_block_for_next_slot,
)
from eth2spec.utils import bls


def compute_sync_committee_signature(spec, state, slot, privkey, block_root=None):
    domain = spec.get_domain(state, spec.DOMAIN_SYNC_COMMITTEE, spec.compute_epoch_at_slot(slot))
    if block_root is None:
        if slot == state.slot:
            block_root = build_empty_block_for_next_slot(spec, state).parent_root
        else:
            block_root = spec.get_block_root_at_slot(state, slot)
    signing_root = spec.compute_signing_root(block_root, domain)
    return bls.Sign(privkey, signing_root)


def compute_aggregate_sync_committee_signature(spec, state, slot, participants, block_root=None):
    if len(participants) == 0:
        return spec.G2_POINT_AT_INFINITY

    signatures = []
    for validator_index in participants:
        privkey = privkeys[validator_index]
        signatures.append(
            compute_sync_committee_signature(
                spec,
                state,
                slot,
                privkey,
                block_root=block_root,
            )
        )
    return bls.Aggregate(signatures)
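Editorial aside: the aggregation helper above composes BLS primitives from eth2spec.utils.bls. A hedged usage sketch follows; it assumes the pyspec test environment (eth2spec.utils.bls and eth2spec.test.helpers.keys, including a FastAggregateVerify wrapper) is installed, so it is not runnable standalone.

# Sign one message with several test keys, aggregate, and verify (assumed pyspec environment).
from eth2spec.utils import bls
from eth2spec.test.helpers.keys import privkeys, pubkeys

message = b'\x12' * 32  # stand-in for a computed signing root
participants = [0, 1, 2]

signatures = [bls.Sign(privkeys[i], message) for i in participants]
aggregate = bls.Aggregate(signatures)
assert bls.FastAggregateVerify([pubkeys[i] for i in participants], message, aggregate)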
@@ -2,10 +2,13 @@ from eth2spec.test.context import (
|
||||
spec_state_test,
|
||||
always_bls, never_bls,
|
||||
with_all_phases,
|
||||
with_all_phases_except,
|
||||
spec_test,
|
||||
low_balances,
|
||||
with_custom_state,
|
||||
single_phase)
|
||||
single_phase,
|
||||
PHASE1,
|
||||
)
|
||||
from eth2spec.test.helpers.attestations import (
|
||||
run_attestation_processing,
|
||||
get_valid_attestation,
|
||||
@@ -329,3 +332,212 @@ def test_too_few_aggregation_bits(spec, state):
|
||||
attestation.aggregation_bits = attestation.aggregation_bits[:-1]
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
|
||||
|
||||
#
|
||||
# Full correct atttestation contents at different slot inclusions
|
||||
#
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_correct_min_inclusion_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=True)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_correct_sqrt_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=True, on_time=False)
|
||||
next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH))
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_correct_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=True, on_time=False)
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_correct_after_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=True, on_time=False)
|
||||
|
||||
# increment past latest inclusion slot
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH + 1)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
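The four cases above bracket the inclusion window enforced by phase 0 `process_attestation`, which (paraphrased) requires:

# Paraphrase of the phase 0 inclusion-window check in process_attestation:
data = attestation.data
assert data.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + spec.SLOTS_PER_EPOCH
# Hence delays of MIN_ATTESTATION_INCLUSION_DELAY, sqrt(SLOTS_PER_EPOCH) and SLOTS_PER_EPOCH
# are accepted, while SLOTS_PER_EPOCH + 1 (the test directly above) is rejected.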
|
||||
|
||||
#
|
||||
# Incorrect head but correct source/target at different slot inclusions
|
||||
#
|
||||
|
||||
@with_all_phases_except([PHASE1])
|
||||
@spec_state_test
|
||||
def test_incorrect_head_min_inclusion_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
attestation.data.beacon_block_root = b'\x42' * 32
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_head_sqrt_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False, on_time=False)
|
||||
next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH))
|
||||
|
||||
attestation.data.beacon_block_root = b'\x42' * 32
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_head_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False, on_time=False)
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH)
|
||||
|
||||
attestation.data.beacon_block_root = b'\x42' * 32
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_head_after_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False, on_time=False)
|
||||
|
||||
# increment past latest inclusion slot
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH + 1)
|
||||
|
||||
attestation.data.beacon_block_root = b'\x42' * 32
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
|
||||
|
||||
#
|
||||
# Incorrect head and target but correct source at different slot inclusions
|
||||
#
|
||||
|
||||
# Note: current phase 1 spec checks
|
||||
# `assert data.beacon_block_root == get_block_root_at_slot(state, compute_previous_slot(state.slot))`
|
||||
# so this test cannot pass under phase 1 until the phase 1 refactor is merged
|
||||
@with_all_phases_except([PHASE1])
|
||||
@spec_state_test
|
||||
def test_incorrect_head_and_target_min_inclusion_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
attestation.data.beacon_block_root = b'\x42' * 32
|
||||
attestation.data.target.root = b'\x42' * 32
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_head_and_target_sqrt_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False, on_time=False)
|
||||
next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH))
|
||||
|
||||
attestation.data.beacon_block_root = b'\x42' * 32
|
||||
attestation.data.target.root = b'\x42' * 32
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_head_and_target_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False, on_time=False)
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH)
|
||||
|
||||
attestation.data.beacon_block_root = b'\x42' * 32
|
||||
attestation.data.target.root = b'\x42' * 32
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_head_and_target_after_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False, on_time=False)
|
||||
# increment past latest inclusion slot
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH + 1)
|
||||
|
||||
attestation.data.beacon_block_root = b'\x42' * 32
|
||||
attestation.data.target.root = b'\x42' * 32
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
|
||||
|
||||
#
|
||||
# Correct head and source but incorrect target at different slot inclusions
|
||||
#
|
||||
|
||||
@with_all_phases_except([PHASE1])
|
||||
@spec_state_test
|
||||
def test_incorrect_target_min_inclusion_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
attestation.data.target.root = b'\x42' * 32
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_target_sqrt_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False, on_time=False)
|
||||
next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH))
|
||||
|
||||
attestation.data.target.root = b'\x42' * 32
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_target_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False, on_time=False)
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH)
|
||||
|
||||
attestation.data.target.root = b'\x42' * 32
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_target_after_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False, on_time=False)
|
||||
# increment past latest inclusion slot
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH + 1)
|
||||
|
||||
attestation.data.target.root = b'\x42' * 32
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
|
||||
@@ -1,9 +1,14 @@
|
||||
import random
|
||||
|
||||
from eth2spec.test.context import (
|
||||
spec_state_test, expect_assertion_error, always_bls, with_all_phases
|
||||
spec_state_test, expect_assertion_error, always_bls, with_all_phases,
|
||||
with_custom_state, spec_test, single_phase,
|
||||
low_balances, misc_balances,
|
||||
)
|
||||
from eth2spec.test.helpers.attestations import sign_indexed_attestation
|
||||
from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing, \
|
||||
get_indexed_attestation_participants, get_attestation_2_data, get_attestation_1_data
|
||||
from eth2spec.test.helpers.proposer_slashings import get_min_slashing_penalty_quotient
|
||||
from eth2spec.test.helpers.state import (
|
||||
get_balance,
|
||||
next_epoch_via_block,
|
||||
@@ -31,15 +36,19 @@ def run_attester_slashing_processing(spec, state, attester_slashing, valid=True)
|
||||
|
||||
proposer_index = spec.get_beacon_proposer_index(state)
|
||||
pre_proposer_balance = get_balance(state, proposer_index)
|
||||
pre_slashings = {slashed_index: get_balance(state, slashed_index) for slashed_index in slashed_indices}
|
||||
pre_slashing_balances = {slashed_index: get_balance(state, slashed_index) for slashed_index in slashed_indices}
|
||||
pre_slashing_effectives = {
|
||||
slashed_index: state.validators[slashed_index].effective_balance
|
||||
for slashed_index in slashed_indices
|
||||
}
|
||||
pre_withdrawalable_epochs = {
|
||||
slashed_index: state.validators[slashed_index].withdrawable_epoch
|
||||
for slashed_index in slashed_indices
|
||||
}
|
||||
|
||||
total_proposer_rewards = sum(
|
||||
balance // spec.WHISTLEBLOWER_REWARD_QUOTIENT
|
||||
for balance in pre_slashings.values()
|
||||
effective_balance // spec.WHISTLEBLOWER_REWARD_QUOTIENT
|
||||
for effective_balance in pre_slashing_effectives.values()
|
||||
)
|
||||
|
||||
# Process slashing
|
||||
@@ -60,7 +69,7 @@ def run_attester_slashing_processing(spec, state, attester_slashing, valid=True)
|
||||
assert slashed_validator.withdrawable_epoch == expected_withdrawable_epoch
|
||||
else:
|
||||
assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
|
||||
assert get_balance(state, slashed_index) < pre_slashings[slashed_index]
|
||||
assert get_balance(state, slashed_index) < pre_slashing_balances[slashed_index]
|
||||
|
||||
if proposer_index not in slashed_indices:
|
||||
# gained whistleblower reward
|
||||
@@ -70,7 +79,7 @@ def run_attester_slashing_processing(spec, state, attester_slashing, valid=True)
|
||||
expected_balance = (
|
||||
pre_proposer_balance
|
||||
+ total_proposer_rewards
|
||||
- pre_slashings[proposer_index] // spec.MIN_SLASHING_PENALTY_QUOTIENT
|
||||
- pre_slashing_effectives[proposer_index] // get_min_slashing_penalty_quotient(spec)
|
||||
)
|
||||
|
||||
assert get_balance(state, proposer_index) == expected_balance
|
||||
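As a rough numeric sanity check of the reward and penalty terms above (mainnet-style constants assumed; values are illustrative only):

# Illustrative arithmetic, assuming effective_balance = 32 ETH and mainnet quotients:
effective_balance = 32_000_000_000                   # Gwei
whistleblower_reward = effective_balance // 512       # WHISTLEBLOWER_REWARD_QUOTIENT -> 62_500_000
phase0_penalty = effective_balance // 128             # MIN_SLASHING_PENALTY_QUOTIENT -> 250_000_000
altair_penalty = effective_balance // 64              # halved quotient post-Altair -> 500_000_000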
@@ -117,6 +126,41 @@ def test_success_already_exited_recent(spec, state):
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_success_low_balances(spec, state):
|
||||
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
|
||||
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_success_misc_balances(spec, state):
|
||||
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
|
||||
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_success_with_effective_balance_disparity(spec, state):
|
||||
# Jitter balances to be different from effective balances
|
||||
for i in range(len(state.balances)):
|
||||
pre = int(state.balances[i])
|
||||
state.balances[i] += random.randrange(max(pre - 5000, 0), pre + 5000)
|
||||
|
||||
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
|
||||
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
|
||||
@@ -1,46 +1,5 @@
|
||||
from eth2spec.test.context import spec_state_test, with_all_phases
|
||||
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import (
|
||||
run_epoch_processing_with, run_epoch_processing_to
|
||||
)
|
||||
from eth2spec.test.helpers.state import transition_to
|
||||
|
||||
|
||||
def run_process_final_updates(spec, state):
|
||||
yield from run_epoch_processing_with(spec, state, 'process_final_updates')
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_eth1_vote_no_reset(spec, state):
|
||||
assert spec.EPOCHS_PER_ETH1_VOTING_PERIOD > 1
|
||||
# skip ahead to the end of the epoch
|
||||
transition_to(spec, state, spec.SLOTS_PER_EPOCH - 1)
|
||||
|
||||
for i in range(state.slot + 1): # add a vote for each skipped slot.
|
||||
state.eth1_data_votes.append(
|
||||
spec.Eth1Data(deposit_root=b'\xaa' * 32,
|
||||
deposit_count=state.eth1_deposit_index,
|
||||
block_hash=b'\xbb' * 32))
|
||||
|
||||
yield from run_process_final_updates(spec, state)
|
||||
|
||||
assert len(state.eth1_data_votes) == spec.SLOTS_PER_EPOCH
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_eth1_vote_reset(spec, state):
|
||||
# skip ahead to the end of the voting period
|
||||
state.slot = (spec.EPOCHS_PER_ETH1_VOTING_PERIOD * spec.SLOTS_PER_EPOCH) - 1
|
||||
for i in range(state.slot + 1): # add a vote for each skipped slot.
|
||||
state.eth1_data_votes.append(
|
||||
spec.Eth1Data(deposit_root=b'\xaa' * 32,
|
||||
deposit_count=state.eth1_deposit_index,
|
||||
block_hash=b'\xbb' * 32))
|
||||
|
||||
yield from run_process_final_updates(spec, state)
|
||||
|
||||
assert len(state.eth1_data_votes) == 0
|
||||
from eth2spec.test.helpers.epoch_processing import run_epoch_processing_to
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@@ -48,7 +7,7 @@ def test_eth1_vote_reset(spec, state):
|
||||
def test_effective_balance_hysteresis(spec, state):
|
||||
# Prepare state up to the final-updates.
|
||||
# Then overwrite the balances; we only want the focus to be on the hysteresis-based changes.
|
||||
run_epoch_processing_to(spec, state, 'process_final_updates')
|
||||
run_epoch_processing_to(spec, state, 'process_effective_balance_updates')
|
||||
# Set some edge cases for balances
|
||||
max = spec.MAX_EFFECTIVE_BALANCE
|
||||
min = spec.EJECTION_BALANCE
|
||||
@@ -80,20 +39,8 @@ def test_effective_balance_hysteresis(spec, state):
|
||||
state.balances[i] = bal
|
||||
|
||||
yield 'pre', state
|
||||
spec.process_final_updates(state)
|
||||
spec.process_effective_balance_updates(state)
|
||||
yield 'post', state
|
||||
|
||||
for i, (_, _, post_eff, name) in enumerate(cases):
|
||||
assert state.validators[i].effective_balance == post_eff, name
|
||||
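For reference, the hysteresis rule these cases exercise is roughly the per-validator update below (phase 0 `process_effective_balance_updates`; `balance` and `validator` stand for the validator's current balance and record):

# Rough paraphrase of the hysteresis update:
HYSTERESIS_INCREMENT = spec.EFFECTIVE_BALANCE_INCREMENT // spec.HYSTERESIS_QUOTIENT
DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * spec.HYSTERESIS_DOWNWARD_MULTIPLIER
UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * spec.HYSTERESIS_UPWARD_MULTIPLIER
if (
    balance + DOWNWARD_THRESHOLD < validator.effective_balance
    or validator.effective_balance + UPWARD_THRESHOLD < balance
):
    validator.effective_balance = min(
        balance - balance % spec.EFFECTIVE_BALANCE_INCREMENT, spec.MAX_EFFECTIVE_BALANCE
    )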
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_historical_root_accumulator(spec, state):
|
||||
# skip ahead to near the end of the historical roots period (excl block before epoch processing)
|
||||
state.slot = spec.SLOTS_PER_HISTORICAL_ROOT - 1
|
||||
history_len = len(state.historical_roots)
|
||||
|
||||
yield from run_process_final_updates(spec, state)
|
||||
|
||||
assert len(state.historical_roots) == history_len + 1
|
||||
@@ -0,0 +1,43 @@
|
||||
from eth2spec.test.context import spec_state_test, with_all_phases
|
||||
from eth2spec.test.helpers.epoch_processing import (
|
||||
run_epoch_processing_with,
|
||||
)
|
||||
from eth2spec.test.helpers.state import transition_to
|
||||
|
||||
|
||||
def run_process_eth1_data_reset(spec, state):
|
||||
yield from run_epoch_processing_with(spec, state, 'process_eth1_data_reset')
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_eth1_vote_no_reset(spec, state):
|
||||
assert spec.EPOCHS_PER_ETH1_VOTING_PERIOD > 1
|
||||
# skip ahead to the end of the epoch
|
||||
transition_to(spec, state, spec.SLOTS_PER_EPOCH - 1)
|
||||
|
||||
for i in range(state.slot + 1): # add a vote for each skipped slot.
|
||||
state.eth1_data_votes.append(
|
||||
spec.Eth1Data(deposit_root=b'\xaa' * 32,
|
||||
deposit_count=state.eth1_deposit_index,
|
||||
block_hash=b'\xbb' * 32))
|
||||
|
||||
yield from run_process_eth1_data_reset(spec, state)
|
||||
|
||||
assert len(state.eth1_data_votes) == spec.SLOTS_PER_EPOCH
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_eth1_vote_reset(spec, state):
|
||||
# skip ahead to the end of the voting period
|
||||
state.slot = (spec.EPOCHS_PER_ETH1_VOTING_PERIOD * spec.SLOTS_PER_EPOCH) - 1
|
||||
for i in range(state.slot + 1): # add a vote for each skipped slot.
|
||||
state.eth1_data_votes.append(
|
||||
spec.Eth1Data(deposit_root=b'\xaa' * 32,
|
||||
deposit_count=state.eth1_deposit_index,
|
||||
block_hash=b'\xbb' * 32))
|
||||
|
||||
yield from run_process_eth1_data_reset(spec, state)
|
||||
|
||||
assert len(state.eth1_data_votes) == 0
|
||||
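The epoch-processing step this new test file targets is small; roughly (untyped paraphrase of the spec function split out of the old `process_final_updates`):

def process_eth1_data_reset(state):
    next_epoch = spec.get_current_epoch(state) + 1
    # Reset eth1 data votes at the end of each voting period
    if next_epoch % spec.EPOCHS_PER_ETH1_VOTING_PERIOD == 0:
        state.eth1_data_votes = []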
@@ -0,0 +1,20 @@
|
||||
from eth2spec.test.context import spec_state_test, with_all_phases
|
||||
from eth2spec.test.helpers.epoch_processing import (
|
||||
run_epoch_processing_with
|
||||
)
|
||||
|
||||
|
||||
def run_process_historical_roots_update(spec, state):
|
||||
yield from run_epoch_processing_with(spec, state, 'process_historical_roots_update')
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_historical_root_accumulator(spec, state):
|
||||
# skip ahead to near the end of the historical roots period (excl block before epoch processing)
|
||||
state.slot = spec.SLOTS_PER_HISTORICAL_ROOT - 1
|
||||
history_len = len(state.historical_roots)
|
||||
|
||||
yield from run_process_historical_roots_update(spec, state)
|
||||
|
||||
assert len(state.historical_roots) == history_len + 1
|
||||
@@ -1,6 +1,6 @@
|
||||
from eth2spec.test.context import spec_state_test, with_all_phases
|
||||
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import (
|
||||
run_epoch_processing_with
|
||||
from eth2spec.test.context import is_post_altair, spec_state_test, with_all_phases
|
||||
from eth2spec.test.helpers.epoch_processing import (
|
||||
run_epoch_processing_with,
|
||||
)
|
||||
from eth2spec.test.helpers.state import transition_to
|
||||
|
||||
@@ -16,12 +16,20 @@ def add_mock_attestations(spec, state, epoch, source, target, sufficient_support
|
||||
previous_epoch = spec.get_previous_epoch(state)
|
||||
current_epoch = spec.get_current_epoch(state)
|
||||
|
||||
if current_epoch == epoch:
|
||||
attestations = state.current_epoch_attestations
|
||||
elif previous_epoch == epoch:
|
||||
attestations = state.previous_epoch_attestations
|
||||
if not is_post_altair(spec):
|
||||
if current_epoch == epoch:
|
||||
attestations = state.current_epoch_attestations
|
||||
elif previous_epoch == epoch:
|
||||
attestations = state.previous_epoch_attestations
|
||||
else:
|
||||
raise Exception(f"cannot include attestations in epoch ${epoch} from epoch ${current_epoch}")
|
||||
else:
|
||||
raise Exception(f"cannot include attestations in epoch ${epoch} from epoch ${current_epoch}")
|
||||
if current_epoch == epoch:
|
||||
epoch_participation = state.current_epoch_participation
|
||||
elif previous_epoch == epoch:
|
||||
epoch_participation = state.previous_epoch_participation
|
||||
else:
|
||||
raise Exception(f"cannot include attestations in epoch ${epoch} from epoch ${current_epoch}")
|
||||
|
||||
total_balance = spec.get_total_active_balance(state)
|
||||
remaining_balance = int(total_balance * 2 // 3) # can become negative
|
||||
@@ -52,19 +60,28 @@ def add_mock_attestations(spec, state, epoch, source, target, sufficient_support
|
||||
for i in range(max(len(committee) // 5, 1)):
|
||||
aggregation_bits[i] = 0
|
||||
|
||||
attestations.append(spec.PendingAttestation(
|
||||
aggregation_bits=aggregation_bits,
|
||||
data=spec.AttestationData(
|
||||
slot=slot,
|
||||
beacon_block_root=b'\xff' * 32, # irrelevant to testing
|
||||
source=source,
|
||||
target=target,
|
||||
index=index,
|
||||
),
|
||||
inclusion_delay=1,
|
||||
))
|
||||
if messed_up_target:
|
||||
attestations[len(attestations) - 1].data.target.root = b'\x99' * 32
|
||||
# Update state
|
||||
if not is_post_altair(spec):
|
||||
attestations.append(spec.PendingAttestation(
|
||||
aggregation_bits=aggregation_bits,
|
||||
data=spec.AttestationData(
|
||||
slot=slot,
|
||||
beacon_block_root=b'\xff' * 32, # irrelevant to testing
|
||||
source=source,
|
||||
target=target,
|
||||
index=index,
|
||||
),
|
||||
inclusion_delay=1,
|
||||
))
|
||||
if messed_up_target:
|
||||
attestations[len(attestations) - 1].data.target.root = b'\x99' * 32
|
||||
else:
|
||||
for i, index in enumerate(committee):
|
||||
if aggregation_bits[i]:
|
||||
epoch_participation[index] |= spec.ParticipationFlags(2**spec.TIMELY_HEAD_FLAG_INDEX)
|
||||
epoch_participation[index] |= spec.ParticipationFlags(2**spec.TIMELY_SOURCE_FLAG_INDEX)
|
||||
if not messed_up_target:
|
||||
epoch_participation[index] |= spec.ParticipationFlags(2**spec.TIMELY_TARGET_FLAG_INDEX)
|
||||
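The bitwise updates above mirror the Altair participation-flag helpers, roughly:

# Rough paraphrase of the Altair flag helpers:
def add_flag(flags, flag_index):
    flag = spec.ParticipationFlags(2**flag_index)
    return flags | flag


def has_flag(flags, flag_index):
    flag = spec.ParticipationFlags(2**flag_index)
    return flags & flag == flag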
|
||||
|
||||
def get_checkpoints(spec, epoch):
|
||||
|
||||
@@ -0,0 +1,21 @@
|
||||
from eth2spec.test.context import PHASE0, spec_state_test, with_phases
|
||||
from eth2spec.test.helpers.epoch_processing import (
|
||||
run_epoch_processing_with
|
||||
)
|
||||
|
||||
|
||||
def run_process_participation_record_updates(spec, state):
|
||||
yield from run_epoch_processing_with(spec, state, 'process_participation_record_updates')
|
||||
|
||||
|
||||
@with_phases([PHASE0])
|
||||
@spec_state_test
|
||||
def test_updated_participation_record(spec, state):
|
||||
state.previous_epoch_attestations = [spec.PendingAttestation(proposer_index=100)]
|
||||
current_epoch_attestations = [spec.PendingAttestation(proposer_index=200)]
|
||||
state.current_epoch_attestations = current_epoch_attestations
|
||||
|
||||
yield from run_process_participation_record_updates(spec, state)
|
||||
|
||||
assert state.previous_epoch_attestations == current_epoch_attestations
|
||||
assert state.current_epoch_attestations == []
|
||||
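For reference, the phase-0-only step this test exercises just rotates the attestation records (untyped paraphrase):

def process_participation_record_updates(state):
    # Rotate current/previous epoch attestations
    state.previous_epoch_attestations = state.current_epoch_attestations
    state.current_epoch_attestations = []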
@@ -0,0 +1,21 @@
|
||||
from eth2spec.test.context import spec_state_test, with_all_phases
|
||||
from eth2spec.test.helpers.epoch_processing import (
|
||||
run_epoch_processing_with
|
||||
)
|
||||
|
||||
|
||||
def run_process_randao_mixes_reset(spec, state):
|
||||
yield from run_epoch_processing_with(spec, state, 'process_randao_mixes_reset')
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_updated_randao_mixes(spec, state):
|
||||
next_epoch = spec.get_current_epoch(state) + 1
|
||||
state.randao_mixes[next_epoch % spec.EPOCHS_PER_HISTORICAL_VECTOR] = b'\x56' * 32
|
||||
|
||||
yield from run_process_randao_mixes_reset(spec, state)
|
||||
|
||||
assert state.randao_mixes[next_epoch % spec.EPOCHS_PER_HISTORICAL_VECTOR] == spec.get_randao_mix(
|
||||
state, spec.get_current_epoch(state)
|
||||
)
|
||||
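The step under test here seeds the next epoch's RANDAO mix from the current one; untyped paraphrase:

def process_randao_mixes_reset(state):
    current_epoch = spec.get_current_epoch(state)
    next_epoch = current_epoch + 1
    # Set next epoch's mix to the current epoch's RANDAO mix
    state.randao_mixes[next_epoch % spec.EPOCHS_PER_HISTORICAL_VECTOR] = spec.get_randao_mix(state, current_epoch)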
@@ -1,7 +1,7 @@
|
||||
from eth2spec.test.helpers.deposits import mock_deposit
|
||||
from eth2spec.test.helpers.state import next_epoch, next_slots
|
||||
from eth2spec.test.context import spec_state_test, with_all_phases
|
||||
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
|
||||
from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with
|
||||
|
||||
|
||||
def run_process_registry_updates(spec, state):
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
from eth2spec.test.context import (
|
||||
spec_state_test, spec_test,
|
||||
with_all_phases, single_phase,
|
||||
with_phases, PHASE0,
|
||||
with_phases, PHASE0, PHASE1,
|
||||
with_custom_state,
|
||||
zero_activation_threshold,
|
||||
misc_balances, low_single_balance,
|
||||
is_post_altair,
|
||||
)
|
||||
from eth2spec.test.helpers.state import (
|
||||
next_epoch,
|
||||
@@ -18,7 +19,7 @@ from eth2spec.test.helpers.attestations import (
|
||||
)
|
||||
from eth2spec.test.helpers.rewards import leaking
|
||||
from eth2spec.test.helpers.attester_slashings import get_indexed_attestation_participants
|
||||
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
|
||||
from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with
|
||||
from random import Random
|
||||
|
||||
|
||||
@@ -26,6 +27,43 @@ def run_process_rewards_and_penalties(spec, state):
|
||||
yield from run_epoch_processing_with(spec, state, 'process_rewards_and_penalties')
|
||||
|
||||
|
||||
def validate_resulting_balances(spec, pre_state, post_state, attestations):
|
||||
attesting_indices = spec.get_unslashed_attesting_indices(post_state, attestations)
|
||||
current_epoch = spec.get_current_epoch(post_state)
|
||||
|
||||
for index in range(len(pre_state.validators)):
|
||||
if not spec.is_active_validator(pre_state.validators[index], current_epoch):
|
||||
assert post_state.balances[index] == pre_state.balances[index]
|
||||
elif not is_post_altair(spec):
|
||||
proposer_indices = [a.proposer_index for a in post_state.previous_epoch_attestations]
|
||||
if spec.is_in_inactivity_leak(post_state):
|
||||
# Proposers can still make money during a leak before Altair
|
||||
if index in proposer_indices and index in attesting_indices:
|
||||
assert post_state.balances[index] > pre_state.balances[index]
|
||||
elif index in attesting_indices:
|
||||
# If not proposer but participated optimally, should have exactly neutral balance
|
||||
assert post_state.balances[index] == pre_state.balances[index]
|
||||
else:
|
||||
assert post_state.balances[index] < pre_state.balances[index]
|
||||
else:
|
||||
if index in attesting_indices:
|
||||
assert post_state.balances[index] > pre_state.balances[index]
|
||||
else:
|
||||
assert post_state.balances[index] < pre_state.balances[index]
|
||||
else:
|
||||
if spec.is_in_inactivity_leak(post_state):
|
||||
if index in attesting_indices:
|
||||
# Participating optimally during a leak should leave the balance exactly neutral
|
||||
assert post_state.balances[index] == pre_state.balances[index]
|
||||
else:
|
||||
assert post_state.balances[index] < pre_state.balances[index]
|
||||
else:
|
||||
if index in attesting_indices:
|
||||
assert post_state.balances[index] > pre_state.balances[index]
|
||||
else:
|
||||
assert post_state.balances[index] < pre_state.balances[index]
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_genesis_epoch_no_attestations_no_penalties(spec, state):
|
||||
@@ -65,7 +103,7 @@ def test_genesis_epoch_full_attestations_no_rewards(spec, state):
|
||||
assert state.balances[index] == pre_state.balances[index]
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_full_attestations_random_incorrect_fields(spec, state):
|
||||
attestations = prepare_state_with_attestations(spec, state)
|
||||
@@ -99,19 +137,10 @@ def test_full_attestations_misc_balances(spec, state):
|
||||
|
||||
yield from run_process_rewards_and_penalties(spec, state)
|
||||
|
||||
attesting_indices = spec.get_unslashed_attesting_indices(state, attestations)
|
||||
assert len(attesting_indices) > 0
|
||||
assert len(attesting_indices) != len(pre_state.validators)
|
||||
assert any(v.effective_balance != spec.MAX_EFFECTIVE_BALANCE for v in state.validators)
|
||||
for index in range(len(pre_state.validators)):
|
||||
if index in attesting_indices:
|
||||
assert state.balances[index] > pre_state.balances[index]
|
||||
elif spec.is_active_validator(pre_state.validators[index], spec.compute_epoch_at_slot(state.slot)):
|
||||
assert state.balances[index] < pre_state.balances[index]
|
||||
else:
|
||||
assert state.balances[index] == pre_state.balances[index]
|
||||
validate_resulting_balances(spec, pre_state, state, attestations)
|
||||
# Check if base rewards are consistent with effective balance.
|
||||
brs = {}
|
||||
attesting_indices = spec.get_unslashed_attesting_indices(state, attestations)
|
||||
for index in attesting_indices:
|
||||
br = spec.get_base_reward(state, index)
|
||||
if br in brs:
|
||||
@@ -145,8 +174,7 @@ def test_no_attestations_all_penalties(spec, state):
|
||||
|
||||
yield from run_process_rewards_and_penalties(spec, state)
|
||||
|
||||
for index in range(len(pre_state.validators)):
|
||||
assert state.balances[index] < pre_state.balances[index]
|
||||
validate_resulting_balances(spec, pre_state, state, [])
|
||||
|
||||
|
||||
def run_with_participation(spec, state, participation_fn):
|
||||
@@ -158,8 +186,6 @@ def run_with_participation(spec, state, participation_fn):
|
||||
return att_participants
|
||||
|
||||
attestations = prepare_state_with_attestations(spec, state, participation_fn=participation_tracker)
|
||||
proposer_indices = [a.proposer_index for a in state.previous_epoch_attestations]
|
||||
|
||||
pre_state = state.copy()
|
||||
|
||||
yield from run_process_rewards_and_penalties(spec, state)
|
||||
@@ -167,21 +193,7 @@ def run_with_participation(spec, state, participation_fn):
|
||||
attesting_indices = spec.get_unslashed_attesting_indices(state, attestations)
|
||||
assert len(attesting_indices) == len(participated)
|
||||
|
||||
for index in range(len(pre_state.validators)):
|
||||
if spec.is_in_inactivity_leak(state):
|
||||
# Proposers can still make money during a leak
|
||||
if index in proposer_indices and index in participated:
|
||||
assert state.balances[index] > pre_state.balances[index]
|
||||
# If not proposer but participated optimally, should have exactly neutral balance
|
||||
elif index in attesting_indices:
|
||||
assert state.balances[index] == pre_state.balances[index]
|
||||
else:
|
||||
assert state.balances[index] < pre_state.balances[index]
|
||||
else:
|
||||
if index in participated:
|
||||
assert state.balances[index] > pre_state.balances[index]
|
||||
else:
|
||||
assert state.balances[index] < pre_state.balances[index]
|
||||
validate_resulting_balances(spec, pre_state, state, attestations)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@@ -420,7 +432,8 @@ def test_attestations_some_slashed(spec, state):
|
||||
for i in range(spec.MIN_PER_EPOCH_CHURN_LIMIT):
|
||||
spec.slash_validator(state, attesting_indices_before_slashings[i])
|
||||
|
||||
assert len(state.previous_epoch_attestations) == len(attestations)
|
||||
if not is_post_altair(spec):
|
||||
assert len(state.previous_epoch_attestations) == len(attestations)
|
||||
|
||||
pre_state = state.copy()
|
||||
|
||||
@@ -429,10 +442,4 @@ def test_attestations_some_slashed(spec, state):
|
||||
attesting_indices = spec.get_unslashed_attesting_indices(state, attestations)
|
||||
assert len(attesting_indices) > 0
|
||||
assert len(attesting_indices_before_slashings) - len(attesting_indices) == spec.MIN_PER_EPOCH_CHURN_LIMIT
|
||||
for index in range(len(pre_state.validators)):
|
||||
if index in attesting_indices:
|
||||
# non-slashed attester should gain reward
|
||||
assert state.balances[index] > pre_state.balances[index]
|
||||
else:
|
||||
# Slashed non-proposer attester should have penalty
|
||||
assert state.balances[index] < pre_state.balances[index]
|
||||
validate_resulting_balances(spec, pre_state, state, attestations)
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
from eth2spec.test.context import spec_state_test, with_all_phases
|
||||
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import (
|
||||
from eth2spec.test.context import spec_state_test, with_all_phases, is_post_altair
|
||||
from eth2spec.test.helpers.epoch_processing import (
|
||||
run_epoch_processing_with, run_epoch_processing_to
|
||||
)
|
||||
from eth2spec.test.helpers.state import next_epoch
|
||||
@@ -23,12 +23,19 @@ def slash_validators(spec, state, indices, out_epochs):
|
||||
] = total_slashed_balance
|
||||
|
||||
|
||||
def get_slashing_multiplier(spec):
|
||||
if is_post_altair(spec):
|
||||
return spec.PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR
|
||||
else:
|
||||
return spec.PROPORTIONAL_SLASHING_MULTIPLIER
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_max_penalties(spec, state):
|
||||
# Slashed count to ensure that enough validators are slashed to induce maximum penalties
|
||||
slashed_count = min(
|
||||
(len(state.validators) // spec.PROPORTIONAL_SLASHING_MULTIPLIER) + 1,
|
||||
(len(state.validators) // get_slashing_multiplier(spec)) + 1,
|
||||
# Can't slash more than validator count!
|
||||
len(state.validators)
|
||||
)
|
||||
@@ -40,7 +47,7 @@ def test_max_penalties(spec, state):
|
||||
total_balance = spec.get_total_active_balance(state)
|
||||
total_penalties = sum(state.slashings)
|
||||
|
||||
assert total_balance // spec.PROPORTIONAL_SLASHING_MULTIPLIER <= total_penalties
|
||||
assert total_balance // get_slashing_multiplier(spec) <= total_penalties
|
||||
|
||||
yield from run_process_slashings(spec, state)
|
||||
|
||||
@@ -50,7 +57,30 @@ def test_max_penalties(spec, state):
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_small_penalty(spec, state):
|
||||
def test_low_penalty(spec, state):
|
||||
# Slashed count is one tenth of validator set
|
||||
slashed_count = (len(state.validators) // 10) + 1
|
||||
out_epoch = spec.get_current_epoch(state) + (spec.EPOCHS_PER_SLASHINGS_VECTOR // 2)
|
||||
|
||||
slashed_indices = list(range(slashed_count))
|
||||
slash_validators(spec, state, slashed_indices, [out_epoch] * slashed_count)
|
||||
|
||||
pre_state = state.copy()
|
||||
|
||||
yield from run_process_slashings(spec, state)
|
||||
|
||||
for i in slashed_indices:
|
||||
assert 0 < state.balances[i] < pre_state.balances[i]
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_minimal_penalty(spec, state):
|
||||
#
|
||||
# When there are very few slashings, the resulting slashing penalty gets rounded down
|
||||
# to zero, so `process_slashings` has no effect
|
||||
#
|
||||
|
||||
# Just the bare minimum for this one validator
|
||||
state.balances[0] = state.validators[0].effective_balance = spec.EJECTION_BALANCE
|
||||
# All the other validators get the maximum.
|
||||
@@ -74,11 +104,13 @@ def test_small_penalty(spec, state):
|
||||
|
||||
expected_penalty = (
|
||||
state.validators[0].effective_balance // spec.EFFECTIVE_BALANCE_INCREMENT
|
||||
* (3 * total_penalties)
|
||||
* (get_slashing_multiplier(spec) * total_penalties)
|
||||
// total_balance
|
||||
* spec.EFFECTIVE_BALANCE_INCREMENT
|
||||
)
|
||||
assert state.balances[0] == pre_slash_balances[0] - expected_penalty
|
||||
|
||||
assert expected_penalty == 0
|
||||
assert state.balances[0] == pre_slash_balances[0]
|
||||
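To see why the expected penalty rounds to zero above, plug in small illustrative numbers (magnitudes made up for the example):

# Illustrative integer arithmetic showing the round-down to zero:
EFFECTIVE_BALANCE_INCREMENT = 1_000_000_000
effective_balance = 16_000_000_000            # the lone EJECTION_BALANCE validator
total_balance = 1_000 * 32_000_000_000        # everyone else at MAX_EFFECTIVE_BALANCE
total_penalties = 16_000_000_000              # a single small entry in the slashings vector
multiplier = 2                                # e.g. PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR
penalty = (
    effective_balance // EFFECTIVE_BALANCE_INCREMENT   # 16
    * (multiplier * total_penalties)                    # 16 * 32_000_000_000 = 512_000_000_000
    // total_balance                                    # 512e9 // 32e12 == 0
    * EFFECTIVE_BALANCE_INCREMENT
)
assert penalty == 0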
|
||||
|
||||
@with_all_phases
|
||||
@@ -96,7 +128,7 @@ def test_scaled_penalties(spec, state):
|
||||
state.slashings[5] = base + (incr * 6)
|
||||
state.slashings[spec.EPOCHS_PER_SLASHINGS_VECTOR - 1] = base + (incr * 7)
|
||||
|
||||
slashed_count = len(state.validators) // (spec.PROPORTIONAL_SLASHING_MULTIPLIER + 1)
|
||||
slashed_count = len(state.validators) // (get_slashing_multiplier(spec) + 1)
|
||||
|
||||
assert slashed_count > 10
|
||||
|
||||
@@ -134,7 +166,7 @@ def test_scaled_penalties(spec, state):
|
||||
v = state.validators[i]
|
||||
expected_penalty = (
|
||||
v.effective_balance // spec.EFFECTIVE_BALANCE_INCREMENT
|
||||
* (spec.PROPORTIONAL_SLASHING_MULTIPLIER * total_penalties)
|
||||
* (get_slashing_multiplier(spec) * total_penalties)
|
||||
// (total_balance)
|
||||
* spec.EFFECTIVE_BALANCE_INCREMENT
|
||||
)
|
||||
|
||||
@@ -0,0 +1,20 @@
|
||||
from eth2spec.test.context import spec_state_test, with_all_phases
|
||||
from eth2spec.test.helpers.epoch_processing import (
|
||||
run_epoch_processing_with
|
||||
)
|
||||
|
||||
|
||||
def run_process_slashings_reset(spec, state):
|
||||
yield from run_epoch_processing_with(spec, state, 'process_slashings_reset')
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_flush_slashings(spec, state):
|
||||
next_epoch = spec.get_current_epoch(state) + 1
|
||||
state.slashings[next_epoch % spec.EPOCHS_PER_SLASHINGS_VECTOR] = 100
|
||||
assert state.slashings[next_epoch % spec.EPOCHS_PER_SLASHINGS_VECTOR] != 0
|
||||
|
||||
yield from run_process_slashings_reset(spec, state)
|
||||
|
||||
assert state.slashings[next_epoch % spec.EPOCHS_PER_SLASHINGS_VECTOR] == 0
|
||||
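Finally, the reset step exercised here clears the slashings slot that the next epoch will reuse; untyped paraphrase:

def process_slashings_reset(state):
    next_epoch = spec.get_current_epoch(state) + 1
    # Reset the circular slashings vector entry for the next epoch
    state.slashings[next_epoch % spec.EPOCHS_PER_SLASHINGS_VECTOR] = spec.Gwei(0)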
@@ -1,10 +1,22 @@
|
||||
from eth2spec.test.context import with_all_phases, spec_state_test
|
||||
from eth_utils import encode_hex
|
||||
|
||||
from eth2spec.test.context import (
|
||||
MINIMAL,
|
||||
is_post_altair,
|
||||
spec_state_test,
|
||||
with_all_phases,
|
||||
with_configs,
|
||||
)
|
||||
from eth2spec.test.helpers.attestations import get_valid_attestation, next_epoch_with_attestations
|
||||
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
|
||||
from eth2spec.test.helpers.fork_choice import (
|
||||
add_attestation_to_store,
|
||||
add_block_to_store, get_anchor_root,
|
||||
get_genesis_forkchoice_store,
|
||||
tick_and_run_on_attestation,
|
||||
tick_and_run_on_block,
|
||||
get_anchor_root,
|
||||
get_genesis_forkchoice_store_and_block,
|
||||
get_formatted_head_output,
|
||||
on_tick_and_append_step,
|
||||
run_on_block,
|
||||
)
|
||||
from eth2spec.test.helpers.state import (
|
||||
next_epoch,
|
||||
@@ -15,119 +27,180 @@ from eth2spec.test.helpers.state import (
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_genesis(spec, state):
|
||||
test_steps = []
|
||||
# Initialization
|
||||
store = get_genesis_forkchoice_store(spec, state)
|
||||
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||
yield 'anchor_state', state
|
||||
yield 'anchor_block', anchor_block
|
||||
|
||||
anchor_root = get_anchor_root(spec, state)
|
||||
assert spec.get_head(store) == anchor_root
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'genesis_time': int(store.genesis_time),
|
||||
'head': get_formatted_head_output(spec, store),
|
||||
}
|
||||
})
|
||||
|
||||
yield 'steps', test_steps
|
||||
|
||||
if is_post_altair(spec):
|
||||
yield 'description', 'meta', f"Although it's not phase 0, we may use {spec.fork} spec to start testnets."
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_chain_no_attestations(spec, state):
|
||||
test_steps = []
|
||||
# Initialization
|
||||
store = get_genesis_forkchoice_store(spec, state)
|
||||
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||
yield 'anchor_state', state
|
||||
yield 'anchor_block', anchor_block
|
||||
|
||||
anchor_root = get_anchor_root(spec, state)
|
||||
assert spec.get_head(store) == anchor_root
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'head': get_formatted_head_output(spec, store),
|
||||
}
|
||||
})
|
||||
|
||||
# On receiving a block of `GENESIS_SLOT + 1` slot
|
||||
block_1 = build_empty_block_for_next_slot(spec, state)
|
||||
signed_block_1 = state_transition_and_sign_block(spec, state, block_1)
|
||||
add_block_to_store(spec, store, signed_block_1)
|
||||
yield from tick_and_run_on_block(spec, store, signed_block_1, test_steps)
|
||||
|
||||
# On receiving a block of next epoch
|
||||
block_2 = build_empty_block_for_next_slot(spec, state)
|
||||
signed_block_2 = state_transition_and_sign_block(spec, state, block_2)
|
||||
add_block_to_store(spec, store, signed_block_2)
|
||||
yield from tick_and_run_on_block(spec, store, signed_block_2, test_steps)
|
||||
|
||||
assert spec.get_head(store) == spec.hash_tree_root(block_2)
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'head': get_formatted_head_output(spec, store),
|
||||
}
|
||||
})
|
||||
|
||||
yield 'steps', test_steps
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_split_tie_breaker_no_attestations(spec, state):
|
||||
test_steps = []
|
||||
genesis_state = state.copy()
|
||||
|
||||
# Initialization
|
||||
store = get_genesis_forkchoice_store(spec, state)
|
||||
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||
yield 'anchor_state', state
|
||||
yield 'anchor_block', anchor_block
|
||||
anchor_root = get_anchor_root(spec, state)
|
||||
assert spec.get_head(store) == anchor_root
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'head': get_formatted_head_output(spec, store),
|
||||
}
|
||||
})
|
||||
|
||||
# block at slot 1
|
||||
block_1_state = genesis_state.copy()
|
||||
block_1 = build_empty_block_for_next_slot(spec, block_1_state)
|
||||
signed_block_1 = state_transition_and_sign_block(spec, block_1_state, block_1)
|
||||
add_block_to_store(spec, store, signed_block_1)
|
||||
yield from tick_and_run_on_block(spec, store, signed_block_1, test_steps)
|
||||
|
||||
# additional block at slot 1
|
||||
block_2_state = genesis_state.copy()
|
||||
block_2 = build_empty_block_for_next_slot(spec, block_2_state)
|
||||
block_2.body.graffiti = b'\x42' * 32
|
||||
signed_block_2 = state_transition_and_sign_block(spec, block_2_state, block_2)
|
||||
add_block_to_store(spec, store, signed_block_2)
|
||||
yield from tick_and_run_on_block(spec, store, signed_block_2, test_steps)
|
||||
|
||||
highest_root = max(spec.hash_tree_root(block_1), spec.hash_tree_root(block_2))
|
||||
|
||||
assert spec.get_head(store) == highest_root
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'head': get_formatted_head_output(spec, store),
|
||||
}
|
||||
})
|
||||
|
||||
yield 'steps', test_steps
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_shorter_chain_but_heavier_weight(spec, state):
|
||||
test_steps = []
|
||||
genesis_state = state.copy()
|
||||
|
||||
# Initialization
|
||||
store = get_genesis_forkchoice_store(spec, state)
|
||||
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||
yield 'anchor_state', state
|
||||
yield 'anchor_block', anchor_block
|
||||
anchor_root = get_anchor_root(spec, state)
|
||||
assert spec.get_head(store) == anchor_root
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'head': get_formatted_head_output(spec, store),
|
||||
}
|
||||
})
|
||||
|
||||
# build longer tree
|
||||
long_state = genesis_state.copy()
|
||||
for _ in range(3):
|
||||
long_block = build_empty_block_for_next_slot(spec, long_state)
|
||||
signed_long_block = state_transition_and_sign_block(spec, long_state, long_block)
|
||||
add_block_to_store(spec, store, signed_long_block)
|
||||
yield from tick_and_run_on_block(spec, store, signed_long_block, test_steps)
|
||||
|
||||
# build short tree
|
||||
short_state = genesis_state.copy()
|
||||
short_block = build_empty_block_for_next_slot(spec, short_state)
|
||||
short_block.body.graffiti = b'\x42' * 32
|
||||
signed_short_block = state_transition_and_sign_block(spec, short_state, short_block)
|
||||
add_block_to_store(spec, store, signed_short_block)
|
||||
yield from tick_and_run_on_block(spec, store, signed_short_block, test_steps)
|
||||
|
||||
short_attestation = get_valid_attestation(spec, short_state, short_block.slot, signed=True)
|
||||
add_attestation_to_store(spec, store, short_attestation)
|
||||
yield from tick_and_run_on_attestation(spec, store, short_attestation, test_steps)
|
||||
|
||||
assert spec.get_head(store) == spec.hash_tree_root(short_block)
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'head': get_formatted_head_output(spec, store),
|
||||
}
|
||||
})
|
||||
|
||||
yield 'steps', test_steps
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
def test_filtered_block_tree(spec, state):
|
||||
test_steps = []
|
||||
# Initialization
|
||||
store = get_genesis_forkchoice_store(spec, state)
|
||||
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||
yield 'anchor_state', state
|
||||
yield 'anchor_block', anchor_block
|
||||
anchor_root = get_anchor_root(spec, state)
|
||||
assert spec.get_head(store) == anchor_root
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'head': get_formatted_head_output(spec, store),
|
||||
}
|
||||
})
|
||||
|
||||
# transition state past initial couple of epochs
|
||||
next_epoch(spec, state)
|
||||
next_epoch(spec, state)
|
||||
|
||||
assert spec.get_head(store) == anchor_root
|
||||
|
||||
# fill in attestations for entire epoch, justifying the recent epoch
|
||||
prev_state, signed_blocks, state = next_epoch_with_attestations(spec, state, True, False)
|
||||
attestations = [
|
||||
attestation for signed_block in signed_blocks
|
||||
for attestation in signed_block.message.body.attestations
|
||||
]
|
||||
assert state.current_justified_checkpoint.epoch > prev_state.current_justified_checkpoint.epoch
|
||||
|
||||
# tick time forward and add blocks and attestations to store
|
||||
current_time = state.slot * spec.SECONDS_PER_SLOT + store.genesis_time
|
||||
spec.on_tick(store, current_time)
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
for signed_block in signed_blocks:
|
||||
spec.on_block(store, signed_block)
|
||||
for attestation in attestations:
|
||||
spec.on_attestation(store, attestation)
|
||||
yield from run_on_block(spec, store, signed_block, test_steps)
|
||||
|
||||
assert store.justified_checkpoint == state.current_justified_checkpoint
|
||||
|
||||
@@ -135,6 +208,13 @@ def test_filtered_block_tree(spec, state):
|
||||
expected_head_root = spec.hash_tree_root(signed_blocks[-1].message)
|
||||
assert spec.get_head(store) == expected_head_root
|
||||
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'head': get_formatted_head_output(spec, store),
|
||||
'justified_checkpoint_root': encode_hex(store.justified_checkpoint.hash_tree_root()),
|
||||
}
|
||||
})
|
||||
|
||||
#
|
||||
# create branch containing the justified block but not containing enough on
|
||||
# chain votes to justify that block
|
||||
@@ -164,12 +244,20 @@ def test_filtered_block_tree(spec, state):
|
||||
|
||||
# tick time forward to be able to include up to the latest attestation
|
||||
current_time = (attestations[-1].data.slot + 1) * spec.SECONDS_PER_SLOT + store.genesis_time
|
||||
spec.on_tick(store, current_time)
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
|
||||
# include rogue block and associated attestations in the store
|
||||
spec.on_block(store, signed_rogue_block)
|
||||
yield from run_on_block(spec, store, signed_rogue_block, test_steps)
|
||||
|
||||
for attestation in attestations:
|
||||
spec.on_attestation(store, attestation)
|
||||
yield from tick_and_run_on_attestation(spec, store, attestation, test_steps)
|
||||
|
||||
# ensure that get_head still returns the head from the previous branch
|
||||
assert spec.get_head(store) == expected_head_root
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'head': get_formatted_head_output(spec, store)
|
||||
}
|
||||
})
|
||||
|
||||
yield 'steps', test_steps
|
||||
@@ -1,14 +1,29 @@
|
||||
from eth2spec.test.context import PHASE0, spec_test, with_phases, single_phase
|
||||
from eth2spec.test.context import (
|
||||
MINIMAL,
|
||||
is_post_altair,
|
||||
single_phase,
|
||||
spec_test,
|
||||
with_configs,
|
||||
with_all_phases,
|
||||
)
|
||||
from eth2spec.test.helpers.deposits import (
|
||||
prepare_full_genesis_deposits,
|
||||
prepare_random_genesis_deposits,
|
||||
)
|
||||
|
||||
|
||||
@with_phases(([PHASE0]))
|
||||
def get_post_altair_description(spec):
|
||||
return f"Although it's not phase 0, we may use {spec.fork} spec to start testnets."
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_test
|
||||
@single_phase
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
def test_initialize_beacon_state_from_eth1(spec):
|
||||
if is_post_altair(spec):
|
||||
yield 'description', 'meta', get_post_altair_description(spec)
|
||||
|
||||
deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
|
||||
deposits, deposit_root, _ = prepare_full_genesis_deposits(
|
||||
spec,
|
||||
@@ -38,10 +53,14 @@ def test_initialize_beacon_state_from_eth1(spec):
|
||||
yield 'state', state
|
||||
|
||||
|
||||
@with_phases([PHASE0])
|
||||
@with_all_phases
|
||||
@spec_test
|
||||
@single_phase
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
def test_initialize_beacon_state_some_small_balances(spec):
|
||||
if is_post_altair(spec):
|
||||
yield 'description', 'meta', get_post_altair_description(spec)
|
||||
|
||||
main_deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
|
||||
main_deposits, _, deposit_data_list = prepare_full_genesis_deposits(
|
||||
spec, spec.MAX_EFFECTIVE_BALANCE,
|
||||
@@ -79,10 +98,14 @@ def test_initialize_beacon_state_some_small_balances(spec):
|
||||
yield 'state', state
|
||||
|
||||
|
||||
@with_phases([PHASE0])
|
||||
@with_all_phases
|
||||
@spec_test
|
||||
@single_phase
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
def test_initialize_beacon_state_one_topup_activation(spec):
|
||||
if is_post_altair(spec):
|
||||
yield 'description', 'meta', get_post_altair_description(spec)
|
||||
|
||||
# Submit all but one deposit as MAX_EFFECTIVE_BALANCE
|
||||
main_deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT - 1
|
||||
main_deposits, _, deposit_data_list = prepare_full_genesis_deposits(
|
||||
@@ -125,10 +148,14 @@ def test_initialize_beacon_state_one_topup_activation(spec):
|
||||
yield 'state', state
|
||||
|
||||
|
||||
@with_phases([PHASE0])
|
||||
@with_all_phases
|
||||
@spec_test
|
||||
@single_phase
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
def test_initialize_beacon_state_random_invalid_genesis(spec):
|
||||
if is_post_altair(spec):
|
||||
yield 'description', 'meta', get_post_altair_description(spec)
|
||||
|
||||
# Make a bunch of random deposits
|
||||
deposits, _, deposit_data_list = prepare_random_genesis_deposits(
|
||||
spec,
|
||||
@@ -149,10 +176,14 @@ def test_initialize_beacon_state_random_invalid_genesis(spec):
|
||||
yield 'state', state
|
||||
|
||||
|
||||
@with_phases([PHASE0])
|
||||
@with_all_phases
|
||||
@spec_test
|
||||
@single_phase
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
def test_initialize_beacon_state_random_valid_genesis(spec):
|
||||
if is_post_altair(spec):
|
||||
yield 'description', 'meta', get_post_altair_description(spec)
|
||||
|
||||
# Make a bunch of random deposits
|
||||
random_deposits, _, deposit_data_list = prepare_random_genesis_deposits(
|
||||
spec,
|
||||
|
||||
@@ -1,9 +1,20 @@
|
||||
from eth2spec.test.context import PHASE0, spec_test, with_phases, single_phase
|
||||
from eth2spec.test.context import (
|
||||
MINIMAL,
|
||||
is_post_altair,
|
||||
spec_test,
|
||||
single_phase,
|
||||
with_configs,
|
||||
with_all_phases,
|
||||
)
|
||||
from eth2spec.test.helpers.deposits import (
|
||||
prepare_full_genesis_deposits,
|
||||
)
|
||||
|
||||
|
||||
def get_post_altair_description(spec):
|
||||
return f"Although it's not phase 0, we may use {spec.fork} spec to start testnets."
|
||||
|
||||
|
||||
def create_valid_beacon_state(spec):
|
||||
deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
|
||||
deposits, _, _ = prepare_full_genesis_deposits(
|
||||
@@ -30,39 +41,55 @@ def run_is_valid_genesis_state(spec, state, valid=True):
|
||||
assert is_valid == valid
|
||||
|
||||
|
||||
@with_phases([PHASE0])
|
||||
@with_all_phases
|
||||
@spec_test
|
||||
@single_phase
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
def test_is_valid_genesis_state_true(spec):
|
||||
if is_post_altair(spec):
|
||||
yield 'description', 'meta', get_post_altair_description(spec)
|
||||
|
||||
state = create_valid_beacon_state(spec)
|
||||
|
||||
yield from run_is_valid_genesis_state(spec, state, valid=True)
|
||||
|
||||
|
||||
@with_phases([PHASE0])
|
||||
@with_all_phases
|
||||
@spec_test
|
||||
@single_phase
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
def test_is_valid_genesis_state_false_invalid_timestamp(spec):
|
||||
if is_post_altair(spec):
|
||||
yield 'description', 'meta', get_post_altair_description(spec)
|
||||
|
||||
state = create_valid_beacon_state(spec)
|
||||
state.genesis_time = spec.MIN_GENESIS_TIME - 1
|
||||
|
||||
yield from run_is_valid_genesis_state(spec, state, valid=False)
|
||||
|
||||
|
||||
@with_phases([PHASE0])
|
||||
@with_all_phases
|
||||
@spec_test
|
||||
@single_phase
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
def test_is_valid_genesis_state_true_more_balance(spec):
|
||||
if is_post_altair(spec):
|
||||
yield 'description', 'meta', get_post_altair_description(spec)
|
||||
|
||||
state = create_valid_beacon_state(spec)
|
||||
state.validators[0].effective_balance = spec.MAX_EFFECTIVE_BALANCE + 1
|
||||
|
||||
yield from run_is_valid_genesis_state(spec, state, valid=True)
|
||||
|
||||
|
||||
@with_phases([PHASE0])
|
||||
@with_all_phases
|
||||
@spec_test
|
||||
@single_phase
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
def test_is_valid_genesis_state_true_one_more_validator(spec):
|
||||
if is_post_altair(spec):
|
||||
yield 'description', 'meta', get_post_altair_description(spec)
|
||||
|
||||
deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT + 1
|
||||
deposits, _, _ = prepare_full_genesis_deposits(
|
||||
spec,
|
||||
@@ -78,10 +105,14 @@ def test_is_valid_genesis_state_true_one_more_validator(spec):
|
||||
yield from run_is_valid_genesis_state(spec, state, valid=True)
|
||||
|
||||
|
||||
@with_phases([PHASE0])
|
||||
@with_all_phases
|
||||
@spec_test
|
||||
@single_phase
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
def test_is_valid_genesis_state_false_not_enough_validator(spec):
|
||||
if is_post_altair(spec):
|
||||
yield 'description', 'meta', get_post_altair_description(spec)
|
||||
|
||||
deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT - 1
|
||||
deposits, _, _ = prepare_full_genesis_deposits(
|
||||
spec,
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
from eth2spec.test.context import with_all_phases, spec_state_test
|
||||
from eth2spec.test.context import PHASE0, PHASE1, with_all_phases, with_phases, spec_state_test
|
||||
import eth2spec.test.helpers.rewards as rewards_helpers
|
||||
|
||||
|
||||
@@ -32,7 +32,7 @@ def test_full_but_partial_participation(spec, state):
|
||||
yield from rewards_helpers.run_test_full_but_partial_participation(spec, state)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_one_attestation_one_correct(spec, state):
|
||||
yield from rewards_helpers.run_test_one_attestation_one_correct(spec, state)
|
||||
@@ -75,7 +75,7 @@ def test_some_very_low_effective_balances_that_did_not_attest(spec, state):
|
||||
#
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_full_half_correct_target_incorrect_head(spec, state):
|
||||
yield from rewards_helpers.run_test_full_fraction_incorrect(
|
||||
@@ -86,7 +86,7 @@ def test_full_half_correct_target_incorrect_head(spec, state):
|
||||
)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_full_correct_target_incorrect_head(spec, state):
|
||||
yield from rewards_helpers.run_test_full_fraction_incorrect(
|
||||
@@ -97,7 +97,7 @@ def test_full_correct_target_incorrect_head(spec, state):
|
||||
)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_full_half_incorrect_target_incorrect_head(spec, state):
|
||||
yield from rewards_helpers.run_test_full_fraction_incorrect(
|
||||
@@ -108,7 +108,7 @@ def test_full_half_incorrect_target_incorrect_head(spec, state):
|
||||
)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_full_half_incorrect_target_correct_head(spec, state):
|
||||
yield from rewards_helpers.run_test_full_fraction_incorrect(
|
||||
@@ -119,31 +119,31 @@ def test_full_half_incorrect_target_correct_head(spec, state):
|
||||
)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_full_delay_one_slot(spec, state):
|
||||
yield from rewards_helpers.run_test_full_delay_one_slot(spec, state)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_full_delay_max_slots(spec, state):
|
||||
yield from rewards_helpers.run_test_full_delay_max_slots(spec, state)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_full_mixed_delay(spec, state):
|
||||
yield from rewards_helpers.run_test_full_mixed_delay(spec, state)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_proposer_not_in_attestations(spec, state):
|
||||
yield from rewards_helpers.run_test_proposer_not_in_attestations(spec, state)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_duplicate_attestations_at_later_slots(spec, state):
|
||||
yield from rewards_helpers.run_test_duplicate_attestations_at_later_slots(spec, state)
|
||||
|
||||
@@ -1,4 +1,4 @@
from eth2spec.test.context import with_all_phases, spec_state_test
from eth2spec.test.context import PHASE0, PHASE1, with_all_phases, with_phases, spec_state_test
from eth2spec.test.helpers.rewards import leaking
import eth2spec.test.helpers.rewards as rewards_helpers

@@ -38,7 +38,7 @@ def test_full_but_partial_participation_leak(spec, state):
yield from rewards_helpers.run_test_full_but_partial_participation(spec, state)


@with_all_phases
@with_phases([PHASE0, PHASE1])
@spec_state_test
@leaking()
def test_one_attestation_one_correct_leak(spec, state):

@@ -87,7 +87,7 @@ def test_some_very_low_effective_balances_that_did_not_attest_leak(spec, state):
#


@with_all_phases
@with_phases([PHASE0, PHASE1])
@spec_state_test
@leaking()
def test_full_half_correct_target_incorrect_head_leak(spec, state):

@@ -99,7 +99,7 @@ def test_full_half_correct_target_incorrect_head_leak(spec, state):
)


@with_all_phases
@with_phases([PHASE0, PHASE1])
@spec_state_test
@leaking()
def test_full_correct_target_incorrect_head_leak(spec, state):

@@ -111,7 +111,7 @@ def test_full_correct_target_incorrect_head_leak(spec, state):
)


@with_all_phases
@with_phases([PHASE0, PHASE1])
@spec_state_test
@leaking()
def test_full_half_incorrect_target_incorrect_head_leak(spec, state):

@@ -123,7 +123,7 @@ def test_full_half_incorrect_target_incorrect_head_leak(spec, state):
)


@with_all_phases
@with_phases([PHASE0, PHASE1])
@spec_state_test
@leaking()
def test_full_half_incorrect_target_correct_head_leak(spec, state):
@@ -29,6 +29,12 @@ def test_full_random_2(spec, state):
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(3030))


@with_all_phases
@spec_state_test
def test_full_random_3(spec, state):
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(4040))


@with_all_phases
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@spec_test
@@ -35,6 +35,7 @@ from eth2spec.test.context import (
with_configs,
with_custom_state,
large_validator_set,
is_post_altair,
)

@@ -780,15 +781,19 @@ def test_attestation(spec, state):
spec, state, shard_transition=shard_transition, index=index, signed=True, on_time=True
)

if not is_post_altair(spec):
pre_current_attestations_len = len(state.current_epoch_attestations)

# Add to state via block transition
pre_current_attestations_len = len(state.current_epoch_attestations)
attestation_block.body.attestations.append(attestation)
signed_attestation_block = state_transition_and_sign_block(spec, state, attestation_block)

assert len(state.current_epoch_attestations) == pre_current_attestations_len + 1

# Epoch transition should move to previous_epoch_attestations
pre_current_attestations_root = spec.hash_tree_root(state.current_epoch_attestations)
if not is_post_altair(spec):
assert len(state.current_epoch_attestations) == pre_current_attestations_len + 1
# Epoch transition should move to previous_epoch_attestations
pre_current_attestations_root = spec.hash_tree_root(state.current_epoch_attestations)
else:
pre_current_epoch_participation_root = spec.hash_tree_root(state.current_epoch_participation)

epoch_block = build_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH)
signed_epoch_block = state_transition_and_sign_block(spec, state, epoch_block)

@@ -796,8 +801,13 @@ def test_attestation(spec, state):
yield 'blocks', [signed_attestation_block, signed_epoch_block]
yield 'post', state

assert len(state.current_epoch_attestations) == 0
assert spec.hash_tree_root(state.previous_epoch_attestations) == pre_current_attestations_root
if not is_post_altair(spec):
assert len(state.current_epoch_attestations) == 0
assert spec.hash_tree_root(state.previous_epoch_attestations) == pre_current_attestations_root
else:
for index in range(len(state.validators)):
assert state.current_epoch_participation[index] == spec.ParticipationFlags(0b0000_0000)
assert spec.hash_tree_root(state.previous_epoch_participation) == pre_current_epoch_participation_root


# In phase1 a committee is computed for SHARD_COMMITTEE_PERIOD slots ago,
@@ -1,4 +1,4 @@
from eth2spec.test.context import PHASE0, with_all_phases, spec_state_test
from eth2spec.test.context import PHASE0, PHASE1, ALTAIR, with_all_phases, spec_state_test
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation
from eth2spec.test.helpers.state import transition_to, state_transition_and_sign_block, next_epoch, next_slot

@@ -18,12 +18,12 @@ def run_on_attestation(spec, state, store, attestation, valid=True):
spec.on_attestation(store, attestation)

sample_index = indexed_attestation.attesting_indices[0]
if spec.fork == PHASE0:
if spec.fork in (PHASE0, ALTAIR):
latest_message = spec.LatestMessage(
epoch=attestation.data.target.epoch,
root=attestation.data.beacon_block_root,
)
else:
elif spec.fork == PHASE1:
latest_message = spec.LatestMessage(
epoch=attestation.data.target.epoch,
root=attestation.data.beacon_block_root,
@@ -1,5 +1,6 @@
from eth2spec.test.context import (
PHASE0,
ALTAIR,
with_all_phases_except,
spec_state_test,
always_bls,

@@ -12,7 +13,7 @@ from eth2spec.test.helpers.attestations import (
)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@always_bls
def test_on_time_success(spec, state):

@@ -23,7 +24,7 @@ def test_on_time_success(spec, state):
yield from run_attestation_processing(spec, state, attestation)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@always_bls
def test_late_success(spec, state):
@@ -9,6 +9,7 @@ from eth2spec.test.helpers.attestations import (
from eth2spec.test.helpers.state import transition_to, transition_to_valid_shard_slot
from eth2spec.test.context import (
PHASE0,
ALTAIR,
MINIMAL,
expect_assertion_error,
disable_process_reveal_deadlines,

@@ -68,7 +69,7 @@ def run_custody_chunk_response_processing(spec, state, custody_response, valid=T
yield 'post', state


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@with_configs([MINIMAL], reason="too slow")
@disable_process_reveal_deadlines

@@ -92,7 +93,7 @@ def test_challenge_appended(spec, state):
yield from run_chunk_challenge_processing(spec, state, challenge)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@disable_process_reveal_deadlines
@with_configs([MINIMAL], reason="too slow")

@@ -118,7 +119,7 @@ def test_challenge_empty_element_replaced(spec, state):
yield from run_chunk_challenge_processing(spec, state, challenge)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@disable_process_reveal_deadlines
@with_configs([MINIMAL], reason="too slow")

@@ -144,7 +145,7 @@ def test_duplicate_challenge(spec, state):
yield from run_chunk_challenge_processing(spec, state, challenge, valid=False)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@disable_process_reveal_deadlines
@with_configs([MINIMAL], reason="too slow")

@@ -172,7 +173,7 @@ def test_second_challenge(spec, state):
yield from run_chunk_challenge_processing(spec, state, challenge1)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@disable_process_reveal_deadlines
@with_configs([MINIMAL], reason="too slow")

@@ -197,7 +198,7 @@ def test_multiple_epochs_custody(spec, state):
yield from run_chunk_challenge_processing(spec, state, challenge)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@disable_process_reveal_deadlines
@with_configs([MINIMAL], reason="too slow")

@@ -222,7 +223,7 @@ def test_many_epochs_custody(spec, state):
yield from run_chunk_challenge_processing(spec, state, challenge)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@disable_process_reveal_deadlines
@with_configs([MINIMAL], reason="too slow")

@@ -243,7 +244,7 @@ def test_off_chain_attestation(spec, state):
yield from run_chunk_challenge_processing(spec, state, challenge)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@disable_process_reveal_deadlines
@with_configs([MINIMAL], reason="too slow")

@@ -275,7 +276,7 @@ def test_custody_response(spec, state):
yield from run_custody_chunk_response_processing(spec, state, custody_response)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@disable_process_reveal_deadlines
@with_configs([MINIMAL], reason="too slow")

@@ -306,7 +307,7 @@ def test_custody_response_chunk_index_2(spec, state):
yield from run_custody_chunk_response_processing(spec, state, custody_response)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@disable_process_reveal_deadlines
@with_configs([MINIMAL], reason="too slow")

@@ -338,7 +339,7 @@ def test_custody_response_multiple_epochs(spec, state):
yield from run_custody_chunk_response_processing(spec, state, custody_response)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@disable_process_reveal_deadlines
@with_configs([MINIMAL], reason="too slow")
@@ -1,6 +1,7 @@
from eth2spec.test.helpers.custody import get_valid_custody_key_reveal
from eth2spec.test.context import (
PHASE0,
ALTAIR,
with_all_phases_except,
spec_state_test,
expect_assertion_error,

@@ -39,7 +40,7 @@ def run_custody_key_reveal_processing(spec, state, custody_key_reveal, valid=Tru
yield 'post', state


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@always_bls
def test_success(spec, state):

@@ -49,7 +50,7 @@ def test_success(spec, state):
yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@always_bls
def test_reveal_too_early(spec, state):

@@ -58,7 +59,7 @@ def test_reveal_too_early(spec, state):
yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@always_bls
def test_wrong_period(spec, state):

@@ -67,7 +68,7 @@ def test_wrong_period(spec, state):
yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@always_bls
def test_late_reveal(spec, state):

@@ -77,7 +78,7 @@ def test_late_reveal(spec, state):
yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@always_bls
def test_double_reveal(spec, state):
@@ -11,6 +11,7 @@ from eth2spec.test.helpers.state import get_balance, transition_to
from eth2spec.test.context import (
PHASE0,
MINIMAL,
ALTAIR,
with_all_phases_except,
spec_state_test,
expect_assertion_error,

@@ -112,7 +113,7 @@ def run_standard_custody_slashing_test(spec,
yield from run_custody_slashing_processing(spec, state, slashing, valid=valid, correct=correct)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@disable_process_reveal_deadlines
@with_configs([MINIMAL], reason="too slow")

@@ -120,7 +121,7 @@ def test_custody_slashing(spec, state):
yield from run_standard_custody_slashing_test(spec, state)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@disable_process_reveal_deadlines
@with_configs([MINIMAL], reason="too slow")

@@ -128,7 +129,7 @@ def test_incorrect_custody_slashing(spec, state):
yield from run_standard_custody_slashing_test(spec, state, correct=False)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@disable_process_reveal_deadlines
@with_configs([MINIMAL], reason="too slow")

@@ -136,7 +137,7 @@ def test_multiple_epochs_custody(spec, state):
yield from run_standard_custody_slashing_test(spec, state, shard_lateness=spec.SLOTS_PER_EPOCH * 3)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@disable_process_reveal_deadlines
@with_configs([MINIMAL], reason="too slow")

@@ -144,7 +145,7 @@ def test_many_epochs_custody(spec, state):
yield from run_standard_custody_slashing_test(spec, state, shard_lateness=spec.SLOTS_PER_EPOCH * 5)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@disable_process_reveal_deadlines
@with_configs([MINIMAL], reason="too slow")
@@ -2,6 +2,7 @@ from eth2spec.test.helpers.custody import get_valid_early_derived_secret_reveal
from eth2spec.test.helpers.state import next_epoch_via_block, get_balance
from eth2spec.test.context import (
PHASE0,
ALTAIR,
with_all_phases_except,
spec_state_test,
expect_assertion_error,

@@ -41,7 +42,7 @@ def run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, v
yield 'post', state


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@always_bls
def test_success(spec, state):

@@ -50,7 +51,7 @@ def test_success(spec, state):
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@never_bls
def test_reveal_from_current_epoch(spec, state):

@@ -59,7 +60,7 @@ def test_reveal_from_current_epoch(spec, state):
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@never_bls
def test_reveal_from_past_epoch(spec, state):

@@ -69,7 +70,7 @@ def test_reveal_from_past_epoch(spec, state):
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@always_bls
def test_reveal_with_custody_padding(spec, state):

@@ -81,7 +82,7 @@ def test_reveal_with_custody_padding(spec, state):
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, True)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@always_bls
def test_reveal_with_custody_padding_minus_one(spec, state):

@@ -93,7 +94,7 @@ def test_reveal_with_custody_padding_minus_one(spec, state):
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, True)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@never_bls
def test_double_reveal(spec, state):

@@ -114,7 +115,7 @@ def test_double_reveal(spec, state):
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal2, False)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@never_bls
def test_revealer_is_slashed(spec, state):

@@ -124,7 +125,7 @@ def test_revealer_is_slashed(spec, state):
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@never_bls
def test_far_future_epoch(spec, state):
@@ -1,5 +1,6 @@
from eth2spec.test.context import (
PHASE0,
ALTAIR,
with_all_phases_except,
only_full_crosslink,
spec_state_test,

@@ -90,21 +91,21 @@ def run_successful_crosslink_tests(spec, state, target_len_offset_slot):
assert bool(pending_attestation.crosslink_success) is True


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@only_full_crosslink
def test_basic_crosslinks(spec, state):
yield from run_successful_crosslink_tests(spec, state, target_len_offset_slot=1)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@only_full_crosslink
def test_multiple_offset_slots(spec, state):
yield from run_successful_crosslink_tests(spec, state, target_len_offset_slot=2)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@only_full_crosslink
def test_no_winning_root(spec, state):

@@ -152,7 +153,7 @@ def test_no_winning_root(spec, state):
assert state.shard_states == pre_shard_states


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@only_full_crosslink
def test_wrong_shard_transition_root(spec, state):
@@ -8,13 +8,14 @@ from eth2spec.test.helpers.attestations import (
from eth2spec.test.helpers.state import transition_to, transition_to_valid_shard_slot
from eth2spec.test.context import (
PHASE0,
ALTAIR,
MINIMAL,
spec_state_test,
with_all_phases_except,
with_configs,
)
from eth2spec.test.phase0.block_processing.test_process_attestation import run_attestation_processing
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with

from eth2spec.test.phase1.block_processing.test_process_chunk_challenge import (
run_chunk_challenge_processing,

@@ -25,7 +26,7 @@ def run_process_challenge_deadlines(spec, state):
yield from run_epoch_processing_with(spec, state, 'process_challenge_deadlines')


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@with_configs([MINIMAL], reason="too slow")
def test_validator_slashed_after_chunk_challenge(spec, state):
@@ -1,5 +1,6 @@
from eth2spec.test.context import (
PHASE0,
ALTAIR,
)
from eth2spec.test.helpers.custody import (
get_valid_chunk_challenge,

@@ -16,7 +17,7 @@ from eth2spec.test.context import (
spec_state_test,
)
from eth2spec.test.phase0.block_processing.test_process_attestation import run_attestation_processing
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with

from eth2spec.test.phase1.block_processing.test_process_chunk_challenge import (
run_chunk_challenge_processing,

@@ -29,7 +30,7 @@ def run_process_custody_final_updates(spec, state):
yield from run_epoch_processing_with(spec, state, 'process_custody_final_updates')


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
def test_validator_withdrawal_delay(spec, state):
transition_to_valid_shard_slot(spec, state)

@@ -42,7 +43,7 @@ def test_validator_withdrawal_delay(spec, state):
assert state.validators[0].withdrawable_epoch == spec.FAR_FUTURE_EPOCH


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
def test_validator_withdrawal_reenable_after_custody_reveal(spec, state):
transition_to_valid_shard_slot(spec, state)

@@ -67,7 +68,7 @@ def test_validator_withdrawal_reenable_after_custody_reveal(spec, state):
assert state.validators[0].withdrawable_epoch < spec.FAR_FUTURE_EPOCH


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
def test_validator_withdrawal_suspend_after_chunk_challenge(spec, state):
transition_to_valid_shard_slot(spec, state)

@@ -116,7 +117,7 @@ def test_validator_withdrawal_suspend_after_chunk_challenge(spec, state):
assert state.validators[validator_index].withdrawable_epoch == spec.FAR_FUTURE_EPOCH


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
def test_validator_withdrawal_resume_after_chunk_challenge_response(spec, state):
transition_to_valid_shard_slot(spec, state)
@@ -4,12 +4,13 @@ from eth2spec.test.helpers.custody import (
from eth2spec.test.helpers.state import transition_to
from eth2spec.test.context import (
PHASE0,
ALTAIR,
MINIMAL,
with_all_phases_except,
with_configs,
spec_state_test,
)
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with
from eth2spec.test.phase1.block_processing.test_process_custody_key_reveal import run_custody_key_reveal_processing

@@ -17,7 +18,7 @@ def run_process_challenge_deadlines(spec, state):
yield from run_epoch_processing_with(spec, state, 'process_challenge_deadlines')


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@with_configs([MINIMAL], reason="too slow")
def test_validator_slashed_after_reveal_deadline(spec, state):

@@ -37,7 +38,7 @@ def test_validator_slashed_after_reveal_deadline(spec, state):
assert state.validators[0].slashed == 1


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@with_configs([MINIMAL], reason="too slow")
def test_validator_not_slashed_after_reveal(spec, state):
@@ -1,7 +1,9 @@
from typing import Dict, Sequence

from eth2spec.test.context import (
PHASE0, MINIMAL,
PHASE0,
ALTAIR,
MINIMAL,
with_all_phases_except,
spec_state_test,
only_full_crosslink,

@@ -98,7 +100,7 @@ def run_beacon_block_with_shard_blocks(spec, state, target_len_offset_slot, comm
assert post_shard_state.gasprice > pre_gasprice


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@only_full_crosslink
def test_process_beacon_block_with_normal_shard_transition(spec, state):

@@ -112,7 +114,7 @@ def test_process_beacon_block_with_normal_shard_transition(spec, state):
yield from run_beacon_block_with_shard_blocks(spec, state, target_len_offset_slot, committee_index, shard)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@only_full_crosslink
def test_process_beacon_block_with_empty_proposal_transition(spec, state):

@@ -131,7 +133,7 @@ def test_process_beacon_block_with_empty_proposal_transition(spec, state):
#


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@only_full_crosslink
def test_with_shard_transition_with_custody_challenge_and_response(spec, state):

@@ -165,7 +167,7 @@ def test_with_shard_transition_with_custody_challenge_and_response(spec, state):
yield from run_beacon_block(spec, state, block)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@with_configs([MINIMAL])
def test_custody_key_reveal(spec, state):

@@ -179,7 +181,7 @@ def test_custody_key_reveal(spec, state):
yield from run_beacon_block(spec, state, block)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
def test_early_derived_secret_reveal(spec, state):
transition_to_valid_shard_slot(spec, state)

@@ -190,7 +192,7 @@ def test_early_derived_secret_reveal(spec, state):
yield from run_beacon_block(spec, state, block)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@only_full_crosslink
def test_custody_slashing(spec, state):
@@ -1,5 +1,6 @@
from eth2spec.test.context import (
PHASE0,
ALTAIR,
always_bls,
expect_assertion_error,
spec_state_test,

@@ -43,7 +44,7 @@ def run_shard_blocks(spec, shard_state, signed_shard_block, beacon_parent_state,
shard_state.latest_block_root == pre_shard_state.latest_block_root


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@always_bls
@only_full_crosslink

@@ -63,7 +64,7 @@ def test_valid_shard_block(spec, state):
#


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@only_full_crosslink
def test_invalid_shard_parent_root(spec, state):

@@ -79,7 +80,7 @@ def test_invalid_shard_parent_root(spec, state):
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@only_full_crosslink
def test_invalid_beacon_parent_root(spec, state):

@@ -94,7 +95,7 @@ def test_invalid_beacon_parent_root(spec, state):
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@only_full_crosslink
def test_invalid_slot(spec, state):

@@ -110,7 +111,7 @@ def test_invalid_slot(spec, state):
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@only_full_crosslink
def test_invalid_proposer_index(spec, state):

@@ -130,7 +131,7 @@ def test_invalid_proposer_index(spec, state):
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@always_bls
@only_full_crosslink

@@ -151,7 +152,7 @@ def test_out_of_bound_offset(spec, state):
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@always_bls
@only_full_crosslink

@@ -170,7 +171,7 @@ def test_invalid_offset(spec, state):
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@always_bls
@only_full_crosslink

@@ -189,7 +190,7 @@ def test_empty_block_body(spec, state):
#


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@always_bls
@only_full_crosslink

@@ -208,7 +209,7 @@ def test_invalid_signature(spec, state):
#


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@always_bls
@only_full_crosslink

@@ -225,7 +226,7 @@ def test_max_offset(spec, state):
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@always_bls
@only_full_crosslink
@@ -1,6 +1,13 @@
from eth2spec.utils.ssz.ssz_impl import hash_tree_root

from eth2spec.test.context import PHASE0, spec_state_test, with_all_phases_except, never_bls, only_full_crosslink
from eth2spec.test.context import (
PHASE0,
ALTAIR,
spec_state_test,
with_all_phases_except,
never_bls,
only_full_crosslink,
)
from eth2spec.test.helpers.attestations import get_valid_on_time_attestation
from eth2spec.test.helpers.shard_block import (
build_shard_block,

@@ -145,7 +152,7 @@ def create_and_apply_beacon_and_shard_blocks(spec, state, store, shard, shard_bl
return has_shard_committee


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@never_bls # Set to never_bls for testing `check_pending_shard_blocks`
def test_basic(spec, state):

@@ -206,7 +213,7 @@ def create_simple_fork(spec, state, store, shard):
return head_block, forking_block


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@only_full_crosslink
def test_shard_simple_fork(spec, state):

@@ -231,7 +238,7 @@ def test_shard_simple_fork(spec, state):
assert spec.get_shard_head(store, shard) == forking_block.message.hash_tree_root()


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@only_full_crosslink
def test_shard_latest_messages_for_different_shards(spec, state):
@@ -1,12 +1,13 @@
from eth2spec.test.context import (
PHASE0,
ALTAIR,
with_all_phases_except,
spec_state_test,
)
from eth2spec.test.helpers.state import next_epoch


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
def test_get_committee_count_delta(spec, state):
assert spec.get_committee_count_delta(state, 0, 0) == 0

@@ -23,7 +24,7 @@ def test_get_committee_count_delta(spec, state):
)


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
def test_get_start_shard_current_epoch_start(spec, state):
assert state.current_epoch_start_shard == 0

@@ -39,7 +40,7 @@ def test_get_start_shard_current_epoch_start(spec, state):
assert start_shard == state.current_epoch_start_shard


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
def test_get_start_shard_next_slot(spec, state):
next_epoch(spec, state)

@@ -57,7 +58,7 @@ def test_get_start_shard_next_slot(spec, state):
assert start_shard == expected_start_shard


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
def test_get_start_shard_previous_slot(spec, state):
next_epoch(spec, state)

@@ -76,7 +77,7 @@ def test_get_start_shard_previous_slot(spec, state):
assert start_shard == expected_start_shard


@with_all_phases_except([PHASE0])
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
def test_get_start_shard_far_past_epoch(spec, state):
initial_epoch = spec.get_current_epoch(state)
@@ -1,5 +1,4 @@
from typing import Dict, Any
from eth2spec.debug.encode import encode
from eth2spec.utils.ssz.ssz_typing import View
from eth2spec.utils.ssz.ssz_impl import serialize

@@ -39,24 +38,20 @@ def vector_test(description: str = None):
if value is None:
continue
if isinstance(value, View):
yield key, 'data', encode(value)
yield key, 'ssz', serialize(value)
elif isinstance(value, bytes):
yield key, 'data', encode(value)
yield key, 'ssz', value
elif isinstance(value, list) and all([isinstance(el, (View, bytes)) for el in value]):
for i, el in enumerate(value):
if isinstance(el, View):
yield f'{key}_{i}', 'data', encode(el)
yield f'{key}_{i}', 'ssz', serialize(el)
elif isinstance(el, bytes):
yield f'{key}_{i}', 'data', encode(el)
yield f'{key}_{i}', 'ssz', el
yield f'{key}_count', 'meta', len(value)
else:
# Not a ssz value.
# The data will now just be yielded as any python data,
# something that should be encodeable by the generator runner.
# something that should be encodable by the generator runner.
yield key, 'data', value

# check generator mode, may be None/else.
@@ -5,4 +5,4 @@ from remerkleable.complex import Container, Vector, List
from remerkleable.basic import boolean, bit, uint, byte, uint8, uint16, uint32, uint64, uint128, uint256
from remerkleable.bitfields import Bitvector, Bitlist
from remerkleable.byte_arrays import ByteVector, Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, ByteList
from remerkleable.core import BasicView, View
from remerkleable.core import BasicView, View, Path
@@ -132,21 +132,25 @@ Cases are split up too. This enables diffing of parts of the test case, tracking

### `<output part>`

E.g. `pre.yaml`, `deposit.yaml`, `post.yaml`.

Diffing a `pre.yaml` and `post.yaml` provides all the information for testing, good for readability of the change.
Then the difference between pre and post can be compared to anything that changes the pre state, e.g. `deposit.yaml`.

These files allow for custom formats for some parts of the test. E.g. something encoded in SSZ.
Or to avoid large files, the SSZ can be compressed with Snappy.
E.g. `pre.ssz_snappy`, `deposit.ssz_snappy`, `post.ssz_snappy`.

Some yaml files have copies, but formatted as raw SSZ bytes: `pre.ssz`, `deposit.ssz`, `post.ssz`.
The yaml files are intended to be deprecated, and clients should shift to ssz inputs for efficiency.
Deprecation will start once a viewer of SSZ test-cases is in place, to maintain a standard of readable test cases.
This also means that some clients can drop legacy YAML -> JSON/other -> SSZ work-arounds.
(These were implemented to support the uint64 YAML, hex strings, etc. Things that were not idiomatic to their language.)
Diffing a `pre.ssz_snappy` and `post.ssz_snappy` provides all the information for testing, when decompressed and decoded.
Then the difference between pre and post can be compared to anything that changes the pre state, e.g. `deposit.ssz_snappy`.

YAML is generally used for test metadata, and for tests that do not use SSZ: e.g. shuffling and BLS tests.
In this case, there is no point in adding special SSZ types. And the size and efficiency of YAML is acceptable.

#### Common output formats

Between all types of tests, a few formats are common:

- **`.yaml`**: A YAML file containing structured data to describe settings or test contents.
- **`.ssz`**: A file containing raw SSZ-encoded data. Previously widely used in tests, but replaced with compressed variant.
- **`.ssz_snappy`**: Like `.ssz`, but compressed with Snappy block compression.
  Snappy block compression is already applied to SSZ in Eth2 gossip, available in client implementations, and thus chosen as compression method.

YAML will not be deprecated for tests that do not use SSZ: e.g. shuffling and BLS tests.
In this case, no work-around for loading is necessary anyway, and the size and efficiency of YAML is acceptable.
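As a rough sketch of how a client might consume these output parts (assuming the `python-snappy` package and the `decode_bytes` helper exposed by pyspec SSZ types via remerkleable; the `load_ssz_snappy` name is illustrative, not part of the format):

```python
import snappy  # python-snappy, assumed to be installed

from eth2spec.phase0 import spec as phase0_spec


def load_ssz_snappy(path, ssz_type):
    # Read an `.ssz_snappy` output part and decode it into the given SSZ type.
    with open(path, 'rb') as f:
        compressed = f.read()
    raw = snappy.decompress(compressed)  # Snappy block decompression
    return ssz_type.decode_bytes(raw)    # SSZ deserialization


# Example: pre_state = load_ssz_snappy('pre.ssz_snappy', phase0_spec.BeaconState)
```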
#### Special output parts

@@ -15,18 +15,13 @@ description: string -- Optional description of test case, purely for debuggin
bls_setting: int -- see general test-format spec.
```

### `pre.yaml`
### `pre.ssz_snappy`

A YAML-encoded `BeaconState`, the state before running the epoch sub-transition.
An SSZ-snappy encoded `BeaconState`, the state before running the epoch sub-transition.

Also available as `pre.ssz`.
### `post.ssz_snappy`

### `post.yaml`

A YAML-encoded `BeaconState`, the state after applying the epoch sub-transition.

Also available as `post.ssz`.
An SSZ-snappy encoded `BeaconState`, the state after applying the epoch sub-transition.

## Condition
@@ -37,10 +32,17 @@ The provided pre-state is already transitioned to just before the specific sub-t

Sub-transitions:

Sub-transitions:

- `justification_and_finalization`
- `rewards_and_penalties` (limited to `minimal` config)
- `rewards_and_penalties`
- `registry_updates`
- `slashings`
- `final_updates`
- `eth1_data_reset`
- `effective_balance_updates`
- `slashings_reset`
- `randao_mixes_reset`
- `historical_roots_update`
- `participation_record_updates`

The resulting state should match the expected `post` state.
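For illustration only, a minimal harness for a single sub-transition could look like the sketch below; it assumes the pyspec function is simply `process_` plus the sub-transition name from the list above, and that states are compared by hash tree root.

```python
from eth2spec.phase0 import spec


def run_epoch_sub_transition(sub_transition, pre_state, post_state):
    # e.g. sub_transition == 'justification_and_finalization'
    process_fn = getattr(spec, 'process_' + sub_transition)
    state = pre_state.copy()
    process_fn(state)
    # The resulting state should match the expected post state.
    assert state.hash_tree_root() == post_state.hash_tree_root()
```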
@@ -14,11 +14,11 @@ bls_setting: int -- see general test-format spec.
blocks_count: int -- the number of blocks processed in this test.
```

### `pre.yaml`
### `pre.ssz_snappy`

A YAML-encoded `BeaconState`, the state before running the block transitions.
An SSZ-snappy encoded `BeaconState`, the state before running the block transitions.

Also available as `pre.ssz`.
Also available as `pre.ssz_snappy`.

### `blocks_<index>.yaml`

@@ -28,13 +28,11 @@ A series of files, with `<index>` in range `[0, blocks_count)`. Blocks need to b

Each file is a YAML-encoded `SignedBeaconBlock`.

Each block is also available as `blocks_<index>.ssz`
Each block is also available as `blocks_<index>.ssz_snappy`

### `post.yaml`
### `post.ssz_snappy`

A YAML-encoded `BeaconState`, the state after applying the block transitions.

Also available as `post.ssz`.
An SSZ-snappy encoded `BeaconState`, the state after applying the block transitions.
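A hedged sketch of driving such a case with pyspec (decode the parts with a loader like the one shown earlier, then apply the full transition per block):

```python
from eth2spec.phase0 import spec


def run_blocks_case(pre_state, signed_blocks, post_state=None):
    state = pre_state.copy()
    for signed_block in signed_blocks:
        # Full state transition: slot/epoch processing happens in between blocks as needed.
        spec.state_transition(state, signed_block)
    if post_state is not None:
        assert state.hash_tree_root() == post_state.hash_tree_root()
    return state
```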
## Condition

111 tests/formats/fork_choice/README.md Normal file
@@ -0,0 +1,111 @@
# Fork choice tests

The aim of the fork choice tests is to provide test coverage of the various components of the fork choice.

## Test case format

### `meta.yaml`

```yaml
description: string -- Optional. Description of test case, purely for debugging purposes.
bls_setting: int -- see general test-format spec.
```

### `anchor_state.ssz_snappy`

An SSZ-snappy encoded `BeaconState`, the state used to initialize the store with the `get_forkchoice_store(anchor_state: BeaconState, anchor_block: BeaconBlock)` helper.

### `anchor_block.ssz_snappy`

An SSZ-snappy encoded `BeaconBlock`, the block used to initialize the store with the `get_forkchoice_store(anchor_state: BeaconState, anchor_block: BeaconBlock)` helper.

### `steps.yaml`

The steps to execute in sequence. There may be multiple items of the following types:

#### `on_tick` execution step

The parameter that is required for executing `on_tick(store, time)`.

```yaml
{ tick: int } -- to execute `on_tick(store, time)`
```

After this step, the `store` object may have been updated.

#### `on_attestation` execution step

The parameter that is required for executing `on_attestation(store, attestation)`.

```yaml
{ attestation: string } -- the name of the `attestation_<32-byte-root>.ssz_snappy` file. To execute `on_attestation(store, attestation)` with the given attestation.
```

The file is located in the same folder (see below).

After this step, the `store` object may have been updated.

#### `on_block` execution step

The parameter that is required for executing `on_block(store, block)`.

```yaml
{ block: string } -- the name of the `block_<32-byte-root>.ssz_snappy` file. To execute `on_block(store, block)` with the given block.
```

The file is located in the same folder (see below).

After this step, the `store` object may have been updated.

#### Checks step

The checks to verify the current status of `store`.

```yaml
checks: {<store_attribute>: value} -- the assertions.
```

`<store_attribute>` is a field member or property of the [`Store`](../../../specs/phase0/fork-choice.md#store) object that is maintained by the client implementation. Currently, the possible fields include:

```yaml
head: {            -- Encoded 32-byte value from get_head(store)
    slot: slot,
    root: string,
}
time: int -- store.time
genesis_time: int -- store.genesis_time
justified_checkpoint_root: string -- Encoded 32-byte value from store.justified_checkpoint.root
finalized_checkpoint_root: string -- Encoded 32-byte value from store.finalized_checkpoint.root
best_justified_checkpoint_root: string -- Encoded 32-byte value from store.best_justified_checkpoint.root
```

For example:
```yaml
- checks:
    time: 144
    genesis_time: 0
    head: {slot: 17, root: '0xd2724c86002f7e1f8656ab44a341a409ad80e6e70a5225fd94835566deebb66f'}
    justified_checkpoint_root: '0xcea6ecd3d3188e32ebf611f960eebd45b6c6f477a7cff242fa567a42653bfc7c'
    finalized_checkpoint_root: '0xcea6ecd3d3188e32ebf611f960eebd45b6c6f477a7cff242fa567a42653bfc7c'
    best_justified_checkpoint_root: '0xcea6ecd3d3188e32ebf611f960eebd45b6c6f477a7cff242fa567a42653bfc7c'
```

*Note*: Each `checks` step may include one or multiple items. Each item has to be checked against the current store.

### `attestation_<32-byte-root>.ssz_snappy`

`<32-byte-root>` is the hash tree root of the given attestation.

Each file is an SSZ-snappy encoded `Attestation`.

### `block_<32-byte-root>.ssz_snappy`

`<32-byte-root>` is the hash tree root of the given block.

Each file is an SSZ-snappy encoded `SignedBeaconBlock`.

## Condition

1. Deserialize `anchor_state.ssz_snappy` and `anchor_block.ssz_snappy` to initialize the local store object with the `get_forkchoice_store(anchor_state, anchor_block)` helper.
2. Iterate sequentially through `steps.yaml`
   - For each execution, look up the corresponding ssz_snappy file. Execute the corresponding helper function on the current store.
   - For the `on_block` execution step: if `len(block.message.body.attestations) > 0`, execute each attestation with `on_attestation(store, attestation)` after executing `on_block(store, block)`.
   - For each `checks` step, the assertions on the current store must be satisfied.
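The steps file can be replayed with a small driver along the following lines; this is a sketch against the phase 0 fork choice helpers, with `objects` assumed to map step file names to their decoded SSZ objects, and only a few of the listed checks shown.

```python
from eth2spec.phase0 import spec


def run_fork_choice_case(anchor_state, anchor_block, steps, objects):
    store = spec.get_forkchoice_store(anchor_state, anchor_block)
    for step in steps:
        if 'tick' in step:
            spec.on_tick(store, step['tick'])
        elif 'block' in step:
            signed_block = objects[step['block']]
            spec.on_block(store, signed_block)
            # Per the condition above, also feed the block's attestations to the store.
            for attestation in signed_block.message.body.attestations:
                spec.on_attestation(store, attestation)
        elif 'attestation' in step:
            spec.on_attestation(store, objects[step['attestation']])
        elif 'checks' in step:
            checks = step['checks']
            if 'time' in checks:
                assert store.time == checks['time']
            if 'finalized_checkpoint_root' in checks:
                # Roots are 0x-prefixed hex strings in the yaml.
                root_hex = '0x' + bytes(store.finalized_checkpoint.root).hex()
                assert root_hex == checks['finalized_checkpoint_root']
    return store
```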
43 tests/formats/forks/README.md Normal file
@@ -0,0 +1,43 @@
# Forks

The aim of the fork tests is to ensure that a pre-fork state can be transformed
into a valid post-fork state, utilizing the `upgrade` function found in the relevant `fork.md` spec.

There is only one handler: `fork`. Each fork (after genesis) is handled with the same format,
and the particular fork boundary being tested is noted in `meta.yaml`.

## Test case format

### `meta.yaml`

A yaml file to signify which fork boundary is being tested.

```yaml
fork: str -- Fork being transitioned to
```

#### Fork strings

Key of valid `fork` strings that might be found in `meta.yaml`

| String ID | Pre-fork | Post-fork | Function |
| - | - | - | - |
| `altair` | Phase 0 | Altair | `upgrade_to_altair` |

### `pre.ssz_snappy`

An SSZ-snappy encoded `BeaconState`, the state before running the fork transition.

### `post.ssz_snappy`

An SSZ-snappy encoded `BeaconState`, the state after applying the fork transition.

*Note*: This type is the `BeaconState` after the fork and is *not* the same type as `pre`.

## Processing

To process this test, pass `pre` into the upgrade function defined by the `fork` in `meta.yaml`.

## Condition

The resulting state should match the expected `post`.
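Concretely, processing a fork test for the `altair` string from the table above reduces to something like this sketch (using `upgrade_to_altair` from the Altair pyspec; loading of `pre`/`post` is as in the earlier loader sketch):

```python
from eth2spec.altair import spec as altair


def run_fork_case(pre_state, post_state):
    # `pre_state` is a phase 0 BeaconState; the upgrade returns an Altair BeaconState.
    upgraded = altair.upgrade_to_altair(pre_state)
    assert upgraded.hash_tree_root() == post_state.hash_tree_root()
```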
@@ -4,11 +4,9 @@ Tests the initialization of a genesis state based on Eth1 data.

## Test case format

### `eth1_block_hash.yaml`
### `eth1_block_hash.ssz_snappy`

A `Bytes32` hex encoded, with prefix 0x. The root of the Eth1 block.

Also available as `eth1_block_hash.ssz`.
An SSZ-snappy encoded root of the Eth1 block.

### `eth1_timestamp.yaml`

@@ -19,21 +17,19 @@ An integer. The timestamp of the block, in seconds.
A yaml file to help read the deposit count:

```yaml
description: string -- Optional. Description of test case, purely for debugging purposes.
deposits_count: int -- Amount of deposits.
```

### `deposits_<index>.yaml`
### `deposits_<index>.ssz_snappy`

A series of files, with `<index>` in range `[0, deposits_count)`. Deposits need to be processed in order.
Each file is a YAML-encoded `Deposit` object.
Each file is an SSZ-snappy encoded `Deposit` object.

Each deposit is also available as `deposits_<index>.ssz`.
### `state.ssz_snappy`

### `state.yaml`
The expected genesis state. An SSZ-snappy encoded `BeaconState` object.

The expected genesis state. A YAML-encoded `BeaconState` object.

Also available as `state.ssz`.
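A hedged sketch of the check, using the phase 0 `initialize_beacon_state_from_eth1` helper with inputs decoded from the files above:

```python
from eth2spec.phase0 import spec


def run_genesis_init_case(eth1_block_hash, eth1_timestamp, deposits, expected_state):
    state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)
    assert state.hash_tree_root() == expected_state.hash_tree_root()
```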
## Processing
@@ -4,11 +4,18 @@ Tests if a genesis state is valid, i.e. if it counts as trigger to launch.

## Test case format

### `genesis.yaml`
### `meta.yaml`

A `BeaconState`, the state to validate as genesis candidate.
A yaml file to help describe the test case:

```yaml
description: string -- Optional. Description of test case, purely for debugging purposes.
```

### `genesis.ssz_snappy`

An SSZ-snappy encoded `BeaconState`, the state to validate as genesis candidate.

Also available as `genesis.ssz`.
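The validity check itself is a single pyspec call; roughly:

```python
from eth2spec.phase0 import spec


def run_genesis_validity_case(genesis_state, expected_is_valid):
    assert spec.is_valid_genesis_state(genesis_state) == expected_is_valid
```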
### `is_valid.yaml`
@@ -12,23 +12,17 @@ description: string -- Optional description of test case, purely for debuggin
bls_setting: int -- see general test-format spec.
```

### `pre.yaml`
### `pre.ssz_snappy`

A YAML-encoded `BeaconState`, the state before applying the operation.
An SSZ-snappy encoded `BeaconState`, the state before applying the operation.

Also available as `pre.ssz`.
### `<input-name>.ssz_snappy`

### `<input-name>.yaml`
An SSZ-snappy encoded operation object, e.g. a `ProposerSlashing`, or `Deposit`.

A YAML-encoded operation object, e.g. a `ProposerSlashing`, or `Deposit`.
### `post.ssz_snappy`

Also available as `<input-name>.ssz`.

### `post.yaml`

A YAML-encoded `BeaconState`, the state after applying the operation. No value if operation processing is aborted.

Also available as `post.ssz`.
An SSZ-snappy encoded `BeaconState`, the state after applying the operation. No value if operation processing is aborted.

## Condition

@@ -39,14 +33,15 @@ This excludes the other parts of the block-transition.

Operations:

| *`operation-name`* | *`operation-object`* | *`input name`* | *`processing call`* |
|-------------------------|-----------------------|----------------------|--------------------------------------------------------|
| `attestation` | `Attestation` | `attestation` | `process_attestation(state, attestation)` |
| `attester_slashing` | `AttesterSlashing` | `attester_slashing` | `process_attester_slashing(state, attester_slashing)` |
| `block_header` | `BeaconBlock` | **`block`** | `process_block_header(state, block)` |
| `deposit` | `Deposit` | `deposit` | `process_deposit(state, deposit)` |
| `proposer_slashing` | `ProposerSlashing` | `proposer_slashing` | `process_proposer_slashing(state, proposer_slashing)` |
| `voluntary_exit` | `SignedVoluntaryExit` | `voluntary_exit` | `process_voluntary_exit(state, voluntary_exit)` |
| *`operation-name`* | *`operation-object`* | *`input name`* | *`processing call`* |
|-------------------------|-----------------------|----------------------|-----------------------------------------------------------------|
| `attestation` | `Attestation` | `attestation` | `process_attestation(state, attestation)` |
| `attester_slashing` | `AttesterSlashing` | `attester_slashing` | `process_attester_slashing(state, attester_slashing)` |
| `block_header` | `BeaconBlock` | **`block`** | `process_block_header(state, block)` |
| `deposit` | `Deposit` | `deposit` | `process_deposit(state, deposit)` |
| `proposer_slashing` | `ProposerSlashing` | `proposer_slashing` | `process_proposer_slashing(state, proposer_slashing)` |
| `voluntary_exit` | `SignedVoluntaryExit` | `voluntary_exit` | `process_voluntary_exit(state, voluntary_exit)` |
| `sync_aggregate` | `SyncAggregate` | `sync_aggregate` | `process_sync_committee(state, sync_aggregate)` (new in Altair) |

Note that `block_header` is not strictly an operation (and is a full `Block`), but processed in the same manner, and hence included here.
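As an example, a runner for the `attestation` handler might look like the sketch below; the handler-to-function mapping follows the table above, and an absent `post` file means processing is expected to abort (in pyspec, via a failed `assert`).

```python
from eth2spec.phase0 import spec


def run_attestation_operation_case(pre_state, attestation, post_state=None):
    state = pre_state.copy()
    if post_state is None:
        # Invalid case: the processing call is expected to reject the operation.
        try:
            spec.process_attestation(state, attestation)
        except AssertionError:
            return
        raise Exception("operation unexpectedly succeeded")
    spec.process_attestation(state, attestation)
    assert state.hash_tree_root() == post_state.hash_tree_root()
```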
@@ -23,41 +23,29 @@ description: string -- Optional description of test case, purely for debuggin
_Note_: No signature verification happens within rewards sub-functions. These
tests can safely be run with or without BLS enabled.

### `pre.yaml`
### `pre.ssz_snappy`

A YAML-encoded `BeaconState`, the state before running the rewards sub-function.
An SSZ-snappy encoded `BeaconState`, the state before running the rewards sub-function.

Also available as `pre.ssz`.
### `source_deltas.ssz_snappy`

### `source_deltas.yaml`
An SSZ-snappy encoded `Deltas` representing the rewards and penalties returned by the `get_source_deltas` function

A YAML-encoded `Deltas` representing the rewards and penalties returned by the `get_source_deltas` function
### `target_deltas.ssz_snappy`

Also available as `source_deltas.ssz`.
An SSZ-snappy encoded `Deltas` representing the rewards and penalties returned by the `get_target_deltas` function

### `target_deltas.yaml`
### `head_deltas.ssz_snappy`

A YAML-encoded `Deltas` representing the rewards and penalties returned by the `get_target_deltas` function
An SSZ-snappy encoded `Deltas` representing the rewards and penalties returned by the `get_head_deltas` function

Also available as `target_deltas.ssz`.
### `inclusion_delay_deltas.ssz_snappy`

### `head_deltas.yaml`
An SSZ-snappy encoded `Deltas` representing the rewards and penalties returned by the `get_inclusion_delay_deltas` function

A YAML-encoded `Deltas` representing the rewards and penalties returned by the `get_head_deltas` function
### `inactivity_penalty_deltas.ssz_snappy`

Also available as `head_deltas.ssz`.

### `inclusion_delay_deltas.yaml`

A YAML-encoded `Deltas` representing the rewards and penalties returned by the `get_inclusion_delay_deltas` function

Also available as `inclusion_delay_deltas.ssz`.

### `inactivity_penalty_deltas.yaml`

A YAML-encoded `Deltas` representing the rewards and penalties returned by the `get_inactivity_penalty_deltas` function

Also available as `inactivity_penalty_deltas.ssz`.
An SSZ-snappy encoded `Deltas` representing the rewards and penalties returned by the `get_inactivity_penalty_deltas` function
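A sketch of checking one of these files against the phase 0 pyspec helpers (each `get_*_deltas` function returns a rewards list and a penalties list, indexed by validator):

```python
from eth2spec.phase0 import spec


def check_source_deltas(pre_state, expected_rewards, expected_penalties):
    rewards, penalties = spec.get_source_deltas(pre_state)
    assert list(rewards) == list(expected_rewards)
    assert list(penalties) == list(expected_penalties)
```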
## Condition
@@ -14,27 +14,21 @@ blocks_count: int -- the number of blocks processed in this test.
```

### `pre.yaml`
### `pre.ssz_snappy`

A YAML-encoded `BeaconState`, the state before running the block transitions.

Also available as `pre.ssz`.
An SSZ-snappy encoded `BeaconState`, the state before running the block transitions.

### `blocks_<index>.yaml`
### `blocks_<index>.ssz_snappy`

A series of files, with `<index>` in range `[0, blocks_count)`. Blocks need to be processed in order,
following the main transition function (i.e. process slot and epoch transitions in between blocks as normal).

Each file is a YAML-encoded `SignedBeaconBlock`.
Each file is an SSZ-snappy encoded `SignedBeaconBlock`.

Each block is also available as `blocks_<index>.ssz`
### `post.ssz_snappy`

### `post.yaml`

A YAML-encoded `BeaconState`, the state after applying the block transitions.

Also available as `post.ssz`.
An SSZ-snappy encoded `BeaconState`, the state after applying the block transitions.

## Condition
Some files were not shown because too many files have changed in this diff