diff --git a/.circleci/config.yml b/.circleci/config.yml index 4f806b00f..6e9a77c49 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,7 +1,7 @@ version: 2.1 commands: restore_cached_venv: - description: "Restores a cached venv" + description: "Restore a cached venv" parameters: reqs_checksum: type: string @@ -16,7 +16,7 @@ commands: # fallback to using the latest cache if no exact match is found - << parameters.venv_name >>-venv- save_cached_venv: - description: "Saves a venv into a cache" + description: "Save a venv into a cache" parameters: reqs_checksum: type: string @@ -31,6 +31,32 @@ commands: - save_cache: key: << parameters.venv_name >>-venv-<< parameters.reqs_checksum >> paths: << parameters.venv_path >> + restore_pyspec_cached_venv: + description: "Restore the cache with pyspec keys" + steps: + - restore_cached_venv: + venv_name: v2-pyspec + reqs_checksum: cache-{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "test_libs/pyspec/requirements-testing.txt" }} + save_pyspec_cached_venv: + description: "Save a venv into a cache with pyspec keys" + steps: + - save_cached_venv: + venv_name: v2-pyspec + reqs_checksum: cache-{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "test_libs/pyspec/requirements-testing.txt" }} + venv_path: ./test_libs/pyspec/venv + restore_deposit_contract_cached_venv: + description: "Restore the cache with deposit_contract keys" + steps: + - restore_cached_venv: + venv_name: v4-deposit-contract + reqs_checksum: cache-{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "deposit_contract/requirements-testing.txt" }} + save_deposit_contract_cached_venv: + description: "Save a venv into a cache with deposit_contract keys" + steps: + - save_cached_venv: + venv_name: v4-deposit-contract + reqs_checksum: cache-{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "deposit_contract/requirements-testing.txt" }} + venv_path: ./deposit_contract/venv jobs: checkout_specs: docker: @@ 
-52,23 +78,18 @@ jobs: key: v1-specs-repo-{{ .Branch }}-{{ .Revision }} paths: - ~/specs-repo - install_test: + install_pyspec_test: docker: - image: circleci/python:3.6 working_directory: ~/specs-repo steps: - restore_cache: key: v1-specs-repo-{{ .Branch }}-{{ .Revision }} - - restore_cached_venv: - venv_name: v1-pyspec - reqs_checksum: '{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "test_libs/pyspec/requirements-testing.txt" }}' + - restore_pyspec_cached_venv - run: name: Install pyspec requirements command: make install_test - - save_cached_venv: - venv_name: v1-pyspec - reqs_checksum: '{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "test_libs/pyspec/requirements-testing.txt" }}' - venv_path: ./test_libs/pyspec/venv + - save_pyspec_cached_venv test: docker: - image: circleci/python:3.6 @@ -76,22 +97,63 @@ jobs: steps: - restore_cache: key: v1-specs-repo-{{ .Branch }}-{{ .Revision }} - - restore_cached_venv: - venv_name: v1-pyspec - reqs_checksum: '{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "test_libs/pyspec/requirements-testing.txt" }}' + - restore_pyspec_cached_venv - run: name: Run py-tests command: make citest - store_test_results: path: test_libs/pyspec/test-reports + lint: + docker: + - image: circleci/python:3.6 + working_directory: ~/specs-repo + steps: + - restore_cache: + key: v1-specs-repo-{{ .Branch }}-{{ .Revision }} + - restore_pyspec_cached_venv + - run: + name: Run linter + command: make lint + install_deposit_contract_test: + docker: + - image: circleci/python:3.6 + working_directory: ~/specs-repo + steps: + - restore_cache: + key: v1-specs-repo-{{ .Branch }}-{{ .Revision }} + - restore_deposit_contract_cached_venv + - run: + name: Install deposit contract requirements + command: make install_deposit_contract_test + - save_deposit_contract_cached_venv + deposit_contract: + docker: + - image: circleci/python:3.6 + working_directory: ~/specs-repo + steps: + - restore_cache: + key: 
v1-specs-repo-{{ .Branch }}-{{ .Revision }} + - restore_deposit_contract_cached_venv + - run: + name: Run deposit contract test + command: make test_deposit_contract workflows: version: 2.1 test_spec: jobs: - checkout_specs - - install_test: + - install_pyspec_test: requires: - checkout_specs - test: requires: - - install_test + - install_pyspec_test + - lint: + requires: + - test + - install_deposit_contract_test: + requires: + - checkout_specs + - deposit_contract: + requires: + - install_deposit_contract_test diff --git a/.gitignore b/.gitignore index 3dd86fc80..c6b39955f 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,4 @@ eth2.0-spec-tests/ # Dynamically built from Markdown spec test_libs/pyspec/eth2spec/phase0/spec.py +test_libs/pyspec/eth2spec/phase1/spec.py diff --git a/Makefile b/Makefile index 86303680d..f79b89dad 100644 --- a/Makefile +++ b/Makefile @@ -4,6 +4,7 @@ TEST_LIBS_DIR = ./test_libs PY_SPEC_DIR = $(TEST_LIBS_DIR)/pyspec YAML_TEST_DIR = ./eth2.0-spec-tests/tests GENERATOR_DIR = ./test_generators +DEPOSIT_CONTRACT_DIR = ./deposit_contract CONFIGS_DIR = ./configs # Collect a list of generator names @@ -13,10 +14,15 @@ YAML_TEST_TARGETS = $(patsubst $(GENERATOR_DIR)/%, $(YAML_TEST_DIR)/%, $(GENERAT GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%venv, $(GENERATORS)) PY_SPEC_PHASE_0_TARGETS = $(PY_SPEC_DIR)/eth2spec/phase0/spec.py -PY_SPEC_ALL_TARGETS = $(PY_SPEC_PHASE_0_TARGETS) +PY_SPEC_PHASE_0_DEPS = $(SPEC_DIR)/core/0_*.md + +PY_SPEC_PHASE_1_TARGETS = $(PY_SPEC_DIR)/eth2spec/phase1/spec.py +PY_SPEC_PHASE_1_DEPS = $(SPEC_DIR)/core/1_*.md + +PY_SPEC_ALL_TARGETS = $(PY_SPEC_PHASE_0_TARGETS) $(PY_SPEC_PHASE_1_TARGETS) -.PHONY: clean all test citest gen_yaml_tests pyspec phase0 install_test +.PHONY: clean all test citest lint gen_yaml_tests pyspec phase0 phase1 install_test install_deposit_contract_test test_deposit_contract compile_deposit_contract all: $(PY_SPEC_ALL_TARGETS) $(YAML_TEST_DIR) $(YAML_TEST_TARGETS) @@ -25,6 
+31,7 @@ clean: rm -rf $(GENERATOR_VENVS) rm -rf $(PY_SPEC_DIR)/venv $(PY_SPEC_DIR)/.pytest_cache rm -rf $(PY_SPEC_ALL_TARGETS) + rm -rf $(DEPOSIT_CONTRACT_DIR)/venv $(DEPOSIT_CONTRACT_DIR)/.pytest_cache # "make gen_yaml_tests" to run generators gen_yaml_tests: $(PY_SPEC_ALL_TARGETS) $(YAML_TEST_TARGETS) @@ -37,18 +44,32 @@ test: $(PY_SPEC_ALL_TARGETS) cd $(PY_SPEC_DIR); . venv/bin/activate; python -m pytest eth2spec citest: $(PY_SPEC_ALL_TARGETS) - cd $(PY_SPEC_DIR); mkdir -p test-reports/eth2spec; . venv/bin/activate; python -m pytest --junitxml=test-reports/eth2spec/test_results.xml . + cd $(PY_SPEC_DIR); mkdir -p test-reports/eth2spec; . venv/bin/activate; \ + python -m pytest --junitxml=test-reports/eth2spec/test_results_phase0.xml eth2spec + +lint: $(PY_SPEC_ALL_TARGETS) + cd $(PY_SPEC_DIR); . venv/bin/activate; \ + flake8 --ignore=E252,W504,W503 --max-line-length=120 ./eth2spec; + +install_deposit_contract_test: $(PY_SPEC_ALL_TARGETS) + cd $(DEPOSIT_CONTRACT_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements-testing.txt + +compile_deposit_contract: + cd $(DEPOSIT_CONTRACT_DIR); . venv/bin/activate; \ + python tool/compile_deposit_contract.py contracts/validator_registration.v.py; + +test_deposit_contract: + cd $(DEPOSIT_CONTRACT_DIR); . venv/bin/activate; \ + python -m pytest . # "make pyspec" to create the pyspec for all phases. 
pyspec: $(PY_SPEC_ALL_TARGETS) -# "make phase0" to create pyspec for phase0 -phase0: $(PY_SPEC_PHASE_0_TARGETS) - - -$(PY_SPEC_DIR)/eth2spec/phase0/spec.py: - python3 $(SCRIPT_DIR)/phase0/build_spec.py $(SPEC_DIR)/core/0_beacon-chain.md $@ +$(PY_SPEC_PHASE_0_TARGETS): $(PY_SPEC_PHASE_0_DEPS) + python3 $(SCRIPT_DIR)/build_spec.py -p0 $(SPEC_DIR)/core/0_beacon-chain.md $@ +$(PY_SPEC_DIR)/eth2spec/phase1/spec.py: $(PY_SPEC_PHASE_1_DEPS) + python3 $(SCRIPT_DIR)/build_spec.py -p1 $(SPEC_DIR)/core/0_beacon-chain.md $(SPEC_DIR)/core/1_custody-game.md $(SPEC_DIR)/core/1_shard-data-chains.md $@ CURRENT_DIR = ${CURDIR} diff --git a/README.md b/README.md index ad7204f21..2f32df03f 100644 --- a/README.md +++ b/README.md @@ -2,20 +2,20 @@ [![Join the chat at https://gitter.im/ethereum/sharding](https://badges.gitter.im/ethereum/sharding.svg)](https://gitter.im/ethereum/sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -To learn more about sharding and eth2.0/Serenity, see the [sharding FAQ](https://github.com/ethereum/wiki/wiki/Sharding-FAQ) and the [research compendium](https://notes.ethereum.org/s/H1PGqDhpm). +To learn more about sharding and Ethereum 2.0 (Serenity), see the [sharding FAQ](https://github.com/ethereum/wiki/wiki/Sharding-FAQ) and the [research compendium](https://notes.ethereum.org/s/H1PGqDhpm). -This repo hosts the current eth2.0 specifications. Discussions about design rationale and proposed changes can be brought up and discussed as issues. Solidified, agreed upon changes to spec can be made through pull requests. +This repository hosts the current Eth 2.0 specifications. Discussions about design rationale and proposed changes can be brought up and discussed as issues. Solidified, agreed-upon changes to the spec can be made through pull requests. ## Specs -Core specifications for eth2.0 client validation can be found in [specs/core](specs/core). These are divided into phases. Each subsequent phase depends upon the prior. 
The current phases specified are: +Core specifications for Eth 2.0 client validation can be found in [specs/core](specs/core). These are divided into phases. Each subsequent phase depends upon the prior. The current phases specified are: ### Phase 0 * [The Beacon Chain](specs/core/0_beacon-chain.md) * [Fork Choice](specs/core/0_fork-choice.md) * [Deposit Contract](specs/core/0_deposit-contract.md) -* [Honest validator implementation doc](specs/validator/0_beacon-chain-validator.md) +* [Honest Validator](specs/validator/0_beacon-chain-validator.md) ### Phase 1 * [Custody Game](specs/core/1_custody-game.md) @@ -28,9 +28,10 @@ Core specifications for eth2.0 client validation can be found in [specs/core](sp * [General test format](specs/test_formats/README.md) * [Merkle proof formats](specs/light_client/merkle_proofs.md) * [Light client syncing protocol](specs/light_client/sync_protocol.md) +* [Beacon node API for validator](specs/validator/0_beacon-node-validator-api.md) -### Design goals +## Design goals The following are the broad design goals for Ethereum 2.0: * to minimize complexity, even at the cost of some losses in efficiency diff --git a/configs/constant_presets/README.md b/configs/constant_presets/README.md index 45148862e..61c9a3a63 100644 --- a/configs/constant_presets/README.md +++ b/configs/constant_presets/README.md @@ -10,11 +10,11 @@ Later-fork constants can be ignored, e.g. ignore phase1 constants as a client th Each preset is a key-value mapping. **Key**: an `UPPER_SNAKE_CASE` (a.k.a. "macro case") formatted string, name of the constant. -**Value**: can be any of: + +**Value** can be either: - an unsigned integer number, can be up to 64 bits (incl.) - a hexadecimal string, prefixed with `0x` Presets may contain comments to describe the values. -See `mainnet.yaml` for a complete example. - +See [`mainnet.yaml`](./mainnet.yaml) for a complete example. 
diff --git a/configs/constant_presets/mainnet.yaml b/configs/constant_presets/mainnet.yaml index 72d0fdc8f..6ac3f422f 100644 --- a/configs/constant_presets/mainnet.yaml +++ b/configs/constant_presets/mainnet.yaml @@ -72,7 +72,7 @@ MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 # 2**11 (= 2,048) epochs 9 days PERSISTENT_COMMITTEE_PERIOD: 2048 # 2**6 (= 64) epochs ~7 hours -MAX_CROSSLINK_EPOCHS: 64 +MAX_EPOCHS_PER_CROSSLINK: 64 # 2**2 (= 4) epochs 25.6 minutes MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4 @@ -124,4 +124,4 @@ DOMAIN_RANDAO: 1 DOMAIN_ATTESTATION: 2 DOMAIN_DEPOSIT: 3 DOMAIN_VOLUNTARY_EXIT: 4 -DOMAIN_TRANSFER: 5 \ No newline at end of file +DOMAIN_TRANSFER: 5 diff --git a/configs/constant_presets/minimal.yaml b/configs/constant_presets/minimal.yaml index 0a6cab687..73448c3c6 100644 --- a/configs/constant_presets/minimal.yaml +++ b/configs/constant_presets/minimal.yaml @@ -71,9 +71,11 @@ MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 # 2**11 (= 2,048) epochs 9 days PERSISTENT_COMMITTEE_PERIOD: 2048 # 2**6 (= 64) epochs ~7 hours -MAX_CROSSLINK_EPOCHS: 64 +MAX_EPOCHS_PER_CROSSLINK: 64 # 2**2 (= 4) epochs 25.6 minutes MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4 +# [customized] 2**12 (= 4,096) epochs 18 days +EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS: 4096 # State list lengths @@ -123,4 +125,4 @@ DOMAIN_RANDAO: 1 DOMAIN_ATTESTATION: 2 DOMAIN_DEPOSIT: 3 DOMAIN_VOLUNTARY_EXIT: 4 -DOMAIN_TRANSFER: 5 \ No newline at end of file +DOMAIN_TRANSFER: 5 diff --git a/configs/fork_timelines/README.md b/configs/fork_timelines/README.md index c93b415f5..da7445767 100644 --- a/configs/fork_timelines/README.md +++ b/configs/fork_timelines/README.md @@ -3,16 +3,17 @@ This directory contains a set of fork timelines used for testing, testnets, and mainnet. A timeline file contains all the forks known for its target. -Later forks can be ignored, e.g. ignore fork `phase1` as a client that only supports phase 0 currently. +Later forks can be ignored, e.g. 
ignore fork `phase1` as a client that only supports Phase 0 currently. ## Format Each preset is a key-value mapping. **Key**: an `lower_snake_case` (a.k.a. "python case") formatted string, name of the fork. -**Value**: an unsigned integer number, epoch number of activation of the fork + +**Value**: an unsigned integer number, epoch number of activation of the fork. Timelines may contain comments to describe the values. -See `mainnet.yaml` for a complete example. +See [`mainnet.yaml`](./mainnet.yaml) for a complete example. diff --git a/configs/fork_timelines/mainnet.yaml b/configs/fork_timelines/mainnet.yaml index 8d51d6582..0bb3c9db1 100644 --- a/configs/fork_timelines/mainnet.yaml +++ b/configs/fork_timelines/mainnet.yaml @@ -7,6 +7,6 @@ phase0: 67108864 # phase0_funny_fork_name: 67116000 # Example 2: -# Should be equal to PHASE_1_GENESIS_EPOCH +# Should be equal to PHASE_1_FORK_EPOCH # (placeholder in example value here) # phase1: 67163000 diff --git a/deposit_contract/README.md b/deposit_contract/README.md new file mode 100644 index 000000000..16779e777 --- /dev/null +++ b/deposit_contract/README.md @@ -0,0 +1,24 @@ +# Deposit contract + +## How to set up the testing environment? + +Under the `eth2.0-specs` directory, execute: + +```sh +make install_deposit_contract_test +``` + +## How to compile the contract? + +```sh +make compile_deposit_contract +``` + +The ABI and bytecode will be updated at [`contracts/validator_registration.json`](./contracts/validator_registration.json). + + +## How to run tests? 
+ +```sh +make test_deposit_contract +``` diff --git a/scripts/phase0/__init__.py b/deposit_contract/contracts/__init__.py similarity index 100% rename from scripts/phase0/__init__.py rename to deposit_contract/contracts/__init__.py diff --git a/deposit_contract/contracts/validator_registration.json b/deposit_contract/contracts/validator_registration.json new file mode 100644 index 000000000..08d57f80a --- /dev/null +++ b/deposit_contract/contracts/validator_registration.json @@ -0,0 +1 @@ +{"abi": [{"name": "Deposit", "inputs": [{"type": "bytes", "name": "pubkey", "indexed": false}, {"type": "bytes", "name": "withdrawal_credentials", "indexed": false}, {"type": "bytes", "name": "amount", "indexed": false}, {"type": "bytes", "name": "signature", "indexed": false}, {"type": "bytes", "name": "merkle_tree_index", "indexed": false}], "anonymous": false, "type": "event"}, {"name": "Eth2Genesis", "inputs": [{"type": "bytes32", "name": "deposit_root", "indexed": false}, {"type": "bytes", "name": "deposit_count", "indexed": false}, {"type": "bytes", "name": "time", "indexed": false}], "anonymous": false, "type": "event"}, {"outputs": [], "inputs": [], "constant": false, "payable": false, "type": "constructor"}, {"name": "to_little_endian_64", "outputs": [{"type": "bytes", "name": "out"}], "inputs": [{"type": "uint256", "name": "value"}], "constant": true, "payable": false, "type": "function", "gas": 7077}, {"name": "get_deposit_root", "outputs": [{"type": "bytes32", "name": "out"}], "inputs": [], "constant": true, "payable": false, "type": "function", "gas": 79221}, {"name": "get_deposit_count", "outputs": [{"type": "bytes", "name": "out"}], "inputs": [], "constant": true, "payable": false, "type": "function", "gas": 11026}, {"name": "deposit", "outputs": [], "inputs": [{"type": "bytes", "name": "pubkey"}, {"type": "bytes", "name": "withdrawal_credentials"}, {"type": "bytes", "name": "signature"}], "constant": false, "payable": true, "type": "function", "gas": 445994}, 
{"name": "chainStarted", "outputs": [{"type": "bool", "name": "out"}], "inputs": [], "constant": true, "payable": false, "type": "function", "gas": 603}], "bytecode": "0x600035601c52740100000000000000000000000000000000000000006020526f7fffffffffffffffffffffffffffffff6040527fffffffffffffffffffffffffffffffff8000000000000000000000000000000060605274012a05f1fffffffffffffffffffffffffdabf41c006080527ffffffffffffffffffffffffed5fa0e000000000000000000000000000000000060a052341561009e57600080fd5b6101406000601f818352015b600061014051602081106100bd57600080fd5b600060c052602060c020015460208261016001015260208101905061014051602081106100e957600080fd5b600060c052602060c020015460208261016001015260208101905080610160526101609050602060c0825160208401600060025af161012757600080fd5b60c0519050606051600161014051018060405190131561014657600080fd5b809190121561015457600080fd5b6020811061016157600080fd5b600060c052602060c02001555b81516001018083528114156100aa575b505061140756600035601c52740100000000000000000000000000000000000000006020526f7fffffffffffffffffffffffffffffff6040527fffffffffffffffffffffffffffffffff8000000000000000000000000000000060605274012a05f1fffffffffffffffffffffffffdabf41c006080527ffffffffffffffffffffffffed5fa0e000000000000000000000000000000000060a0526380673289600051141561026b57602060046101403734156100b457600080fd5b67ffffffffffffffff6101405111156100cc57600080fd5b60006101605261014051610180526101a060006008818352015b6101605160086000811215610103578060000360020a820461010a565b8060020a82025b905090506101605260ff61018051166101c052610160516101c0516101605101101561013557600080fd5b6101c051610160510161016052610180517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8600081121561017e578060000360020a8204610185565b8060020a82025b90509050610180525b81516001018083528114156100e6575b505060186008602082066101e001602082840111156101bc57600080fd5b60208061020082610160600060046015f15050818152809050905090508051602001806102a0828460006004600a8704601201f16101f957600080fd5b50506102a05160206001820306601f8201039050
6103006102a0516008818352015b8261030051111561022b57610247565b6000610300516102c001535b815160010180835281141561021b575b50505060206102805260406102a0510160206001820306601f8201039050610280f3005b63c5f2892f60005114156103c357341561028457600080fd5b6000610140526002546101605261018060006020818352015b600160016101605116141561031e57600061018051602081106102bf57600080fd5b600160c052602060c02001546020826102200101526020810190506101405160208261022001015260208101905080610220526102209050602060c0825160208401600060025af161031057600080fd5b60c05190506101405261038c565b6000610140516020826101a0010152602081019050610180516020811061034457600080fd5b600060c052602060c02001546020826101a0010152602081019050806101a0526101a09050602060c0825160208401600060025af161038257600080fd5b60c0519050610140525b610160600261039a57600080fd5b60028151048152505b815160010180835281141561029d575b50506101405160005260206000f3005b63621fd13060005114156104995734156103dc57600080fd5b60606101c060246380673289610140526002546101605261015c6000305af161040457600080fd5b6101e0805160200180610260828460006004600a8704601201f161042757600080fd5b50506102605160206001820306601f82010390506102c0610260516008818352015b826102c051111561045957610475565b60006102c05161028001535b8151600101808352811415610449575b5050506020610240526040610260510160206001820306601f8201039050610240f3005b63c47e300d600051141561125657606060046101403760506004356004016101a03760306004356004013511156104cf57600080fd5b60406024356004016102203760206024356004013511156104ef57600080fd5b608060443560040161028037606060443560040135111561050f57600080fd5b63ffffffff6002541061052157600080fd5b60306101a0511461053157600080fd5b6020610220511461054157600080fd5b6060610280511461055157600080fd5b633b9aca00610340526103405161056757600080fd5b61034051340461032052633b9aca0061032051101561058557600080fd5b6060610440602463806732896103c052610320516103e0526103dc6000305af16105ae57600080fd5b610460805160200180610360828460006004600a8704601201f16105d157600080fd5b50506002546104a05260006104c0526104a05160016104a0510110156105f657600080fd
5b60016104a051016104e05261050060006020818352015b600160016104e05116141561062157610674565b6104c060605160018251018060405190131561063c57600080fd5b809190121561064a57600080fd5b8152506104e0600261065b57600080fd5b60028151048152505b815160010180835281141561060d575b505060006101a06030806020846105e001018260208501600060046016f1505080518201915050600060106020820661056001602082840111156106b757600080fd5b60208061058082610520600060046015f15050818152809050905090506010806020846105e001018260208501600060046013f1505080518201915050806105e0526105e09050602060c0825160208401600060025af161071757600080fd5b60c051905061054052600060006040602082066106800161028051828401111561074057600080fd5b6060806106a0826020602088068803016102800160006004601bf1505081815280905090509050602060c0825160208401600060025af161078057600080fd5b60c051905060208261088001015260208101905060006040602060208206610740016102805182840111156107b457600080fd5b606080610760826020602088068803016102800160006004601bf150508181528090509050905060208060208461080001018260208501600060046015f15050805182019150506105205160208261080001015260208101905080610800526108009050602060c0825160208401600060025af161083157600080fd5b60c051905060208261088001015260208101905080610880526108809050602060c0825160208401600060025af161086857600080fd5b60c051905061066052600060006105405160208261092001015260208101905061022060208060208461092001018260208501600060046015f150508051820191505080610920526109209050602060c0825160208401600060025af16108ce57600080fd5b60c0519050602082610aa00101526020810190506000610360600880602084610a2001018260208501600060046012f150508051820191505060006018602082066109a0016020828401111561092357600080fd5b6020806109c082610520600060046015f1505081815280905090509050601880602084610a2001018260208501600060046014f150508051820191505061066051602082610a2001015260208101905080610a2052610a209050602060c0825160208401600060025af161099657600080fd5b60c0519050602082610aa001015260208101905080610aa052610aa09050602060c0825160208401600060025af16109cd57600080fd5b60c051905061090052610b2060006020
818352015b6104c051610b20511215610a62576000610b205160208110610a0357600080fd5b600160c052602060c0200154602082610b4001015260208101905061090051602082610b4001015260208101905080610b4052610b409050602060c0825160208401600060025af1610a5457600080fd5b60c051905061090052610a67565b610a78565b5b81516001018083528114156109e2575b5050610900516104c05160208110610a8f57600080fd5b600160c052602060c02001556002805460018254011015610aaf57600080fd5b60018154018155506060610c4060246380673289610bc0526104a051610be052610bdc6000305af1610ae057600080fd5b610c60805160200180610ca0828460006004600a8704601201f1610b0357600080fd5b505060a0610d2052610d2051610d60526101a0805160200180610d2051610d6001828460006004600a8704601201f1610b3b57600080fd5b5050610d2051610d60015160206001820306601f8201039050610d2051610d6001610d0081516040818352015b83610d0051101515610b7957610b96565b6000610d00516020850101535b8151600101808352811415610b68575b505050506020610d2051610d60015160206001820306601f8201039050610d20510101610d2052610d2051610d8052610220805160200180610d2051610d6001828460006004600a8704601201f1610bed57600080fd5b5050610d2051610d60015160206001820306601f8201039050610d2051610d6001610d0081516020818352015b83610d0051101515610c2b57610c48565b6000610d00516020850101535b8151600101808352811415610c1a575b505050506020610d2051610d60015160206001820306601f8201039050610d20510101610d2052610d2051610da052610360805160200180610d2051610d6001828460006004600a8704601201f1610c9f57600080fd5b5050610d2051610d60015160206001820306601f8201039050610d2051610d6001610d0081516020818352015b83610d0051101515610cdd57610cfa565b6000610d00516020850101535b8151600101808352811415610ccc575b505050506020610d2051610d60015160206001820306601f8201039050610d20510101610d2052610d2051610dc052610280805160200180610d2051610d6001828460006004600a8704601201f1610d5157600080fd5b5050610d2051610d60015160206001820306601f8201039050610d2051610d6001610d0081516060818352015b83610d0051101515610d8f57610dac565b6000610d00516020850101535b8151600101808352811415610d7e575b505050506020610d2051610d60015160206001820306601f82
01039050610d20510101610d2052610d2051610de052610ca0805160200180610d2051610d6001828460006004600a8704601201f1610e0357600080fd5b5050610d2051610d60015160206001820306601f8201039050610d2051610d6001610d0081516020818352015b83610d0051101515610e4157610e5e565b6000610d00516020850101535b8151600101808352811415610e30575b505050506020610d2051610d60015160206001820306601f8201039050610d20510101610d20527fdc5fc95703516abd38fa03c3737ff3b52dc52347055c8028460fdf5bbe2f12ce610d2051610d60a164077359400061032051101515611254576003805460018254011015610ed357600080fd5b60018154018155506201000060035414156112535742610e205242610e405262015180610eff57600080fd5b62015180610e405106610e20511015610f1757600080fd5b42610e405262015180610f2957600080fd5b62015180610e405106610e2051036202a30042610e205242610e405262015180610f5257600080fd5b62015180610e405106610e20511015610f6a57600080fd5b42610e405262015180610f7c57600080fd5b62015180610e405106610e205103011015610f9657600080fd5b6202a30042610e205242610e405262015180610fb157600080fd5b62015180610e405106610e20511015610fc957600080fd5b42610e405262015180610fdb57600080fd5b62015180610e405106610e20510301610e00526020610ee0600463c5f2892f610e8052610e9c6000305af161100f57600080fd5b610ee051610e60526060610f8060246380673289610f0052600254610f2052610f1c6000305af161103f57600080fd5b610fa0805160200180610fe0828460006004600a8704601201f161106257600080fd5b505060606110c06024638067328961104052610e00516110605261105c6000305af161108d57600080fd5b6110e0805160200180611120828460006004600a8704601201f16110b057600080fd5b5050610e60516111e05260606111a0526111a05161120052610fe08051602001806111a0516111e001828460006004600a8704601201f16110f057600080fd5b50506111a0516111e0015160206001820306601f82010390506111a0516111e00161118081516020818352015b836111805110151561112e5761114b565b6000611180516020850101535b815160010180835281141561111d575b5050505060206111a0516111e0015160206001820306601f82010390506111a05101016111a0526111a051611220526111208051602001806111a0516111e001828460006004600a8704601201f16111a257600080fd5b50506111a0516111e00151
60206001820306601f82010390506111a0516111e00161118081516020818352015b83611180511015156111e0576111fd565b6000611180516020850101535b81516001018083528114156111cf575b5050505060206111a0516111e0015160206001820306601f82010390506111a05101016111a0527f08b71ef3f1b58f7a23ffb82e27f12f0888c8403f1ceb0ea7ea26b274e2189d4c6111a0516111e0a160016004555b5b005b63845980e8600051141561127c57341561126f57600080fd5b60045460005260206000f3005b60006000fd5b61018561140703610185600039610185611407036000f3"} \ No newline at end of file diff --git a/deposit_contract/contracts/validator_registration.v.py b/deposit_contract/contracts/validator_registration.v.py new file mode 100644 index 000000000..1d475311a --- /dev/null +++ b/deposit_contract/contracts/validator_registration.v.py @@ -0,0 +1,140 @@ +MIN_DEPOSIT_AMOUNT: constant(uint256) = 1000000000 # Gwei +FULL_DEPOSIT_AMOUNT: constant(uint256) = 32000000000 # Gwei +CHAIN_START_FULL_DEPOSIT_THRESHOLD: constant(uint256) = 65536 # 2**16 +DEPOSIT_CONTRACT_TREE_DEPTH: constant(uint256) = 32 +SECONDS_PER_DAY: constant(uint256) = 86400 +MAX_64_BIT_VALUE: constant(uint256) = 18446744073709551615 # 2**64 - 1 +PUBKEY_LENGTH: constant(uint256) = 48 # bytes +WITHDRAWAL_CREDENTIALS_LENGTH: constant(uint256) = 32 # bytes +SIGNATURE_LENGTH: constant(uint256) = 96 # bytes +MAX_DEPOSIT_COUNT: constant(uint256) = 4294967295 # 2**DEPOSIT_CONTRACT_TREE_DEPTH - 1 + +Deposit: event({ + pubkey: bytes[48], + withdrawal_credentials: bytes[32], + amount: bytes[8], + signature: bytes[96], + merkle_tree_index: bytes[8], +}) +Eth2Genesis: event({deposit_root: bytes32, deposit_count: bytes[8], time: bytes[8]}) + +zerohashes: bytes32[DEPOSIT_CONTRACT_TREE_DEPTH] +branch: bytes32[DEPOSIT_CONTRACT_TREE_DEPTH] +deposit_count: uint256 +full_deposit_count: uint256 +chainStarted: public(bool) + + +@public +def __init__(): + for i in range(DEPOSIT_CONTRACT_TREE_DEPTH - 1): + self.zerohashes[i+1] = sha256(concat(self.zerohashes[i], self.zerohashes[i])) + + +@public +@constant +def 
to_little_endian_64(value: uint256) -> bytes[8]: + assert value <= MAX_64_BIT_VALUE + + # array access for bytes[] not currently supported in vyper so + # reversing bytes using bitwise uint256 manipulations + y: uint256 = 0 + x: uint256 = value + for i in range(8): + y = shift(y, 8) + y = y + bitwise_and(x, 255) + x = shift(x, -8) + + return slice(convert(y, bytes32), start=24, len=8) + + +@public +@constant +def get_deposit_root() -> bytes32: + root: bytes32 = 0x0000000000000000000000000000000000000000000000000000000000000000 + size: uint256 = self.deposit_count + for h in range(DEPOSIT_CONTRACT_TREE_DEPTH): + if bitwise_and(size, 1) == 1: + root = sha256(concat(self.branch[h], root)) + else: + root = sha256(concat(root, self.zerohashes[h])) + size /= 2 + return root + +@public +@constant +def get_deposit_count() -> bytes[8]: + return self.to_little_endian_64(self.deposit_count) + +@payable +@public +def deposit(pubkey: bytes[PUBKEY_LENGTH], + withdrawal_credentials: bytes[WITHDRAWAL_CREDENTIALS_LENGTH], + signature: bytes[SIGNATURE_LENGTH]): + # Prevent edge case in computing `self.branch` when `self.deposit_count == MAX_DEPOSIT_COUNT` + # NOTE: reaching this point with the constants as currently defined is impossible due to the + # uni-directional nature of transfers from eth1 to eth2 and the total ether supply (< 130M). 
+ assert self.deposit_count < MAX_DEPOSIT_COUNT + + assert len(pubkey) == PUBKEY_LENGTH + assert len(withdrawal_credentials) == WITHDRAWAL_CREDENTIALS_LENGTH + assert len(signature) == SIGNATURE_LENGTH + + deposit_amount: uint256 = msg.value / as_wei_value(1, "gwei") + assert deposit_amount >= MIN_DEPOSIT_AMOUNT + amount: bytes[8] = self.to_little_endian_64(deposit_amount) + + index: uint256 = self.deposit_count + + # add deposit to merkle tree + i: int128 = 0 + size: uint256 = index + 1 + for _ in range(DEPOSIT_CONTRACT_TREE_DEPTH): + if bitwise_and(size, 1) == 1: + break + i += 1 + size /= 2 + + zero_bytes_32: bytes32 + pubkey_root: bytes32 = sha256(concat(pubkey, slice(zero_bytes_32, start=0, len=16))) + signature_root: bytes32 = sha256(concat( + sha256(slice(signature, start=0, len=64)), + sha256(concat(slice(signature, start=64, len=32), zero_bytes_32)) + )) + value: bytes32 = sha256(concat( + sha256(concat(pubkey_root, withdrawal_credentials)), + sha256(concat( + amount, + slice(zero_bytes_32, start=0, len=24), + signature_root, + )) + )) + for j in range(DEPOSIT_CONTRACT_TREE_DEPTH): + if j < i: + value = sha256(concat(self.branch[j], value)) + else: + break + self.branch[i] = value + + self.deposit_count += 1 + log.Deposit( + pubkey, + withdrawal_credentials, + amount, + signature, + self.to_little_endian_64(index), + ) + + if deposit_amount >= FULL_DEPOSIT_AMOUNT: + self.full_deposit_count += 1 + if self.full_deposit_count == CHAIN_START_FULL_DEPOSIT_THRESHOLD: + timestamp_day_boundary: uint256 = ( + as_unitless_number(block.timestamp) - + as_unitless_number(block.timestamp) % SECONDS_PER_DAY + + 2 * SECONDS_PER_DAY + ) + new_deposit_root: bytes32 = self.get_deposit_root() + log.Eth2Genesis(new_deposit_root, + self.to_little_endian_64(self.deposit_count), + self.to_little_endian_64(timestamp_day_boundary)) + self.chainStarted = True diff --git a/deposit_contract/requirements-testing.txt b/deposit_contract/requirements-testing.txt new file mode 100644 index 
000000000..b3a90a88a --- /dev/null +++ b/deposit_contract/requirements-testing.txt @@ -0,0 +1,5 @@ +eth-tester[py-evm]==0.1.0b39 +vyper==0.1.0b9 +web3==5.0.0b2 +pytest==3.6.1 +../test_libs/pyspec diff --git a/test_libs/pyspec/eth2spec/test/block_processing/__init__.py b/deposit_contract/tests/__init__.py similarity index 100% rename from test_libs/pyspec/eth2spec/test/block_processing/__init__.py rename to deposit_contract/tests/__init__.py diff --git a/test_libs/pyspec/eth2spec/test/epoch_processing/__init__.py b/deposit_contract/tests/contracts/__init__.py similarity index 100% rename from test_libs/pyspec/eth2spec/test/epoch_processing/__init__.py rename to deposit_contract/tests/contracts/__init__.py diff --git a/deposit_contract/tests/contracts/conftest.py b/deposit_contract/tests/contracts/conftest.py new file mode 100644 index 000000000..69ece247d --- /dev/null +++ b/deposit_contract/tests/contracts/conftest.py @@ -0,0 +1,112 @@ +from random import ( + randint, +) +import re + +import pytest + +import eth_tester +from eth_tester import ( + EthereumTester, + PyEVMBackend, +) +from vyper import ( + compiler, +) +from web3 import Web3 +from web3.providers.eth_tester import ( + EthereumTesterProvider, +) +from .utils import ( + get_deposit_contract_code, + get_deposit_contract_json, +) + + +# Constants +MIN_DEPOSIT_AMOUNT = 1000000000 # Gwei +FULL_DEPOSIT_AMOUNT = 32000000000 # Gwei +CHAIN_START_FULL_DEPOSIT_THRESHOLD = 65536 # 2**16 +DEPOSIT_CONTRACT_TREE_DEPTH = 32 +TWO_TO_POWER_OF_TREE_DEPTH = 2**DEPOSIT_CONTRACT_TREE_DEPTH + + +@pytest.fixture +def tester(): + return EthereumTester(PyEVMBackend()) + + +@pytest.fixture +def a0(tester): + return tester.get_accounts()[0] + + +@pytest.fixture +def w3(tester): + web3 = Web3(EthereumTesterProvider(tester)) + return web3 + + +@pytest.fixture +def registration_contract(w3, tester): + contract_bytecode = get_deposit_contract_json()['bytecode'] + contract_abi = get_deposit_contract_json()['abi'] + registration = 
w3.eth.contract( + abi=contract_abi, + bytecode=contract_bytecode) + tx_hash = registration.constructor().transact() + tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash) + registration_deployed = w3.eth.contract( + address=tx_receipt.contractAddress, + abi=contract_abi + ) + return registration_deployed + + +@pytest.fixture(scope="session") +def chain_start_full_deposit_thresholds(): + return [randint(1, 5), randint(6, 10), randint(11, 15)] + + +@pytest.fixture(params=[0, 1, 2]) +def modified_registration_contract( + request, + w3, + tester, + chain_start_full_deposit_thresholds): + # Set CHAIN_START_FULL_DEPOSIT_THRESHOLD to different threshold t + registration_code = get_deposit_contract_code() + t = str(chain_start_full_deposit_thresholds[request.param]) + modified_registration_code = re.sub( + r'CHAIN_START_FULL_DEPOSIT_THRESHOLD: constant\(uint256\) = [0-9]+', + 'CHAIN_START_FULL_DEPOSIT_THRESHOLD: constant(uint256) = ' + t, + registration_code, + ) + assert modified_registration_code != registration_code + contract_bytecode = compiler.compile_code(modified_registration_code)['bytecode'] + contract_abi = compiler.mk_full_signature(modified_registration_code) + registration = w3.eth.contract( + abi=contract_abi, + bytecode=contract_bytecode) + tx_hash = registration.constructor().transact() + tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash) + registration_deployed = w3.eth.contract( + address=tx_receipt.contractAddress, + abi=contract_abi + ) + setattr( + registration_deployed, + 'chain_start_full_deposit_threshold', + chain_start_full_deposit_thresholds[request.param] + ) + return registration_deployed + + +@pytest.fixture +def assert_tx_failed(tester): + def assert_tx_failed(function_to_test, exception=eth_tester.exceptions.TransactionFailed): + snapshot_id = tester.take_snapshot() + with pytest.raises(exception): + function_to_test() + tester.revert_to_snapshot(snapshot_id) + return assert_tx_failed diff --git 
a/deposit_contract/tests/contracts/test_compile.py b/deposit_contract/tests/contracts/test_compile.py new file mode 100644 index 000000000..fc732a6db --- /dev/null +++ b/deposit_contract/tests/contracts/test_compile.py @@ -0,0 +1,19 @@ +from vyper import ( + compiler, +) + +from .utils import ( + get_deposit_contract_code, + get_deposit_contract_json, +) + + +def test_compile_deposit_contract(): + compiled_deposit_contract_json = get_deposit_contract_json() + + deposit_contract_code = get_deposit_contract_code() + abi = compiler.mk_full_signature(deposit_contract_code) + bytecode = compiler.compile_code(deposit_contract_code)['bytecode'] + + assert abi == compiled_deposit_contract_json["abi"] + assert bytecode == compiled_deposit_contract_json["bytecode"] diff --git a/deposit_contract/tests/contracts/test_deposit.py b/deposit_contract/tests/contracts/test_deposit.py new file mode 100644 index 000000000..8492d6347 --- /dev/null +++ b/deposit_contract/tests/contracts/test_deposit.py @@ -0,0 +1,236 @@ +from random import ( + randint, +) + +import pytest + +import eth_utils +from tests.contracts.conftest import ( + DEPOSIT_CONTRACT_TREE_DEPTH, + FULL_DEPOSIT_AMOUNT, + MIN_DEPOSIT_AMOUNT, +) + +from eth2spec.phase0.spec import ( + DepositData, +) +from eth2spec.utils.hash_function import hash +from eth2spec.utils.ssz.ssz_impl import ( + hash_tree_root, +) + + +def compute_merkle_root(leaf_nodes): + assert len(leaf_nodes) >= 1 + empty_node = b'\x00' * 32 + child_nodes = leaf_nodes[:] + for _ in range(DEPOSIT_CONTRACT_TREE_DEPTH): + parent_nodes = [] + if len(child_nodes) % 2 == 1: + child_nodes.append(empty_node) + for j in range(0, len(child_nodes), 2): + parent_nodes.append(hash(child_nodes[j] + child_nodes[j + 1])) + child_nodes = parent_nodes + empty_node = hash(empty_node + empty_node) + return child_nodes[0] + + +@pytest.fixture +def deposit_input(): + """ + pubkey: bytes[48] + withdrawal_credentials: bytes[32] + signature: bytes[96] + """ + return ( + b'\x11' * 
48, + b'\x22' * 32, + b'\x33' * 96, + ) + + +@pytest.mark.parametrize( + 'value,success', + [ + (0, True), + (10, True), + (55555, True), + (2**64 - 1, True), + (2**64, False), + ] +) +def test_to_little_endian_64(registration_contract, value, success, assert_tx_failed): + call = registration_contract.functions.to_little_endian_64(value) + + if success: + little_endian_64 = call.call() + assert little_endian_64 == (value).to_bytes(8, 'little') + else: + assert_tx_failed( + lambda: call.call() + ) + + +@pytest.mark.parametrize( + 'success,deposit_amount', + [ + (True, FULL_DEPOSIT_AMOUNT), + (True, MIN_DEPOSIT_AMOUNT), + (False, MIN_DEPOSIT_AMOUNT - 1), + (True, FULL_DEPOSIT_AMOUNT + 1) + ] +) +def test_deposit_amount(registration_contract, + w3, + success, + deposit_amount, + assert_tx_failed, + deposit_input): + call = registration_contract.functions.deposit(*deposit_input) + if success: + assert call.transact({"value": deposit_amount * eth_utils.denoms.gwei}) + else: + assert_tx_failed( + lambda: call.transact({"value": deposit_amount * eth_utils.denoms.gwei}) + ) + + +@pytest.mark.parametrize( + 'invalid_pubkey,invalid_withdrawal_credentials,invalid_signature,success', + [ + (False, False, False, True), + (True, False, False, False), + (False, True, False, False), + (False, False, True, False), + ] +) +def test_deposit_inputs(registration_contract, + w3, + assert_tx_failed, + deposit_input, + invalid_pubkey, + invalid_withdrawal_credentials, + invalid_signature, + success): + pubkey = deposit_input[0][2:] if invalid_pubkey else deposit_input[0] + if invalid_withdrawal_credentials: # this one is different to satisfy linter + withdrawal_credentials = deposit_input[1][2:] + else: + withdrawal_credentials = deposit_input[1] + signature = deposit_input[2][2:] if invalid_signature else deposit_input[2] + + call = registration_contract.functions.deposit( + pubkey, + withdrawal_credentials, + signature, + ) + if success: + assert call.transact({"value": 
FULL_DEPOSIT_AMOUNT * eth_utils.denoms.gwei}) + else: + assert_tx_failed( + lambda: call.transact({"value": FULL_DEPOSIT_AMOUNT * eth_utils.denoms.gwei}) + ) + + +def test_deposit_log(registration_contract, a0, w3, deposit_input): + log_filter = registration_contract.events.Deposit.createFilter( + fromBlock='latest', + ) + + deposit_amount_list = [randint(MIN_DEPOSIT_AMOUNT, FULL_DEPOSIT_AMOUNT * 2) for _ in range(3)] + for i in range(3): + registration_contract.functions.deposit( + *deposit_input, + ).transact({"value": deposit_amount_list[i] * eth_utils.denoms.gwei}) + + logs = log_filter.get_new_entries() + assert len(logs) == 1 + log = logs[0]['args'] + + assert log['pubkey'] == deposit_input[0] + assert log['withdrawal_credentials'] == deposit_input[1] + assert log['amount'] == deposit_amount_list[i].to_bytes(8, 'little') + assert log['signature'] == deposit_input[2] + assert log['merkle_tree_index'] == i.to_bytes(8, 'little') + + +def test_deposit_tree(registration_contract, w3, assert_tx_failed, deposit_input): + log_filter = registration_contract.events.Deposit.createFilter( + fromBlock='latest', + ) + + deposit_amount_list = [randint(MIN_DEPOSIT_AMOUNT, FULL_DEPOSIT_AMOUNT * 2) for _ in range(10)] + leaf_nodes = [] + for i in range(0, 10): + tx_hash = registration_contract.functions.deposit( + *deposit_input, + ).transact({"value": deposit_amount_list[i] * eth_utils.denoms.gwei}) + receipt = w3.eth.getTransactionReceipt(tx_hash) + print("deposit transaction consumes %d gas" % receipt['gasUsed']) + + logs = log_filter.get_new_entries() + assert len(logs) == 1 + log = logs[0]['args'] + + assert log["merkle_tree_index"] == i.to_bytes(8, 'little') + + deposit_data = DepositData( + pubkey=deposit_input[0], + withdrawal_credentials=deposit_input[1], + amount=deposit_amount_list[i], + signature=deposit_input[2], + ) + hash_tree_root_result = hash_tree_root(deposit_data) + leaf_nodes.append(hash_tree_root_result) + root = compute_merkle_root(leaf_nodes) + assert 
root == registration_contract.functions.get_deposit_root().call() + + +def test_chain_start(modified_registration_contract, w3, assert_tx_failed, deposit_input): + t = getattr(modified_registration_contract, 'chain_start_full_deposit_threshold') + # CHAIN_START_FULL_DEPOSIT_THRESHOLD is set to t + min_deposit_amount = MIN_DEPOSIT_AMOUNT * eth_utils.denoms.gwei # in wei + full_deposit_amount = FULL_DEPOSIT_AMOUNT * eth_utils.denoms.gwei + log_filter = modified_registration_contract.events.Eth2Genesis.createFilter( + fromBlock='latest', + ) + + index_not_full_deposit = randint(0, t - 1) + for i in range(t): + if i == index_not_full_deposit: + # Deposit with value below FULL_DEPOSIT_AMOUNT + modified_registration_contract.functions.deposit( + *deposit_input, + ).transact({"value": min_deposit_amount}) + logs = log_filter.get_new_entries() + # Eth2Genesis event should not be triggered + assert len(logs) == 0 + else: + # Deposit with value FULL_DEPOSIT_AMOUNT + modified_registration_contract.functions.deposit( + *deposit_input, + ).transact({"value": full_deposit_amount}) + logs = log_filter.get_new_entries() + # Eth2Genesis event should not be triggered + assert len(logs) == 0 + + # Make 1 more deposit with value FULL_DEPOSIT_AMOUNT to trigger Eth2Genesis event + modified_registration_contract.functions.deposit( + *deposit_input, + ).transact({"value": full_deposit_amount}) + logs = log_filter.get_new_entries() + assert len(logs) == 1 + timestamp = int(w3.eth.getBlock(w3.eth.blockNumber)['timestamp']) + timestamp_day_boundary = timestamp + (86400 - timestamp % 86400) + 86400 + log = logs[0]['args'] + assert log['deposit_root'] == modified_registration_contract.functions.get_deposit_root().call() + assert int.from_bytes(log['time'], byteorder='little') == timestamp_day_boundary + assert modified_registration_contract.functions.chainStarted().call() is True + + # Make 1 deposit with value FULL_DEPOSIT_AMOUNT and + # check that Eth2Genesis event is not triggered + 
modified_registration_contract.functions.deposit( + *deposit_input, + ).transact({"value": full_deposit_amount}) + logs = log_filter.get_new_entries() + assert len(logs) == 0 diff --git a/deposit_contract/tests/contracts/utils.py b/deposit_contract/tests/contracts/utils.py new file mode 100644 index 000000000..de7c54489 --- /dev/null +++ b/deposit_contract/tests/contracts/utils.py @@ -0,0 +1,16 @@ +import json +import os + +DIR = os.path.dirname(__file__) + + +def get_deposit_contract_code(): + file_path = os.path.join(DIR, './../../contracts/validator_registration.v.py') + deposit_contract_code = open(file_path).read() + return deposit_contract_code + + +def get_deposit_contract_json(): + file_path = os.path.join(DIR, './../../contracts/validator_registration.json') + deposit_contract_json = open(file_path).read() + return json.loads(deposit_contract_json) diff --git a/deposit_contract/tool/compile_deposit_contract.py b/deposit_contract/tool/compile_deposit_contract.py new file mode 100644 index 000000000..58f974b8d --- /dev/null +++ b/deposit_contract/tool/compile_deposit_contract.py @@ -0,0 +1,33 @@ +import argparse +import json +import os + +from vyper import ( + compiler, +) + +DIR = os.path.dirname(__file__) + + +def generate_compiled_json(file_path: str): + deposit_contract_code = open(file_path).read() + abi = compiler.mk_full_signature(deposit_contract_code) + bytecode = compiler.compile_code(deposit_contract_code)['bytecode'] + contract_json = { + 'abi': abi, + 'bytecode': bytecode, + } + # write json + basename = os.path.basename(file_path) + dirname = os.path.dirname(file_path) + contract_name = basename.split('.')[0] + with open(dirname + "/{}.json".format(contract_name), 'w') as f_write: + json.dump(contract_json, f_write) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("path", type=str, help="the path of the contract") + args = parser.parse_args() + path = args.path + generate_compiled_json(path) diff --git 
a/scripts/README.md b/scripts/README.md new file mode 100644 index 000000000..25b46decf --- /dev/null +++ b/scripts/README.md @@ -0,0 +1,32 @@ +# Building pyspecs from specs.md + +The benefit of the particular spec design is that the given markdown files can be converted to a `spec.py` file for the purposes of testing and linting. The result of this is that bugs are discovered and patched more quickly. + +Specs can be built from either a single markdown document or multiple files that must be combined in a given order. Given 2 spec objects, `build_spec.combine_spec_objects` will combine them into a single spec object which, subsequently, can be converted into a `specs.py`. + +## Usage + +For usage of the spec builder run `python3 -m build_spec --help`. + +## `@Labels` and inserts + +The functioning of the spec combiner is largely automatic in that given `spec0.md` and `spec1.md`, SSZ Objects will be extended and old functions will be overwritten. Extra functionality is provided for more granular control over how files are combined. In the event that only a small portion of code is to be added to an existing function, insert functionality is provided. This saves having to completely redefine the old function from `spec0.md` in `spec1.md`. This is done by marking where the change is to occur in the old file and marking which code is to be inserted in the new file. This is done as follows: + +* In the old file, a label is added as a python comment marking where the code is to be inserted. This would appear as follows in `spec0.md`: + +```python +def foo(x): + x << 1 + # @YourLabelHere + return x +``` + +* In spec1, the new code could then be inserted by having a code-block that looked as follows: + +```python +#begin insert @YourLabelHere + x += x +#end insert @YourLabelHere +``` + +**Note** that the code to be inserted has the **same level of indentation** as the surrounding code of its destination insert point. 
diff --git a/scripts/__init__.py b/scripts/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/scripts/build_spec.py b/scripts/build_spec.py new file mode 100644 index 000000000..7a51970e3 --- /dev/null +++ b/scripts/build_spec.py @@ -0,0 +1,277 @@ +import re +from function_puller import ( + get_spec, + SpecObject, +) +from argparse import ArgumentParser +from typing import ( + Dict, + List, + Optional, +) + + +PHASE0_IMPORTS = '''from typing import ( + Any, + Dict, + List, + NewType, + Tuple, +) + +from eth2spec.utils.ssz.ssz_impl import ( + hash_tree_root, + signing_root, +) +from eth2spec.utils.ssz.ssz_typing import ( + # unused: uint8, uint16, uint32, uint128, uint256, + uint64, Container, Vector, BytesN +) +from eth2spec.utils.bls import ( + bls_aggregate_pubkeys, + bls_verify, + bls_verify_multiple, +) +# Note: 'int' type defaults to being interpreted as a uint64 by SSZ implementation. + +from eth2spec.utils.hash_function import hash +''' +PHASE1_IMPORTS = '''from typing import ( + Any, + Dict, + List, + NewType, + Tuple, +) + +from eth2spec.utils.ssz.ssz_impl import ( + hash_tree_root, + signing_root, + serialize, + is_empty, +) +from eth2spec.utils.ssz.ssz_typing import ( + # unused: uint8, uint16, uint32, uint128, uint256, + uint64, Container, Vector, BytesN +) +from eth2spec.utils.bls import ( + bls_aggregate_pubkeys, + bls_verify, + bls_verify_multiple, +) + +from eth2spec.utils.hash_function import hash +''' +NEW_TYPES = { + 'Slot': 'int', + 'Epoch': 'int', + 'Shard': 'int', + 'ValidatorIndex': 'int', + 'Gwei': 'int', +} +BYTE_TYPES = [4, 32, 48, 96] +SUNDRY_FUNCTIONS = ''' +def get_ssz_type_by_name(name: str) -> Container: + return globals()[name] + + +# Monkey patch validator compute committee code +_compute_committee = compute_committee +committee_cache = {} + + +def compute_committee(indices: List[ValidatorIndex], seed: Bytes32, index: int, count: int) -> List[ValidatorIndex]: + param_hash = (hash_tree_root(indices), seed, 
index, count) + + if param_hash in committee_cache: + return committee_cache[param_hash] + else: + ret = _compute_committee(indices, seed, index, count) + committee_cache[param_hash] = ret + return ret + + +# Monkey patch hash cache +_hash = hash +hash_cache = {} + + +def hash(x): + if x in hash_cache: + return hash_cache[x] + else: + ret = _hash(x) + hash_cache[x] = ret + return ret + + +# Access to overwrite spec constants based on configuration +def apply_constants_preset(preset: Dict[str, Any]): + global_vars = globals() + for k, v in preset.items(): + global_vars[k] = v + + # Deal with derived constants + global_vars['GENESIS_EPOCH'] = slot_to_epoch(GENESIS_SLOT) + + # Initialize SSZ types again, to account for changed lengths + init_SSZ_types() +''' + + +def objects_to_spec(functions: Dict[str, str], + constants: Dict[str, str], + ssz_objects: Dict[str, str], + inserts: Dict[str, str], + imports: Dict[str, str], + new_types: Dict[str, str], + byte_types: List[int], + ) -> str: + """ + Given all the objects that constitute a spec, combine them into a single pyfile. 
+ """ + new_type_definitions = \ + '\n'.join(['''%s = NewType('%s', %s)''' % (key, key, value) for key, value in new_types.items()]) + new_type_definitions += '\n' + '\n'.join(['Bytes%s = BytesN[%s]' % (n, n) for n in byte_types]) + functions_spec = '\n\n'.join(functions.values()) + constants_spec = '\n'.join(map(lambda x: '%s = %s' % (x, constants[x]), constants)) + ssz_objects_instantiation_spec = '\n\n'.join(ssz_objects.values()) + ssz_objects_reinitialization_spec = ( + 'def init_SSZ_types():\n global_vars = globals()\n\n ' + + '\n\n '.join([re.sub(r'(?!\n\n)\n', r'\n ', value[:-1]) for value in ssz_objects.values()]) + + '\n\n' + + '\n'.join(map(lambda x: ' global_vars[\'%s\'] = %s' % (x, x), ssz_objects.keys())) + ) + spec = ( + imports + + '\n' + new_type_definitions + + '\n\n' + constants_spec + + '\n\n\n' + ssz_objects_instantiation_spec + + '\n\n' + functions_spec + + '\n' + SUNDRY_FUNCTIONS + + '\n\n' + ssz_objects_reinitialization_spec + + '\n' + ) + # Handle @inserts + for key, value in inserts.items(): + spec = re.sub('[ ]*# %s\\n' % key, value, spec) + return spec + + +def combine_functions(old_functions: Dict[str, str], new_functions: Dict[str, str]) -> Dict[str, str]: + for key, value in new_functions.items(): + old_functions[key] = value + return old_functions + + +def combine_constants(old_constants: Dict[str, str], new_constants: Dict[str, str]) -> Dict[str, str]: + for key, value in new_constants.items(): + old_constants[key] = value + return old_constants + + +def dependency_order_ssz_objects(objects: Dict[str, str]) -> None: + """ + Determines which SSZ Object is depenedent on which other and orders them appropriately + """ + items = list(objects.items()) + for key, value in items: + dependencies = re.findall(r'(: [A-Z][\w[]*)', value) + dependencies = map(lambda x: re.sub(r'\W|Vector|List|Container|uint\d+|Bytes\d+|bytes', '', x), dependencies) + for dep in dependencies: + if dep in NEW_TYPES or len(dep) == 0: + continue + key_list = 
list(objects.keys()) + for item in [dep, key] + key_list[key_list.index(dep)+1:]: + objects[item] = objects.pop(item) + + +def combine_ssz_objects(old_objects: Dict[str, str], new_objects: Dict[str, str]) -> Dict[str, str]: + """ + Takes in old spec and new spec ssz objects, combines them, + and returns the newer versions of the objects in dependency order. + """ + for key, value in new_objects.items(): + if key in old_objects: + # remove trailing newline + old_objects[key] = old_objects[key] + # remove leading variable name + value = re.sub(r'^class [\w]*\(Container\):\n', '', value) + old_objects[key] = old_objects.get(key, '') + value + dependency_order_ssz_objects(old_objects) + return old_objects + + +# inserts are handeled the same way as functions +combine_inserts = combine_functions + + +def combine_spec_objects(spec0: SpecObject, spec1: SpecObject) -> SpecObject: + """ + Takes in two spec variants (as tuples of their objects) and combines them using the appropriate combiner function. 
+ """ + functions0, constants0, ssz_objects0, inserts0 = spec0 + functions1, constants1, ssz_objects1, inserts1 = spec1 + functions = combine_functions(functions0, functions1) + constants = combine_constants(constants0, constants1) + ssz_objects = combine_ssz_objects(ssz_objects0, ssz_objects1) + inserts = combine_inserts(inserts0, inserts1) + return functions, constants, ssz_objects, inserts + + +def build_phase0_spec(sourcefile: str, outfile: str=None) -> Optional[str]: + functions, constants, ssz_objects, inserts = get_spec(sourcefile) + spec = objects_to_spec(functions, constants, ssz_objects, inserts, PHASE0_IMPORTS, NEW_TYPES, BYTE_TYPES) + if outfile is not None: + with open(outfile, 'w') as out: + out.write(spec) + return spec + + +def build_phase1_spec(phase0_sourcefile: str, + phase1_custody_sourcefile: str, + phase1_shard_sourcefile: str, + outfile: str=None) -> Optional[str]: + phase0_spec = get_spec(phase0_sourcefile) + phase1_custody = get_spec(phase1_custody_sourcefile) + phase1_shard_data = get_spec(phase1_shard_sourcefile) + spec_objects = phase0_spec + for value in [phase1_custody, phase1_shard_data]: + spec_objects = combine_spec_objects(spec_objects, value) + spec = objects_to_spec(*spec_objects, PHASE1_IMPORTS, NEW_TYPES, BYTE_TYPES) + if outfile is not None: + with open(outfile, 'w') as out: + out.write(spec) + return spec + + +if __name__ == '__main__': + description = ''' +Build the specs from the md docs. 
+If building phase 0: + 1st argument is input spec.md + 2nd argument is output spec.py + +If building phase 1: + 1st argument is input spec_phase0.md + 2nd argument is input spec_phase1_custody.md + 3rd argument is input spec_phase1_shard_data.md + 4th argument is output spec.py +''' + parser = ArgumentParser(description=description) + parser.add_argument("-p", "--phase", dest="phase", type=int, default=0, help="Build for phase #") + parser.add_argument(dest="files", help="Input and output files", nargs="+") + + args = parser.parse_args() + if args.phase == 0: + if len(args.files) == 2: + build_phase0_spec(*args.files) + else: + print(" Phase 0 requires an output as well as an input file.") + elif args.phase == 1: + if len(args.files) == 4: + build_phase1_spec(*args.files) + else: + print(" Phase 1 requires an output as well as 3 input files (phase0.md and phase1.md, phase1.md)") + else: + print("Invalid phase: {0}".format(args.phase)) diff --git a/scripts/function_puller.py b/scripts/function_puller.py new file mode 100644 index 000000000..303d4ec2f --- /dev/null +++ b/scripts/function_puller.py @@ -0,0 +1,83 @@ +import re +from typing import Dict, Tuple, NewType + + +FUNCTION_REGEX = r'^def [\w_]*' +BEGIN_INSERT_REGEX = r'# begin insert ' +END_INSERT_REGEX = r'# end insert' + +SpecObject = NewType('SpecObjects', Tuple[Dict[str, str], Dict[str, str], Dict[str, str], Dict[str, str]]) + + +def get_spec(file_name: str) -> SpecObject: + """ + Takes in the file name of a spec.md file, opens it and returns the following objects: + functions = {function_name: function_code} + constants= {constant_name: constant_code} + ssz_objects= {object_name: object} + inserts= {insert_tag: code to be inserted} + + Note: This function makes heavy use of the inherent ordering of dicts, + if this is not supported by your python version, it will not work. 
+ """ + pulling_from = None # line number of start of latest object + current_name = None # most recent section title + insert_name = None # stores the label of the current insert object + functions = {} + constants = {} + ssz_objects = {} + inserts = {} + function_matcher = re.compile(FUNCTION_REGEX) + inserts_matcher = re.compile(BEGIN_INSERT_REGEX) + for linenum, line in enumerate(open(file_name).readlines()): + line = line.rstrip() + if pulling_from is None and len(line) > 0 and line[0] == '#' and line[-1] == '`': + current_name = line[line[:-1].rfind('`') + 1: -1] + if line[:9] == '```python': + assert pulling_from is None + pulling_from = linenum + 1 + elif line[:3] == '```': + pulling_from = None + elif inserts_matcher.match(line) is not None: + # Find @insert names + insert_name = re.search(r'@[\w]*', line).group(0) + elif insert_name is not None: + # In insert mode, either the next line is more code, or the end of the insert + if re.match(END_INSERT_REGEX, line) is not None: + insert_name = None + else: + inserts[insert_name] = inserts.get(insert_name, '') + line + '\n' + else: + # Handle function definitions & ssz_objects + if pulling_from is not None: + # SSZ Object + if len(line) > 18 and line[:6] == 'class ' and line[-12:] == '(Container):': + name = line[6:-12] + # Check consistency with markdown header + assert name == current_name + is_ssz = True + # function definition + elif function_matcher.match(line) is not None: + current_name = function_matcher.match(line).group(0) + is_ssz = False + if is_ssz: + ssz_objects[current_name] = ssz_objects.get(current_name, '') + line + '\n' + else: + functions[current_name] = functions.get(current_name, '') + line + '\n' + # Handle constant table entries + elif pulling_from is None and len(line) > 0 and line[0] == '|': + row = line[1:].split('|') + if len(row) >= 2: + for i in range(2): + row[i] = row[i].strip().strip('`') + if '`' in row[i]: + row[i] = row[i][:row[i].find('`')] + eligible = True + if row[0][0] 
not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_': + eligible = False + for c in row[0]: + if c not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789': + eligible = False + if eligible: + constants[row[0]] = row[1].replace('**TBD**', '0x1234567890123456789012345678901234567890') + return functions, constants, ssz_objects, inserts diff --git a/scripts/phase0/build_spec.py b/scripts/phase0/build_spec.py deleted file mode 100644 index 26b0e5a8a..000000000 --- a/scripts/phase0/build_spec.py +++ /dev/null @@ -1,92 +0,0 @@ -import sys -import function_puller - - -def build_phase0_spec(sourcefile, outfile): - code_lines = [] - code_lines.append(""" -from typing import ( - Any, - Dict, - List, - NewType, - Tuple, -) -from eth2spec.utils.minimal_ssz import * -from eth2spec.utils.bls import * - -""") - for i in (1, 2, 3, 4, 8, 32, 48, 96): - code_lines.append("def int_to_bytes%d(x): return x.to_bytes(%d, 'little')" % (i, i)) - - code_lines.append(""" - -# stub, will get overwritten by real var -SLOTS_PER_EPOCH = 64 - - -Slot = NewType('Slot', int) # uint64 -Epoch = NewType('Epoch', int) # uint64 -Shard = NewType('Shard', int) # uint64 -ValidatorIndex = NewType('ValidatorIndex', int) # uint64 -Gwei = NewType('Gwei', int) # uint64 -Bytes32 = NewType('Bytes32', bytes) # bytes32 -BLSPubkey = NewType('BLSPubkey', bytes) # bytes48 -BLSSignature = NewType('BLSSignature', bytes) # bytes96 -Store = None -""") - - code_lines += function_puller.get_spec(sourcefile) - - code_lines.append(""" -# Monkey patch validator compute committee code -_compute_committee = compute_committee -committee_cache = {} - - -def compute_committee(indices: List[ValidatorIndex], seed: Bytes32, index: int, count: int) -> List[ValidatorIndex]: - param_hash = (hash_tree_root(indices), seed, index, count) - - if param_hash in committee_cache: - return committee_cache[param_hash] - else: - ret = _compute_committee(indices, seed, index, count) - committee_cache[param_hash] = ret - return ret - - -# Monkey patch hash cache -_hash = 
hash -hash_cache = {} - - -def hash(x): - if x in hash_cache: - return hash_cache[x] - else: - ret = _hash(x) - hash_cache[x] = ret - return ret - -# Access to overwrite spec constants based on configuration -def apply_constants_preset(preset: Dict[str, Any]): - global_vars = globals() - for k, v in preset.items(): - global_vars[k] = v - - # Deal with derived constants - global_vars['GENESIS_EPOCH'] = slot_to_epoch(GENESIS_SLOT) - - # Initialize SSZ types again, to account for changed lengths - init_SSZ_types() -""") - - with open(outfile, 'w') as out: - out.write("\n".join(code_lines)) - - -if __name__ == '__main__': - if len(sys.argv) < 3: - print("Usage: ") - build_phase0_spec(sys.argv[1], sys.argv[2]) - diff --git a/scripts/phase0/function_puller.py b/scripts/phase0/function_puller.py deleted file mode 100644 index 1fad41fa9..000000000 --- a/scripts/phase0/function_puller.py +++ /dev/null @@ -1,71 +0,0 @@ -import sys -from typing import List - - -def get_spec(file_name: str) -> List[str]: - code_lines = [] - pulling_from = None - current_name = None - current_typedef = None - type_defs = [] - for linenum, line in enumerate(open(sys.argv[1]).readlines()): - line = line.rstrip() - if pulling_from is None and len(line) > 0 and line[0] == '#' and line[-1] == '`': - current_name = line[line[:-1].rfind('`') + 1: -1] - if line[:9] == '```python': - assert pulling_from is None - pulling_from = linenum + 1 - elif line[:3] == '```': - if pulling_from is None: - pulling_from = linenum - else: - if current_typedef is not None: - assert code_lines[-1] == '}' - code_lines[-1] = '})' - current_typedef[-1] = '})' - type_defs.append((current_name, current_typedef)) - pulling_from = None - current_typedef = None - else: - if pulling_from == linenum and line == '{': - code_lines.append('%s = SSZType({' % current_name) - current_typedef = ['global_vars["%s"] = SSZType({' % current_name] - elif pulling_from is not None: - # Add some whitespace between functions - if line[:3] == 
'def': - code_lines.append('') - code_lines.append('') - code_lines.append(line) - # Remember type def lines - if current_typedef is not None: - current_typedef.append(line) - elif pulling_from is None and len(line) > 0 and line[0] == '|': - row = line[1:].split('|') - if len(row) >= 2: - for i in range(2): - row[i] = row[i].strip().strip('`') - if '`' in row[i]: - row[i] = row[i][:row[i].find('`')] - eligible = True - if row[0][0] not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_': - eligible = False - for c in row[0]: - if c not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789': - eligible = False - if eligible: - code_lines.append(row[0] + ' = ' + (row[1].replace('**TBD**', '0x1234567890123456789012345678901234567890'))) - # Build type-def re-initialization - code_lines.append('') - code_lines.append('def init_SSZ_types():') - code_lines.append(' global_vars = globals()') - for ssz_type_name, ssz_type in type_defs: - code_lines.append('') - for type_line in ssz_type: - code_lines.append(' ' + type_line) - code_lines.append('\n') - code_lines.append('ssz_types = [' + ', '.join([f'\'{ssz_type_name}\'' for (ssz_type_name, _) in type_defs]) + ']') - code_lines.append('\n') - code_lines.append('def get_ssz_type_by_name(name: str) -> SSZType:') - code_lines.append(' return globals()[name]') - code_lines.append('') - return code_lines diff --git a/specs/bls_signature.md b/specs/bls_signature.md index 18e2d8c9a..3fe1bcc0e 100644 --- a/specs/bls_signature.md +++ b/specs/bls_signature.md @@ -1,6 +1,8 @@ # BLS signature verification -**Warning: This document is pending academic review and should not yet be considered secure.** +**Notice**: This document is a placeholder to facilitate the emergence of cross-client testnets. Substantive changes are postponed until [BLS standardisation](https://github.com/pairingwg/bls_standard) is finalized. + +**Warning**: The constructions in this document should not be considered secure. In particular, the `hash_to_G2` function is known to be unsecure. 
## Table of contents @@ -118,7 +120,7 @@ Let `bls_aggregate_signatures(signatures: List[Bytes96]) -> Bytes96` return `sig ## Signature verification -In the following `e` is the pairing function and `g` is the G1 generator with the following coordinates (see [here](https://github.com/zkcrypto/pairing/tree/master/src/bls12_381#g1)): +In the following, `e` is the pairing function and `g` is the G1 generator with the following coordinates (see [here](https://github.com/zkcrypto/pairing/tree/master/src/bls12_381#g1)): ```python g_x = 3685416753713387016781088315183077757961620795782546409894578378688607592378376318836054947676345821548104185464507 diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 1ca1e9d4e..a6d9d23c5 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1,6 +1,6 @@ # Ethereum 2.0 Phase 0 -- The Beacon Chain -**NOTICE**: This document is a work in progress for researchers and implementers. +**Notice**: This document is a work-in-progress for researchers and implementers. 
## Table of contents @@ -17,22 +17,22 @@ - [Initial values](#initial-values) - [Time parameters](#time-parameters) - [State list lengths](#state-list-lengths) - - [Reward and penalty quotients](#reward-and-penalty-quotients) + - [Rewards and penalties](#rewards-and-penalties) - [Max operations per block](#max-operations-per-block) - [Signature domains](#signature-domains) - [Data structures](#data-structures) - [Misc dependencies](#misc-dependencies) - [`Fork`](#fork) + - [`Validator`](#validator) - [`Crosslink`](#crosslink) - - [`Eth1Data`](#eth1data) - [`AttestationData`](#attestationdata) - [`AttestationDataAndCustodyBit`](#attestationdataandcustodybit) - [`IndexedAttestation`](#indexedattestation) + - [`PendingAttestation`](#pendingattestation) + - [`Eth1Data`](#eth1data) + - [`HistoricalBatch`](#historicalbatch) - [`DepositData`](#depositdata) - [`BeaconBlockHeader`](#beaconblockheader) - - [`Validator`](#validator) - - [`PendingAttestation`](#pendingattestation) - - [`HistoricalBatch`](#historicalbatch) - [Beacon operations](#beacon-operations) - [`ProposerSlashing`](#proposerslashing) - [`AttesterSlashing`](#attesterslashing) @@ -45,12 +45,13 @@ - [`BeaconBlock`](#beaconblock) - [Beacon state](#beacon-state) - [`BeaconState`](#beaconstate) - - [Custom Types](#custom-types) + - [Custom types](#custom-types) - [Helper functions](#helper-functions) - [`xor`](#xor) - [`hash`](#hash) - [`hash_tree_root`](#hash_tree_root) - [`signing_root`](#signing_root) + - [`bls_domain`](#bls_domain) - [`slot_to_epoch`](#slot_to_epoch) - [`get_previous_epoch`](#get_previous_epoch) - [`get_current_epoch`](#get_current_epoch) @@ -63,7 +64,7 @@ - [`get_epoch_committee_count`](#get_epoch_committee_count) - [`get_shard_delta`](#get_shard_delta) - [`get_epoch_start_shard`](#get_epoch_start_shard) - - [`get_attestation_slot`](#get_attestation_slot) + - [`get_attestation_data_slot`](#get_attestation_data_slot) - [`get_block_root_at_slot`](#get_block_root_at_slot) - 
[`get_block_root`](#get_block_root) - [`get_randao_mix`](#get_randao_mix) @@ -75,14 +76,14 @@ - [`compute_committee`](#compute_committee) - [`get_crosslink_committee`](#get_crosslink_committee) - [`get_attesting_indices`](#get_attesting_indices) - - [`int_to_bytes1`, `int_to_bytes2`, ...](#int_to_bytes1-int_to_bytes2-) + - [`int_to_bytes`](#int_to_bytes) - [`bytes_to_int`](#bytes_to_int) - [`get_total_balance`](#get_total_balance) - [`get_domain`](#get_domain) - [`get_bitfield_bit`](#get_bitfield_bit) - [`verify_bitfield`](#verify_bitfield) - [`convert_to_indexed`](#convert_to_indexed) - - [`verify_indexed_attestation`](#verify_indexed_attestation) + - [`validate_indexed_attestation`](#validate_indexed_attestation) - [`is_slashable_attestation_data`](#is_slashable_attestation_data) - [`integer_squareroot`](#integer_squareroot) - [`get_delayed_activation_exit_epoch`](#get_delayed_activation_exit_epoch) @@ -93,19 +94,20 @@ - [Routines for updating validator status](#routines-for-updating-validator-status) - [`initiate_validator_exit`](#initiate_validator_exit) - [`slash_validator`](#slash_validator) - - [On genesis](#on-genesis) + - [Genesis](#genesis) + - [`Eth2Genesis`](#eth2genesis) + - [Genesis state](#genesis-state) + - [Genesis block](#genesis-block) - [Beacon chain state transition function](#beacon-chain-state-transition-function) - - [State caching](#state-caching) - - [Per-epoch processing](#per-epoch-processing) + - [Epoch processing](#epoch-processing) - [Helper functions](#helper-functions-1) - [Justification and finalization](#justification-and-finalization) - [Crosslinks](#crosslinks) - - [Rewards and penalties](#rewards-and-penalties) + - [Rewards and penalties](#rewards-and-penalties-1) - [Registry updates](#registry-updates) - [Slashings](#slashings) - [Final updates](#final-updates) - - [Per-slot processing](#per-slot-processing) - - [Per-block processing](#per-block-processing) + - [Block processing](#block-processing) - [Block 
header](#block-header) - [RANDAO](#randao) - [Eth1 data](#eth1-data) @@ -116,7 +118,6 @@ - [Deposits](#deposits) - [Voluntary exits](#voluntary-exits) - [Transfers](#transfers) - - [State root verification](#state-root-verification) @@ -134,25 +135,25 @@ Code snippets appearing in `this style` are to be interpreted as Python code. ## Terminology -* **Validator** - a registered participant in the beacon chain. You can become one by sending ether into the Ethereum 1.0 deposit contract. -* **Active validator** - an active participant in the Ethereum 2.0 consensus invited to, among other things, propose and attest to blocks and vote for crosslinks. -* **Committee** - a (pseudo-) randomly sampled subset of [active validators](#dfn-active-validator). When a committee is referred to collectively, as in "this committee attests to X", this is assumed to mean "some subset of that committee that contains enough [validators](#dfn-validator) that the protocol recognizes it as representing the committee". -* **Proposer** - the [validator](#dfn-validator) that creates a beacon chain block. -* **Attester** - a [validator](#dfn-validator) that is part of a committee that needs to sign off on a beacon chain block while simultaneously creating a link (crosslink) to a recent shard block on a particular shard chain. -* **Beacon chain** - the central PoS chain that is the base of the sharding system. -* **Shard chain** - one of the chains on which user transactions take place and account data is stored. -* **Block root** - a 32-byte Merkle root of a beacon chain block or shard chain block. Previously called "block hash". -* **Crosslink** - a set of signatures from a committee attesting to a block in a shard chain that can be included into the beacon chain. Crosslinks are the main means by which the beacon chain "learns about" the updated state of shard chains. 
-* **Slot** - a period during which one proposer has the ability to create a beacon chain block and some attesters have the ability to make attestations. -* **Epoch** - an aligned span of slots during which all [validators](#dfn-validator) get exactly one chance to make an attestation. -* **Finalized**, **justified** - see the [Casper FFG paper](https://arxiv.org/abs/1710.09437). -* **Withdrawal period** - the number of slots between a [validator](#dfn-validator) exit and the [validator](#dfn-validator) balance being withdrawable. -* **Genesis time** - the Unix time of the genesis beacon chain block at slot 0. +* **Validator**—a registered participant in the beacon chain. You can become one by sending ether into the Ethereum 1.0 deposit contract. +* **Active validator**—an active participant in the Ethereum 2.0 consensus invited to, among other things, propose and attest to blocks and vote for crosslinks. +* **Committee**—a (pseudo-) randomly sampled subset of [active validators](#dfn-active-validator). When a committee is referred to collectively, as in "this committee attests to X", this is assumed to mean "some subset of that committee that contains enough [validators](#dfn-validator) that the protocol recognizes it as representing the committee". +* **Proposer**—the [validator](#dfn-validator) that creates a beacon chain block. +* **Attester**—a [validator](#dfn-validator) that is part of a committee that needs to sign off on a beacon chain block while simultaneously creating a link (crosslink) to a recent shard block on a particular shard chain. +* **Beacon chain**—the central PoS chain that is the base of the sharding system. +* **Shard chain**—one of the chains on which user transactions take place and account data is stored. +* **Block root**—a 32-byte Merkle root of a beacon chain block or shard chain block. Previously called "block hash". 
+* **Crosslink**—a set of signatures from a committee attesting to a block in a shard chain that can be included into the beacon chain. Crosslinks are the main means by which the beacon chain "learns about" the updated state of shard chains. +* **Slot**—a period during which one proposer has the ability to create a beacon chain block and some attesters have the ability to make attestations. +* **Epoch**—an aligned span of slots during which all [validators](#dfn-validator) get exactly one chance to make an attestation. +* **Finalized**, **justified**—see the [Casper FFG paper](https://arxiv.org/abs/1710.09437). +* **Withdrawal period**—the number of slots between a [validator](#dfn-validator) exit and the [validator](#dfn-validator) balance being withdrawable. +* **Genesis time**—the Unix time of the genesis beacon chain block at slot 0. ## Constants -Note: the default mainnet values for the constants are included here for spec-design purposes. -The different configurations for mainnet, testnets, and yaml-based testing can be found in the `configs/constant_presets/` directory. +*Note*: The default mainnet values for the constants are included here for spec-design purposes. +The different configurations for mainnet, testnets, and YAML-based testing can be found in the `configs/constant_presets/` directory. These configurations are updated for releases, but may be out of sync during `dev` changes. 
### Misc @@ -165,7 +166,7 @@ These configurations are updated for releases, but may be out of sync during `de | `MIN_PER_EPOCH_CHURN_LIMIT` | `2**2` (= 4) | | `CHURN_LIMIT_QUOTIENT` | `2**16` (= 65,536) | | `BASE_REWARDS_PER_EPOCH` | `5` | -| `SHUFFLE_ROUND_COUNT` | 90 | +| `SHUFFLE_ROUND_COUNT` | `90` | * For the safety of crosslinks `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.) @@ -191,8 +192,8 @@ These configurations are updated for releases, but may be out of sync during `de | `GENESIS_SLOT` | `0` | | `GENESIS_EPOCH` | `0` | | `FAR_FUTURE_EPOCH` | `2**64 - 1` | -| `ZERO_HASH` | `int_to_bytes32(0)` | -| `BLS_WITHDRAWAL_PREFIX_BYTE` | `int_to_bytes1(0)` | +| `ZERO_HASH` | `b'\x00' * 32` | +| `BLS_WITHDRAWAL_PREFIX` | `0` | ### Time parameters @@ -206,10 +207,10 @@ These configurations are updated for releases, but may be out of sync during `de | `SLOTS_PER_HISTORICAL_ROOT` | `2**13` (= 8,192) | slots | ~13 hours | | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY` | `2**8` (= 256) | epochs | ~27 hours | | `PERSISTENT_COMMITTEE_PERIOD` | `2**11` (= 2,048) | epochs | 9 days | -| `MAX_CROSSLINK_EPOCHS` | `2**6` (= 64) | epochs | ~7 hours | +| `MAX_EPOCHS_PER_CROSSLINK` | `2**6` (= 64) | epochs | ~7 hours | | `MIN_EPOCHS_TO_INACTIVITY_PENALTY` | `2**2` (= 4) | epochs | 25.6 minutes | -* `MAX_CROSSLINK_EPOCHS` should be a small constant times `SHARD_COUNT // SLOTS_PER_EPOCH` +* `MAX_EPOCHS_PER_CROSSLINK` should be a small constant times `SHARD_COUNT // SLOTS_PER_EPOCH` ### State list lengths @@ -219,17 +220,17 @@ These configurations are updated for releases, but may be out of sync 
during `de | `LATEST_ACTIVE_INDEX_ROOTS_LENGTH` | `2**13` (= 8,192) | epochs | ~36 days | | `LATEST_SLASHED_EXIT_LENGTH` | `2**13` (= 8,192) | epochs | ~36 days | -### Reward and penalty quotients +### Rewards and penalties | Name | Value | | - | - | -| `BASE_REWARD_QUOTIENT` | `2**5` (= 32) | +| `BASE_REWARD_FACTOR` | `2**5` (= 32) | | `WHISTLEBLOWING_REWARD_QUOTIENT` | `2**9` (= 512) | | `PROPOSER_REWARD_QUOTIENT` | `2**3` (= 8) | | `INACTIVITY_PENALTY_QUOTIENT` | `2**25` (= 33,554,432) | | `MIN_SLASHING_PENALTY_QUOTIENT` | `2**5` (= 32) | -* **The `BASE_REWARD_QUOTIENT` is NOT final. Once all other protocol details are finalized it will be adjusted, to target a theoretical maximum total issuance of `2**21` ETH per year if `2**27` ETH is validating (and therefore `2**20` per year if `2**25` ETH is validating, etc etc)** +* **The `BASE_REWARD_FACTOR` is NOT final. Once all other protocol details are finalized, it will be adjusted to target a theoretical maximum total issuance of `2**21` ETH per year if `2**27` ETH is validating (and therefore `2**20` per year if `2**25` ETH is validating, etc.)** * The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**12 epochs` (~18 days) is the time it takes the inactivity penalty to reduce the balance of non-participating [validators](#dfn-validator) to about `1/sqrt(e) ~= 60.6%`. Indeed, the balance retained by offline [validators](#dfn-validator) after `n` epochs is about `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)` so after `INVERSE_SQRT_E_DROP_TIME` epochs it is roughly `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`. 
### Max operations per block @@ -265,160 +266,151 @@ The types are defined topologically to aid in facilitating an executable version #### `Fork` ```python -{ +class Fork(Container): # Previous fork version - 'previous_version': 'bytes4', + previous_version: Bytes4 # Current fork version - 'current_version': 'bytes4', + current_version: Bytes4 # Fork epoch number - 'epoch': 'uint64', -} + epoch: uint64 +``` + +#### `Validator` + +```python +class Validator(Container): + # BLS public key + pubkey: Bytes48 + # Withdrawal credentials + withdrawal_credentials: Bytes32 + # Epoch when became eligible for activation + activation_eligibility_epoch: uint64 + # Epoch when validator activated + activation_epoch: uint64 + # Epoch when validator exited + exit_epoch: uint64 + # Epoch when validator is eligible to withdraw + withdrawable_epoch: uint64 + # Was the validator slashed + slashed: bool + # Effective balance + effective_balance: uint64 ``` #### `Crosslink` ```python -{ - # Epoch number - 'epoch': 'uint64', +class Crosslink(Container): + # Shard number + shard: uint64 + # Crosslinking data from epochs [start....end-1] + start_epoch: uint64 + end_epoch: uint64 # Root of the previous crosslink - 'previous_crosslink_root': 'bytes32', + parent_root: Bytes32 # Root of the crosslinked shard data since the previous crosslink - 'crosslink_data_root': 'bytes32', -} -``` - -#### `Eth1Data` - -```python -{ - # Root of the deposit tree - 'deposit_root': 'bytes32', - # Total number of deposits - 'deposit_count': 'uint64', - # Block hash - 'block_hash': 'bytes32', -} + data_root: Bytes32 ``` #### `AttestationData` ```python -{ +class AttestationData(Container): # LMD GHOST vote - 'beacon_block_root': 'bytes32', + beacon_block_root: Bytes32 # FFG vote - 'source_epoch': 'uint64', - 'source_root': 'bytes32', - 'target_epoch': 'uint64', - 'target_root': 'bytes32', + source_epoch: uint64 + source_root: Bytes32 + target_epoch: uint64 + target_root: Bytes32 # Crosslink vote - 'shard': 
'uint64', - 'previous_crosslink_root': 'bytes32', - 'crosslink_data_root': 'bytes32', -} + crosslink: Crosslink ``` #### `AttestationDataAndCustodyBit` ```python -{ +class AttestationDataAndCustodyBit(Container): # Attestation data - 'data': AttestationData, + data: AttestationData # Custody bit - 'custody_bit': 'bool', -} + custody_bit: bool ``` #### `IndexedAttestation` ```python -{ +class IndexedAttestation(Container): # Validator indices - 'custody_bit_0_indices': ['uint64'], - 'custody_bit_1_indices': ['uint64'], + custody_bit_0_indices: List[uint64] + custody_bit_1_indices: List[uint64] # Attestation data - 'data': AttestationData, + data: AttestationData # Aggregate signature - 'signature': 'bytes96', -} -``` - -#### `DepositData` - -```python -{ - # BLS pubkey - 'pubkey': 'bytes48', - # Withdrawal credentials - 'withdrawal_credentials': 'bytes32', - # Amount in Gwei - 'amount': 'uint64', - # Container self-signature - 'signature': 'bytes96', -} -``` - -#### `BeaconBlockHeader` - -```python -{ - 'slot': 'uint64', - 'previous_block_root': 'bytes32', - 'state_root': 'bytes32', - 'block_body_root': 'bytes32', - 'signature': 'bytes96', -} -``` -#### `Validator` - -```python -{ - # BLS public key - 'pubkey': 'bytes48', - # Withdrawal credentials - 'withdrawal_credentials': 'bytes32', - # Epoch when became eligible for activation - 'activation_eligibility_epoch': 'uint64', - # Epoch when validator activated - 'activation_epoch': 'uint64', - # Epoch when validator exited - 'exit_epoch': 'uint64', - # Epoch when validator is eligible to withdraw - 'withdrawable_epoch': 'uint64', - # Was the validator slashed - 'slashed': 'bool', - # Effective balance - 'effective_balance': 'uint64', -} + signature: Bytes96 ``` #### `PendingAttestation` ```python -{ +class PendingAttestation(Container): # Attester aggregation bitfield - 'aggregation_bitfield': 'bytes', + aggregation_bitfield: bytes # Attestation data - 'data': AttestationData, + data: AttestationData # Inclusion 
delay - 'inclusion_delay': 'uint64', + inclusion_delay: uint64 # Proposer index - 'proposer_index': 'uint64', -} + proposer_index: uint64 +``` + +#### `Eth1Data` + +```python +class Eth1Data(Container): + # Root of the deposit tree + deposit_root: Bytes32 + # Total number of deposits + deposit_count: uint64 + # Block hash + block_hash: Bytes32 ``` #### `HistoricalBatch` ```python -{ +class HistoricalBatch(Container): # Block roots - 'block_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT], + block_roots: Vector[Bytes32, SLOTS_PER_HISTORICAL_ROOT] # State roots - 'state_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT], -} + state_roots: Vector[Bytes32, SLOTS_PER_HISTORICAL_ROOT] +``` + +#### `DepositData` + +```python +class DepositData(Container): + # BLS pubkey + pubkey: Bytes48 + # Withdrawal credentials + withdrawal_credentials: Bytes32 + # Amount in Gwei + amount: uint64 + # Container self-signature + signature: Bytes96 +``` + +#### `BeaconBlockHeader` + +```python +class BeaconBlockHeader(Container): + slot: uint64 + parent_root: Bytes32 + state_root: Bytes32 + body_root: Bytes32 + signature: Bytes96 ``` ### Beacon operations @@ -426,87 +418,79 @@ The types are defined topologically to aid in facilitating an executable version #### `ProposerSlashing` ```python -{ +class ProposerSlashing(Container): # Proposer index - 'proposer_index': 'uint64', + proposer_index: uint64 # First block header - 'header_1': BeaconBlockHeader, + header_1: BeaconBlockHeader # Second block header - 'header_2': BeaconBlockHeader, -} + header_2: BeaconBlockHeader ``` #### `AttesterSlashing` ```python -{ +class AttesterSlashing(Container): # First attestation - 'attestation_1': IndexedAttestation, + attestation_1: IndexedAttestation # Second attestation - 'attestation_2': IndexedAttestation, -} + attestation_2: IndexedAttestation ``` #### `Attestation` ```python -{ +class Attestation(Container): # Attester aggregation bitfield - 'aggregation_bitfield': 'bytes', + aggregation_bitfield: bytes 
# Attestation data - 'data': AttestationData, + data: AttestationData # Custody bitfield - 'custody_bitfield': 'bytes', + custody_bitfield: bytes # BLS aggregate signature - 'signature': 'bytes96', -} + signature: Bytes96 ``` #### `Deposit` ```python -{ +class Deposit(Container): # Branch in the deposit tree - 'proof': ['bytes32', DEPOSIT_CONTRACT_TREE_DEPTH], - # Index in the deposit tree - 'index': 'uint64', + proof: Vector[Bytes32, DEPOSIT_CONTRACT_TREE_DEPTH] # Data - 'data': DepositData, -} + data: DepositData ``` #### `VoluntaryExit` ```python -{ +class VoluntaryExit(Container): # Minimum epoch for processing exit - 'epoch': 'uint64', + epoch: uint64 # Index of the exiting validator - 'validator_index': 'uint64', + validator_index: uint64 # Validator signature - 'signature': 'bytes96', -} + signature: Bytes96 ``` #### `Transfer` ```python -{ +class Transfer(Container): # Sender index - 'sender': 'uint64', + sender: uint64 # Recipient index - 'recipient': 'uint64', + recipient: uint64 # Amount in Gwei - 'amount': 'uint64', + amount: uint64 # Fee in Gwei for block proposer - 'fee': 'uint64', + fee: uint64 # Inclusion slot - 'slot': 'uint64', + slot: uint64 # Sender withdrawal pubkey - 'pubkey': 'bytes48', + pubkey: Bytes48 # Sender signature - 'signature': 'bytes96', -} + signature: Bytes96 ``` ### Beacon blocks @@ -514,30 +498,28 @@ The types are defined topologically to aid in facilitating an executable version #### `BeaconBlockBody` ```python -{ - 'randao_reveal': 'bytes96', - 'eth1_data': Eth1Data, - 'graffiti': 'bytes32', - 'proposer_slashings': [ProposerSlashing], - 'attester_slashings': [AttesterSlashing], - 'attestations': [Attestation], - 'deposits': [Deposit], - 'voluntary_exits': [VoluntaryExit], - 'transfers': [Transfer], -} +class BeaconBlockBody(Container): + randao_reveal: Bytes96 + eth1_data: Eth1Data + graffiti: Bytes32 + proposer_slashings: List[ProposerSlashing] + attester_slashings: List[AttesterSlashing] + attestations: List[Attestation] + 
deposits: List[Deposit] + voluntary_exits: List[VoluntaryExit] + transfers: List[Transfer] ``` #### `BeaconBlock` ```python -{ +class BeaconBlock(Container): # Header - 'slot': 'uint64', - 'previous_block_root': 'bytes32', - 'state_root': 'bytes32', - 'body': BeaconBlockBody, - 'signature': 'bytes96', -} + slot: uint64 + parent_root: Bytes32 + state_root: Bytes32 + body: BeaconBlockBody + signature: Bytes96 ``` ### Beacon state @@ -545,49 +527,43 @@ The types are defined topologically to aid in facilitating an executable version #### `BeaconState` ```python -{ +class BeaconState(Container): # Misc - 'slot': 'uint64', - 'genesis_time': 'uint64', - 'fork': Fork, # For versioning hard forks - + slot: uint64 + genesis_time: uint64 + fork: Fork # For versioning hard forks # Validator registry - 'validator_registry': [Validator], - 'balances': ['uint64'], - + validator_registry: List[Validator] + balances: List[uint64] # Randomness and committees - 'latest_randao_mixes': ['bytes32', LATEST_RANDAO_MIXES_LENGTH], - 'latest_start_shard': 'uint64', - + latest_randao_mixes: Vector[Bytes32, LATEST_RANDAO_MIXES_LENGTH] + latest_start_shard: uint64 # Finality - 'previous_epoch_attestations': [PendingAttestation], - 'current_epoch_attestations': [PendingAttestation], - 'previous_justified_epoch': 'uint64', - 'current_justified_epoch': 'uint64', - 'previous_justified_root': 'bytes32', - 'current_justified_root': 'bytes32', - 'justification_bitfield': 'uint64', - 'finalized_epoch': 'uint64', - 'finalized_root': 'bytes32', - + previous_epoch_attestations: List[PendingAttestation] + current_epoch_attestations: List[PendingAttestation] + previous_justified_epoch: uint64 + current_justified_epoch: uint64 + previous_justified_root: Bytes32 + current_justified_root: Bytes32 + justification_bitfield: uint64 + finalized_epoch: uint64 + finalized_root: Bytes32 # Recent state - 'current_crosslinks': [Crosslink, SHARD_COUNT], - 'previous_crosslinks': [Crosslink, SHARD_COUNT], - 
'latest_block_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT], - 'latest_state_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT], - 'latest_active_index_roots': ['bytes32', LATEST_ACTIVE_INDEX_ROOTS_LENGTH], - 'latest_slashed_balances': ['uint64', LATEST_SLASHED_EXIT_LENGTH], # Balances slashed at every withdrawal period - 'latest_block_header': BeaconBlockHeader, # `latest_block_header.state_root == ZERO_HASH` temporarily - 'historical_roots': ['bytes32'], - + current_crosslinks: Vector[Crosslink, SHARD_COUNT] + previous_crosslinks: Vector[Crosslink, SHARD_COUNT] + latest_block_roots: Vector[Bytes32, SLOTS_PER_HISTORICAL_ROOT] + latest_state_roots: Vector[Bytes32, SLOTS_PER_HISTORICAL_ROOT] + latest_active_index_roots: Vector[Bytes32, LATEST_ACTIVE_INDEX_ROOTS_LENGTH] + latest_slashed_balances: Vector[uint64, LATEST_SLASHED_EXIT_LENGTH] + latest_block_header: BeaconBlockHeader + historical_roots: List[Bytes32] # Ethereum 1.0 chain data - 'latest_eth1_data': Eth1Data, - 'eth1_data_votes': [Eth1Data], - 'deposit_index': 'uint64', -} + latest_eth1_data: Eth1Data + eth1_data_votes: List[Eth1Data] + deposit_index: uint64 ``` -## Custom Types +## Custom types We define the following Python custom types for type hinting and readability: @@ -598,26 +574,25 @@ We define the following Python custom types for type hinting and readability: | `Shard` | `uint64` | a shard number | | `ValidatorIndex` | `uint64` | a validator registry index | | `Gwei` | `uint64` | an amount in Gwei | -| `Bytes32` | `bytes32` | 32 bytes of binary data | -| `BLSPubkey` | `bytes48` | a BLS12-381 public key | -| `BLSSignature` | `bytes96` | a BLS12-381 signature | +| `BLSPubkey` | `Bytes48` | a BLS12-381 public key | +| `BLSSignature` | `Bytes96` | a BLS12-381 signature | ## Helper functions -Note: The definitions below are for specification purposes and are not necessarily optimal implementations. 
+*Note*: The definitions below are for specification purposes and are not necessarily optimal implementations. ### `xor` ```python def xor(bytes1: Bytes32, bytes2: Bytes32) -> Bytes32: - return bytes(a ^ b for a, b in zip(bytes1, bytes2)) + return Bytes32(a ^ b for a, b in zip(bytes1, bytes2)) ``` ### `hash` The `hash` function is SHA256. -Note: We aim to migrate to a S[T/N]ARK-friendly hash function in a future Ethereum 2.0 deployment phase. +*Note*: We aim to migrate to a S[T/N]ARK-friendly hash function in a future Ethereum 2.0 deployment phase. ### `hash_tree_root` @@ -625,7 +600,17 @@ Note: We aim to migrate to a S[T/N]ARK-friendly hash function in a future Ethere ### `signing_root` -`def signing_root(object: SSZContainer) -> Bytes32` is a function defined in the [SimpleSerialize spec](../simple-serialize.md#self-signed-containers) to compute signing messages. +`def signing_root(object: Container) -> Bytes32` is a function defined in the [SimpleSerialize spec](../simple-serialize.md#self-signed-containers) to compute signing messages. + +### `bls_domain` + +```python +def bls_domain(domain_type: int, fork_version: bytes=b'\x00\x00\x00\x00') -> int: + """ + Return the bls domain given by the ``domain_type`` and optional 4 byte ``fork_version`` (defaults to zero). + """ + return bytes_to_int(int_to_bytes(domain_type, length=4) + fork_version) +``` ### `slot_to_epoch` @@ -646,7 +631,7 @@ def get_previous_epoch(state: BeaconState) -> Epoch: Return the current epoch if it's genesis epoch. 
""" current_epoch = get_current_epoch(state) - return (current_epoch - 1) if current_epoch > GENESIS_EPOCH else current_epoch + return GENESIS_EPOCH if current_epoch == GENESIS_EPOCH else current_epoch - 1 ``` ### `get_current_epoch` @@ -759,14 +744,13 @@ def get_epoch_start_shard(state: BeaconState, epoch: Epoch) -> Shard: return shard ``` -### `get_attestation_slot` +### `get_attestation_data_slot` ```python -def get_attestation_slot(state: BeaconState, attestation: Attestation) -> Slot: - epoch = attestation.data.target_epoch - committee_count = get_epoch_committee_count(state, epoch) - offset = (attestation.data.shard + SHARD_COUNT - get_epoch_start_shard(state, epoch)) % SHARD_COUNT - return get_epoch_start_slot(epoch) + offset // (committee_count // SLOTS_PER_EPOCH) +def get_attestation_data_slot(state: BeaconState, data: AttestationData) -> Slot: + committee_count = get_epoch_committee_count(state, data.target_epoch) + offset = (data.crosslink.shard + SHARD_COUNT - get_epoch_start_shard(state, data.target_epoch)) % SHARD_COUNT + return get_epoch_start_slot(data.target_epoch) + offset // (committee_count // SLOTS_PER_EPOCH) ``` ### `get_block_root_at_slot` @@ -828,7 +812,7 @@ def generate_seed(state: BeaconState, return hash( get_randao_mix(state, epoch + LATEST_RANDAO_MIXES_LENGTH - MIN_SEED_LOOKAHEAD) + get_active_index_root(state, epoch) + - int_to_bytes32(epoch) + int_to_bytes(epoch, length=32) ) ``` @@ -849,7 +833,7 @@ def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex: i = 0 while True: candidate_index = first_committee[(epoch + i) % len(first_committee)] - random_byte = hash(seed + int_to_bytes8(i // 32))[i % 32] + random_byte = hash(seed + int_to_bytes(i // 32, length=8))[i % 32] effective_balance = state.validator_registry[candidate_index].effective_balance if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte: return candidate_index @@ -886,10 +870,10 @@ def get_shuffled_index(index: ValidatorIndex, 
index_count: int, seed: Bytes32) - # Swap or not (https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf) # See the 'generalized domain' algorithm on page 3 for round in range(SHUFFLE_ROUND_COUNT): - pivot = bytes_to_int(hash(seed + int_to_bytes1(round))[0:8]) % index_count - flip = (pivot - index) % index_count + pivot = bytes_to_int(hash(seed + int_to_bytes(round, length=1))[0:8]) % index_count + flip = (pivot + index_count - index) % index_count position = max(index, flip) - source = hash(seed + int_to_bytes1(round) + int_to_bytes4(position // 256)) + source = hash(seed + int_to_bytes(round, length=1) + int_to_bytes(position // 256, length=4)) byte = source[(position % 256) // 8] bit = (byte >> (position % 8)) % 2 index = flip if bit else index @@ -927,14 +911,17 @@ def get_attesting_indices(state: BeaconState, """ Return the sorted attesting indices corresponding to ``attestation_data`` and ``bitfield``. """ - committee = get_crosslink_committee(state, attestation_data.target_epoch, attestation_data.shard) + committee = get_crosslink_committee(state, attestation_data.target_epoch, attestation_data.crosslink.shard) assert verify_bitfield(bitfield, len(committee)) return sorted([index for i, index in enumerate(committee) if get_bitfield_bit(bitfield, i) == 0b1]) ``` -### `int_to_bytes1`, `int_to_bytes2`, ... +### `int_to_bytes` -`int_to_bytes1(x): return x.to_bytes(1, 'little')`, `int_to_bytes2(x): return x.to_bytes(2, 'little')`, and so on for all integers, particularly 1, 2, 3, 4, 8, 32, 48, 96. +```python +def int_to_bytes(integer: int, length: int) -> bytes: + return integer.to_bytes(length, 'little') +``` ### `bytes_to_int` @@ -948,9 +935,9 @@ def bytes_to_int(data: bytes) -> int: ```python def get_total_balance(state: BeaconState, indices: List[ValidatorIndex]) -> Gwei: """ - Return the combined effective balance of an array of ``validators``. + Return the combined effective balance of the ``indices``. 
(1 Gwei minimum to avoid divisions by zero.) """ - return sum([state.validator_registry[index].effective_balance for index in indices]) + return max(sum([state.validator_registry[index].effective_balance for index in indices]), 1) ``` ### `get_domain` @@ -964,7 +951,7 @@ def get_domain(state: BeaconState, """ epoch = get_current_epoch(state) if message_epoch is None else message_epoch fork_version = state.fork.previous_version if epoch < state.fork.epoch else state.fork.current_version - return bytes_to_int(fork_version + int_to_bytes4(domain_type)) + return bls_domain(domain_type, fork_version) ``` ### `get_bitfield_bit` @@ -1014,35 +1001,29 @@ def convert_to_indexed(state: BeaconState, attestation: Attestation) -> IndexedA ) ``` -### `verify_indexed_attestation` +### `validate_indexed_attestation` ```python -def verify_indexed_attestation(state: BeaconState, indexed_attestation: IndexedAttestation) -> bool: +def validate_indexed_attestation(state: BeaconState, indexed_attestation: IndexedAttestation) -> None: """ - Verify validity of ``indexed_attestation`` fields. + Verify validity of ``indexed_attestation``. 
""" - custody_bit_0_indices = indexed_attestation.custody_bit_0_indices - custody_bit_1_indices = indexed_attestation.custody_bit_1_indices + bit_0_indices = indexed_attestation.custody_bit_0_indices + bit_1_indices = indexed_attestation.custody_bit_1_indices - # Ensure no duplicate indices across custody bits - assert len(set(custody_bit_0_indices).intersection(set(custody_bit_1_indices))) == 0 - - if len(custody_bit_1_indices) > 0: # [TO BE REMOVED IN PHASE 1] - return False - - if not (1 <= len(custody_bit_0_indices) + len(custody_bit_1_indices) <= MAX_INDICES_PER_ATTESTATION): - return False - - if custody_bit_0_indices != sorted(custody_bit_0_indices): - return False - - if custody_bit_1_indices != sorted(custody_bit_1_indices): - return False - - return bls_verify_multiple( + # Verify no index has custody bit equal to 1 [to be removed in phase 1] + assert len(bit_1_indices) == 0 + # Verify max number of indices + assert len(bit_0_indices) + len(bit_1_indices) <= MAX_INDICES_PER_ATTESTATION + # Verify index sets are disjoint + assert len(set(bit_0_indices).intersection(bit_1_indices)) == 0 + # Verify indices are sorted + assert bit_0_indices == sorted(bit_0_indices) and bit_1_indices == sorted(bit_1_indices) + # Verify aggregate signature + assert bls_verify_multiple( pubkeys=[ - bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in custody_bit_0_indices]), - bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in custody_bit_1_indices]), + bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in bit_0_indices]), + bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in bit_1_indices]), ], message_hashes=[ hash_tree_root(AttestationDataAndCustodyBit(data=indexed_attestation.data, custody_bit=0b0)), @@ -1098,6 +1079,9 @@ def get_delayed_activation_exit_epoch(epoch: Epoch) -> Epoch: ```python def get_churn_limit(state: BeaconState) -> int: + """ + Return the churn limit based on the active validator count. 
+ """ return max( MIN_PER_EPOCH_CHURN_LIMIT, len(get_active_validator_indices(state, get_current_epoch(state))) // CHURN_LIMIT_QUOTIENT @@ -1118,14 +1102,14 @@ def get_churn_limit(state: BeaconState) -> int: ### Routines for updating validator status -Note: All functions in this section mutate `state`. +*Note*: All functions in this section mutate `state`. #### `initiate_validator_exit` ```python def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: """ - Initiate the validator of the given ``index``. + Initiate the exit of the validator of the given ``index``. """ # Return if validator already initiated exit validator = state.validator_registry[index] @@ -1147,7 +1131,9 @@ def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: #### `slash_validator` ```python -def slash_validator(state: BeaconState, slashed_index: ValidatorIndex, whistleblower_index: ValidatorIndex=None) -> None: +def slash_validator(state: BeaconState, + slashed_index: ValidatorIndex, + whistleblower_index: ValidatorIndex=None) -> None: """ Slash the validator with index ``slashed_index``. """ @@ -1168,30 +1154,33 @@ def slash_validator(state: BeaconState, slashed_index: ValidatorIndex, whistlebl decrease_balance(state, slashed_index, whistleblowing_reward) ``` -## On genesis +## Genesis -When enough full deposits have been made to the deposit contract, an `Eth2Genesis` log is emitted. Construct a corresponding `genesis_state` and `genesis_block` as follows: +### `Eth2Genesis` -* Let `genesis_validator_deposits` be the list of deposits, ordered chronologically, up to and including the deposit that triggered the `Eth2Genesis` log. -* Let `genesis_time` be the timestamp specified in the `Eth2Genesis` log. -* Let `genesis_eth1_data` be the `Eth1Data` object where: - * `genesis_eth1_data.deposit_root` is the `deposit_root` contained in the `Eth2Genesis` log. 
- * `genesis_eth1_data.deposit_count` is the `deposit_count` contained in the `Eth2Genesis` log. - * `genesis_eth1_data.block_hash` is the hash of the Ethereum 1.0 block that emitted the `Eth2Genesis` log. -* Let `genesis_state = get_genesis_beacon_state(genesis_validator_deposits, genesis_time, genesis_eth1_data)`. -* Let `genesis_block = BeaconBlock(state_root=hash_tree_root(genesis_state))`. +When enough deposits of size `MAX_EFFECTIVE_BALANCE` have been made to the deposit contract, an `Eth2Genesis` log is emitted, triggering the genesis of the beacon chain. Let: + +* `eth2genesis` be the object corresponding to `Eth2Genesis` +* `genesis_eth1_data` be the object of type `Eth1Data` where + * `genesis_eth1_data.deposit_root = eth2genesis.deposit_root` + * `genesis_eth1_data.deposit_count = eth2genesis.deposit_count` + * `genesis_eth1_data.block_hash` is the hash of the Ethereum 1.0 block that emitted the `Eth2Genesis` log +* `genesis_deposits` be the object of type `List[Deposit]` with deposits ordered chronologically up to and including the deposit that triggered the `Eth2Genesis` log + +### Genesis state + +Let `genesis_state = get_genesis_beacon_state(genesis_deposits, eth2genesis.genesis_time, genesis_eth1_data)`. ```python -def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], - genesis_time: int, - genesis_eth1_data: Eth1Data) -> BeaconState: - """ - Get the genesis ``BeaconState``. 
- """ - state = BeaconState(genesis_time=genesis_time, latest_eth1_data=genesis_eth1_data) +def get_genesis_beacon_state(deposits: List[Deposit], genesis_time: int, genesis_eth1_data: Eth1Data) -> BeaconState: + state = BeaconState( + genesis_time=genesis_time, + latest_eth1_data=genesis_eth1_data, + latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())), + ) # Process genesis deposits - for deposit in genesis_validator_deposits: + for deposit in deposits: process_deposit(state, deposit) # Process genesis activations @@ -1200,6 +1189,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], validator.activation_eligibility_epoch = GENESIS_EPOCH validator.activation_epoch = GENESIS_EPOCH + # Populate latest_active_index_roots genesis_active_index_root = hash_tree_root(get_active_validator_indices(state, GENESIS_EPOCH)) for index in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH): state.latest_active_index_roots[index] = genesis_active_index_root @@ -1207,52 +1197,72 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], return state ``` +### Genesis block + +Let `genesis_block = BeaconBlock(state_root=hash_tree_root(genesis_state))`. + ## Beacon chain state transition function -We now define the state transition function. At a high level, the state transition is made up of four parts: - -1. State caching, which happens at the start of every slot. -2. The per-epoch transitions, which happens at the start of the first slot of every epoch. -3. The per-slot transitions, which happens at every slot. -4. The per-block transitions, which happens at every block. - -Transition section notes: -* The state caching caches the state root of the previous slot and updates block and state roots records. 
-* The per-epoch transitions focus on the [validator](#dfn-validator) registry, including adjusting balances and activating and exiting [validators](#dfn-validator), as well as processing crosslinks and managing block justification/finalization. -* The per-slot transitions focus on the slot counter. -* The per-block transitions generally focus on verifying aggregate signatures and saving temporary records relating to the per-block activity in the `BeaconState`. - -Beacon blocks that trigger unhandled Python exceptions (e.g. out-of-range list accesses) and failed `assert`s during the state transition are considered invalid. - -Note: If there are skipped slots between a block and its parent block, run the steps in the [state-root](#state-caching), [per-epoch](#per-epoch-processing), and [per-slot](#per-slot-processing) sections once for each skipped slot and then once for the slot containing the new block. - -### State caching - -At every `slot > GENESIS_SLOT` run the following function: +The post-state corresponding to a pre-state `state` and a block `block` is defined as `state_transition(state, block)`. State transitions that trigger an unhandled exception (e.g. a failed `assert` or an out-of-range list access) are considered invalid. 
```python -def cache_state(state: BeaconState) -> None: - # Cache latest known state root (for previous slot) - latest_state_root = hash_tree_root(state) - state.latest_state_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = latest_state_root - - # Store latest known state root (for previous slot) in latest_block_header if it is empty - if state.latest_block_header.state_root == ZERO_HASH: - state.latest_block_header.state_root = latest_state_root - - # Cache latest known block root (for previous slot) - latest_block_root = signing_root(state.latest_block_header) - state.latest_block_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = latest_block_root +def state_transition(state: BeaconState, block: BeaconBlock, validate_state_root: bool=False) -> BeaconState: + # Process slots (including those with no blocks) since block + process_slots(state, block.slot) + # Process block + process_block(state, block) + # Validate state root (`validate_state_root == True` in production) + if validate_state_root: + assert block.state_root == hash_tree_root(state) + # Return post-state + return state ``` -### Per-epoch processing +```python +def process_slots(state: BeaconState, slot: Slot) -> None: + assert state.slot <= slot + while state.slot < slot: + process_slot(state) + # Process epoch on the first slot of the next epoch + if (state.slot + 1) % SLOTS_PER_EPOCH == 0: + process_epoch(state) + state.slot += 1 +``` -The steps below happen when `state.slot > GENESIS_SLOT and (state.slot + 1) % SLOTS_PER_EPOCH == 0`. 
+```python +def process_slot(state: BeaconState) -> None: + # Cache state root + previous_state_root = hash_tree_root(state) + state.latest_state_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = previous_state_root + + # Cache latest block header state root + if state.latest_block_header.state_root == ZERO_HASH: + state.latest_block_header.state_root = previous_state_root + + # Cache block root + previous_block_root = signing_root(state.latest_block_header) + state.latest_block_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = previous_block_root +``` + +### Epoch processing + +Note: the `# @LabelHere` lines below are placeholders to show that code will be inserted here in a future phase. + +```python +def process_epoch(state: BeaconState) -> None: + process_justification_and_finalization(state) + process_crosslinks(state) + process_rewards_and_penalties(state) + process_registry_updates(state) + # @process_reveal_deadlines + # @process_challenge_deadlines + process_slashings(state) + process_final_updates(state) + # @after_process_final_updates +``` #### Helper functions -We define epoch transition helper functions: - ```python def get_total_active_balance(state: BeaconState) -> Gwei: return get_total_balance(state, get_active_validator_indices(state, get_current_epoch(state))) @@ -1276,12 +1286,13 @@ def get_matching_target_attestations(state: BeaconState, epoch: Epoch) -> List[P def get_matching_head_attestations(state: BeaconState, epoch: Epoch) -> List[PendingAttestation]: return [ a for a in get_matching_source_attestations(state, epoch) - if a.data.beacon_block_root == get_block_root_at_slot(state, get_attestation_slot(state, a)) + if a.data.beacon_block_root == get_block_root_at_slot(state, get_attestation_data_slot(state, a.data)) ] ``` ```python -def get_unslashed_attesting_indices(state: BeaconState, attestations: List[PendingAttestation]) -> List[ValidatorIndex]: +def get_unslashed_attesting_indices(state: BeaconState, + attestations: 
List[PendingAttestation]) -> List[ValidatorIndex]: output = set() for a in attestations: output = output.union(get_attesting_indices(state, a.data, a.aggregation_bitfield)) @@ -1294,39 +1305,24 @@ def get_attesting_balance(state: BeaconState, attestations: List[PendingAttestat ``` ```python -def get_crosslink_from_attestation_data(state: BeaconState, data: AttestationData) -> Crosslink: - return Crosslink( - epoch=min(data.target_epoch, state.current_crosslinks[data.shard].epoch + MAX_CROSSLINK_EPOCHS), - previous_crosslink_root=data.previous_crosslink_root, - crosslink_data_root=data.crosslink_data_root, - ) -``` - -```python -def get_winning_crosslink_and_attesting_indices(state: BeaconState, epoch: Epoch, shard: Shard) -> Tuple[Crosslink, List[ValidatorIndex]]: - shard_attestations = [a for a in get_matching_source_attestations(state, epoch) if a.data.shard == shard] - shard_crosslinks = [get_crosslink_from_attestation_data(state, a.data) for a in shard_attestations] - candidate_crosslinks = [ - c for c in shard_crosslinks - if hash_tree_root(state.current_crosslinks[shard]) in (c.previous_crosslink_root, hash_tree_root(c)) - ] - if len(candidate_crosslinks) == 0: - return Crosslink(), [] - - def get_attestations_for(crosslink: Crosslink) -> List[PendingAttestation]: - return [a for a in shard_attestations if get_crosslink_from_attestation_data(state, a.data) == crosslink] - # Winning crosslink has the crosslink data root with the most balance voting for it (ties broken lexicographically) - winning_crosslink = max(candidate_crosslinks, key=lambda crosslink: ( - get_attesting_balance(state, get_attestations_for(crosslink)), crosslink.crosslink_data_root +def get_winning_crosslink_and_attesting_indices(state: BeaconState, + epoch: Epoch, + shard: Shard) -> Tuple[Crosslink, List[ValidatorIndex]]: + attestations = [a for a in get_matching_source_attestations(state, epoch) if a.data.crosslink.shard == shard] + crosslinks = list(filter( + lambda c: 
hash_tree_root(state.current_crosslinks[shard]) in (c.parent_root, hash_tree_root(c)), + [a.data.crosslink for a in attestations] )) - - return winning_crosslink, get_unslashed_attesting_indices(state, get_attestations_for(winning_crosslink)) + # Winning crosslink has the crosslink data root with the most balance voting for it (ties broken lexicographically) + winning_crosslink = max(crosslinks, key=lambda c: ( + get_attesting_balance(state, [a for a in attestations if a.data.crosslink == c]), c.data_root + ), default=Crosslink()) + winning_attestations = [a for a in attestations if a.data.crosslink == winning_crosslink] + return winning_crosslink, get_unslashed_attesting_indices(state, winning_attestations) ``` #### Justification and finalization -Run the following function: - ```python def process_justification_and_finalization(state: BeaconState) -> None: if get_current_epoch(state) <= GENESIS_EPOCH + 1: @@ -1341,12 +1337,16 @@ def process_justification_and_finalization(state: BeaconState) -> None: state.previous_justified_epoch = state.current_justified_epoch state.previous_justified_root = state.current_justified_root state.justification_bitfield = (state.justification_bitfield << 1) % 2**64 - previous_epoch_matching_target_balance = get_attesting_balance(state, get_matching_target_attestations(state, previous_epoch)) + previous_epoch_matching_target_balance = get_attesting_balance( + state, get_matching_target_attestations(state, previous_epoch) + ) if previous_epoch_matching_target_balance * 3 >= get_total_active_balance(state) * 2: state.current_justified_epoch = previous_epoch state.current_justified_root = get_block_root(state, state.current_justified_epoch) state.justification_bitfield |= (1 << 1) - current_epoch_matching_target_balance = get_attesting_balance(state, get_matching_target_attestations(state, current_epoch)) + current_epoch_matching_target_balance = get_attesting_balance( + state, get_matching_target_attestations(state, current_epoch) + ) 
if current_epoch_matching_target_balance * 3 >= get_total_active_balance(state) * 2: state.current_justified_epoch = current_epoch state.current_justified_root = get_block_root(state, state.current_justified_epoch) @@ -1355,27 +1355,25 @@ def process_justification_and_finalization(state: BeaconState) -> None: # Process finalizations bitfield = state.justification_bitfield # The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th as source - if (bitfield >> 1) % 8 == 0b111 and old_previous_justified_epoch == current_epoch - 3: + if (bitfield >> 1) % 8 == 0b111 and old_previous_justified_epoch + 3 == current_epoch: state.finalized_epoch = old_previous_justified_epoch state.finalized_root = get_block_root(state, state.finalized_epoch) # The 2nd/3rd most recent epochs are justified, the 2nd using the 3rd as source - if (bitfield >> 1) % 4 == 0b11 and old_previous_justified_epoch == current_epoch - 2: + if (bitfield >> 1) % 4 == 0b11 and old_previous_justified_epoch + 2 == current_epoch: state.finalized_epoch = old_previous_justified_epoch state.finalized_root = get_block_root(state, state.finalized_epoch) # The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as source - if (bitfield >> 0) % 8 == 0b111 and old_current_justified_epoch == current_epoch - 2: + if (bitfield >> 0) % 8 == 0b111 and old_current_justified_epoch + 2 == current_epoch: state.finalized_epoch = old_current_justified_epoch state.finalized_root = get_block_root(state, state.finalized_epoch) # The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source - if (bitfield >> 0) % 4 == 0b11 and old_current_justified_epoch == current_epoch - 1: + if (bitfield >> 0) % 4 == 0b11 and old_current_justified_epoch + 1 == current_epoch: state.finalized_epoch = old_current_justified_epoch state.finalized_root = get_block_root(state, state.finalized_epoch) ``` #### Crosslinks -Run the following function: - ```python def process_crosslinks(state: BeaconState) -> 
None: state.previous_crosslinks = [c for c in state.current_crosslinks] @@ -1390,14 +1388,11 @@ def process_crosslinks(state: BeaconState) -> None: #### Rewards and penalties -First, we define additional helpers: - ```python def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei: - adjusted_quotient = integer_squareroot(get_total_active_balance(state)) // BASE_REWARD_QUOTIENT - if adjusted_quotient == 0: - return 0 - return state.validator_registry[index].effective_balance // adjusted_quotient // BASE_REWARDS_PER_EPOCH + total_balance = get_total_active_balance(state) + effective_balance = state.validator_registry[index].effective_balance + return effective_balance * BASE_REWARD_FACTOR // integer_squareroot(total_balance) // BASE_REWARDS_PER_EPOCH ``` ```python @@ -1417,7 +1412,7 @@ def get_attestation_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: matching_head_attestations = get_matching_head_attestations(state, previous_epoch) for attestations in (matching_source_attestations, matching_target_attestations, matching_head_attestations): unslashed_attesting_indices = get_unslashed_attesting_indices(state, attestations) - attesting_balance = get_attesting_balance(state, attestations) + attesting_balance = get_total_balance(state, unslashed_attesting_indices) for index in eligible_validator_indices: if index in unslashed_attesting_indices: rewards[index] += get_base_reward(state, index) * attesting_balance // total_balance @@ -1440,7 +1435,9 @@ def get_attestation_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: for index in eligible_validator_indices: penalties[index] += BASE_REWARDS_PER_EPOCH * get_base_reward(state, index) if index not in matching_target_attesting_indices: - penalties[index] += state.validator_registry[index].effective_balance * finality_delay // INACTIVITY_PENALTY_QUOTIENT + penalties[index] += ( + state.validator_registry[index].effective_balance * finality_delay // INACTIVITY_PENALTY_QUOTIENT + ) return 
rewards, penalties ``` @@ -1465,8 +1462,6 @@ def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: return rewards, penalties ``` -Run the following function: - ```python def process_rewards_and_penalties(state: BeaconState) -> None: if get_current_epoch(state) == GENESIS_EPOCH: @@ -1481,13 +1476,14 @@ def process_rewards_and_penalties(state: BeaconState) -> None: #### Registry updates -Run the following function: - ```python def process_registry_updates(state: BeaconState) -> None: # Process activation eligibility and ejections for index, validator in enumerate(state.validator_registry): - if validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and validator.effective_balance >= MAX_EFFECTIVE_BALANCE: + if ( + validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and + validator.effective_balance >= MAX_EFFECTIVE_BALANCE + ): validator.activation_eligibility_epoch = get_current_epoch(state) if is_active_validator(validator, get_current_epoch(state)) and validator.effective_balance <= EJECTION_BALANCE: @@ -1508,15 +1504,12 @@ def process_registry_updates(state: BeaconState) -> None: #### Slashings -Run the following function: - ```python def process_slashings(state: BeaconState) -> None: current_epoch = get_current_epoch(state) - active_validator_indices = get_active_validator_indices(state, current_epoch) - total_balance = get_total_balance(state, active_validator_indices) + total_balance = get_total_active_balance(state) - # Compute `total_penalties` + # Compute slashed balances in the current epoch total_at_start = state.latest_slashed_balances[(current_epoch + 1) % LATEST_SLASHED_EXIT_LENGTH] total_at_end = state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] total_penalties = total_at_end - total_at_start @@ -1532,8 +1525,6 @@ def process_slashings(state: BeaconState) -> None: #### Final updates -Run the following function: - ```python def process_final_updates(state: BeaconState) -> None: current_epoch = 
get_current_epoch(state) @@ -1572,19 +1563,16 @@ def process_final_updates(state: BeaconState) -> None: state.current_epoch_attestations = [] ``` -### Per-slot processing - -At every `slot > GENESIS_SLOT` run the following function: +### Block processing ```python -def advance_slot(state: BeaconState) -> None: - state.slot += 1 +def process_block(state: BeaconState, block: BeaconBlock) -> None: + process_block_header(state, block) + process_randao(state, block.body) + process_eth1_data(state, block.body) + process_operations(state, block.body) ``` -### Per-block processing - -For every `block` except the genesis block, run `process_block_header(state, block)`, `process_randao(state, block)` and `process_eth1_data(state, block)`. - #### Block header ```python @@ -1592,12 +1580,12 @@ def process_block_header(state: BeaconState, block: BeaconBlock) -> None: # Verify that the slots match assert block.slot == state.slot # Verify that the parent matches - assert block.previous_block_root == signing_root(state.latest_block_header) + assert block.parent_root == signing_root(state.latest_block_header) # Save current block as the new latest block state.latest_block_header = BeaconBlockHeader( slot=block.slot, - previous_block_root=block.previous_block_root, - block_body_root=hash_tree_root(block.body), + parent_root=block.parent_root, + body_root=hash_tree_root(block.body), ) # Verify proposer is not slashed proposer = state.validator_registry[get_beacon_proposer_index(state)] @@ -1609,39 +1597,57 @@ def process_block_header(state: BeaconState, block: BeaconBlock) -> None: #### RANDAO ```python -def process_randao(state: BeaconState, block: BeaconBlock) -> None: +def process_randao(state: BeaconState, body: BeaconBlockBody) -> None: proposer = state.validator_registry[get_beacon_proposer_index(state)] # Verify that the provided randao value is valid - assert bls_verify(proposer.pubkey, hash_tree_root(get_current_epoch(state)), block.body.randao_reveal, get_domain(state, 
DOMAIN_RANDAO)) + assert bls_verify( + proposer.pubkey, + hash_tree_root(get_current_epoch(state)), + body.randao_reveal, + get_domain(state, DOMAIN_RANDAO), + ) # Mix it in state.latest_randao_mixes[get_current_epoch(state) % LATEST_RANDAO_MIXES_LENGTH] = ( xor(get_randao_mix(state, get_current_epoch(state)), - hash(block.body.randao_reveal)) + hash(body.randao_reveal)) ) ``` #### Eth1 data ```python -def process_eth1_data(state: BeaconState, block: BeaconBlock) -> None: - state.eth1_data_votes.append(block.body.eth1_data) - if state.eth1_data_votes.count(block.body.eth1_data) * 2 > SLOTS_PER_ETH1_VOTING_PERIOD: - state.latest_eth1_data = block.body.eth1_data +def process_eth1_data(state: BeaconState, body: BeaconBlockBody) -> None: + state.eth1_data_votes.append(body.eth1_data) + if state.eth1_data_votes.count(body.eth1_data) * 2 > SLOTS_PER_ETH1_VOTING_PERIOD: + state.latest_eth1_data = body.eth1_data ``` #### Operations -Note: All functions in this section mutate `state`. +```python +def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: + # Verify that outstanding deposits are processed up to the maximum number of deposits + assert len(body.deposits) == min(MAX_DEPOSITS, state.latest_eth1_data.deposit_count - state.deposit_index) + # Verify that there are no duplicate transfers + assert len(body.transfers) == len(set(body.transfers)) + + for operations, max_operations, function in ( + (body.proposer_slashings, MAX_PROPOSER_SLASHINGS, process_proposer_slashing), + (body.attester_slashings, MAX_ATTESTER_SLASHINGS, process_attester_slashing), + (body.attestations, MAX_ATTESTATIONS, process_attestation), + (body.deposits, MAX_DEPOSITS, process_deposit), + (body.voluntary_exits, MAX_VOLUNTARY_EXITS, process_voluntary_exit), + (body.transfers, MAX_TRANSFERS, process_transfer), + ): + assert len(operations) <= max_operations + for operation in operations: + function(state, operation) +``` ##### Proposer slashings -Verify that 
`len(block.body.proposer_slashings) <= MAX_PROPOSER_SLASHINGS`. - -For each `proposer_slashing` in `block.body.proposer_slashings`, run the following function: - ```python -def process_proposer_slashing(state: BeaconState, - proposer_slashing: ProposerSlashing) -> None: +def process_proposer_slashing(state: BeaconState, proposer_slashing: ProposerSlashing) -> None: """ Process ``ProposerSlashing`` operation. """ @@ -1662,21 +1668,16 @@ def process_proposer_slashing(state: BeaconState, ##### Attester slashings -Verify that `len(block.body.attester_slashings) <= MAX_ATTESTER_SLASHINGS`. - -For each `attester_slashing` in `block.body.attester_slashings`, run the following function: - ```python -def process_attester_slashing(state: BeaconState, - attester_slashing: AttesterSlashing) -> None: +def process_attester_slashing(state: BeaconState, attester_slashing: AttesterSlashing) -> None: """ Process ``AttesterSlashing`` operation. """ attestation_1 = attester_slashing.attestation_1 attestation_2 = attester_slashing.attestation_2 assert is_slashable_attestation_data(attestation_1.data, attestation_2.data) - assert verify_indexed_attestation(state, attestation_1) - assert verify_indexed_attestation(state, attestation_2) + validate_indexed_attestation(state, attestation_1) + validate_indexed_attestation(state, attestation_2) slashed_any = False attesting_indices_1 = attestation_1.custody_bit_0_indices + attestation_1.custody_bit_1_indices @@ -1690,50 +1691,43 @@ def process_attester_slashing(state: BeaconState, ##### Attestations -Verify that `len(block.body.attestations) <= MAX_ATTESTATIONS`. - -For each `attestation` in `block.body.attestations`, run the following function: - ```python def process_attestation(state: BeaconState, attestation: Attestation) -> None: """ Process ``Attestation`` operation. 
""" - attestation_slot = get_attestation_slot(state, attestation) + data = attestation.data + attestation_slot = get_attestation_data_slot(state, data) assert attestation_slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= attestation_slot + SLOTS_PER_EPOCH - # Check target epoch, source epoch, source root, and source crosslink - data = attestation.data - assert (data.target_epoch, data.source_epoch, data.source_root, data.previous_crosslink_root) in { - (get_current_epoch(state), state.current_justified_epoch, state.current_justified_root, hash_tree_root(state.current_crosslinks[data.shard])), - (get_previous_epoch(state), state.previous_justified_epoch, state.previous_justified_root, hash_tree_root(state.previous_crosslinks[data.shard])), - } - - # Check crosslink data root - assert data.crosslink_data_root == ZERO_HASH # [to be removed in phase 1] - - # Check signature and bitfields - assert verify_indexed_attestation(state, convert_to_indexed(state, attestation)) - - # Cache pending attestation pending_attestation = PendingAttestation( data=data, aggregation_bitfield=attestation.aggregation_bitfield, inclusion_delay=state.slot - attestation_slot, proposer_index=get_beacon_proposer_index(state), ) + + assert data.target_epoch in (get_previous_epoch(state), get_current_epoch(state)) if data.target_epoch == get_current_epoch(state): + ffg_data = (state.current_justified_epoch, state.current_justified_root, get_current_epoch(state)) + parent_crosslink = state.current_crosslinks[data.crosslink.shard] state.current_epoch_attestations.append(pending_attestation) else: + ffg_data = (state.previous_justified_epoch, state.previous_justified_root, get_previous_epoch(state)) + parent_crosslink = state.previous_crosslinks[data.crosslink.shard] state.previous_epoch_attestations.append(pending_attestation) + + # Check FFG data, crosslink data, and signature + assert ffg_data == (data.source_epoch, data.source_root, data.target_epoch) + assert data.crosslink.start_epoch == 
parent_crosslink.end_epoch + assert data.crosslink.end_epoch == min(data.target_epoch, parent_crosslink.end_epoch + MAX_EPOCHS_PER_CROSSLINK) + assert data.crosslink.parent_root == hash_tree_root(parent_crosslink) + assert data.crosslink.data_root == ZERO_HASH # [to be removed in phase 1] + validate_indexed_attestation(state, convert_to_indexed(state, attestation)) ``` ##### Deposits -Verify that `len(block.body.deposits) == min(MAX_DEPOSITS, state.latest_eth1_data.deposit_count - state.deposit_index)`. - -For each `deposit` in `block.body.deposits`, run the following function: - ```python def process_deposit(state: BeaconState, deposit: Deposit) -> None: """ @@ -1744,12 +1738,11 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: leaf=hash_tree_root(deposit.data), proof=deposit.proof, depth=DEPOSIT_CONTRACT_TREE_DEPTH, - index=deposit.index, + index=state.deposit_index, root=state.latest_eth1_data.deposit_root, ) # Deposits must be processed in order - assert deposit.index == state.deposit_index state.deposit_index += 1 pubkey = deposit.data.pubkey @@ -1757,8 +1750,12 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: validator_pubkeys = [v.pubkey for v in state.validator_registry] if pubkey not in validator_pubkeys: # Verify the deposit signature (proof of possession). - # Invalid signatures are allowed by the deposit contract, and hence included on-chain, but must not be processed. - if not bls_verify(pubkey, signing_root(deposit.data), deposit.data.signature, get_domain(state, DOMAIN_DEPOSIT)): + # Invalid signatures are allowed by the deposit contract, + # and hence included on-chain, but must not be processed. 
+ # Note: deposits are valid across forks, hence the deposit domain is retrieved directly from `bls_domain` + if not bls_verify( + pubkey, signing_root(deposit.data), deposit.data.signature, bls_domain(DOMAIN_DEPOSIT) + ): return # Add validator and balance entries @@ -1780,10 +1777,6 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: ##### Voluntary exits -Verify that `len(block.body.voluntary_exits) <= MAX_VOLUNTARY_EXITS`. - -For each `exit` in `block.body.voluntary_exits`, run the following function: - ```python def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None: """ @@ -1797,7 +1790,7 @@ def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None: # Exits must specify an epoch when they become valid; they are not valid before then assert get_current_epoch(state) >= exit.epoch # Verify the validator has been active long enough - assert get_current_epoch(state) - validator.activation_epoch >= PERSISTENT_COMMITTEE_PERIOD + assert get_current_epoch(state) >= validator.activation_epoch + PERSISTENT_COMMITTEE_PERIOD # Verify signature domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, exit.epoch) assert bls_verify(validator.pubkey, signing_root(exit), exit.signature, domain) @@ -1807,10 +1800,6 @@ def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None: ##### Transfers -Verify that `len(block.body.transfers) <= MAX_TRANSFERS` and that all transfers are distinct. 
- -For each `transfer` in `block.body.transfers`, run the following function: - ```python def process_transfer(state: BeaconState, transfer: Transfer) -> None: """ @@ -1829,7 +1818,7 @@ def process_transfer(state: BeaconState, transfer: Transfer) -> None: # Verify that the pubkey is valid assert ( state.validator_registry[transfer.sender].withdrawal_credentials == - BLS_WITHDRAWAL_PREFIX_BYTE + hash(transfer.pubkey)[1:] + int_to_bytes(BLS_WITHDRAWAL_PREFIX, length=1) + hash(transfer.pubkey)[1:] ) # Verify that the signature is valid assert bls_verify(transfer.pubkey, signing_root(transfer), transfer.signature, get_domain(state, DOMAIN_TRANSFER)) @@ -1841,12 +1830,3 @@ def process_transfer(state: BeaconState, transfer: Transfer) -> None: assert not (0 < state.balances[transfer.sender] < MIN_DEPOSIT_AMOUNT) assert not (0 < state.balances[transfer.recipient] < MIN_DEPOSIT_AMOUNT) ``` - -#### State root verification - -Verify the block's `state_root` by running the following function: - -```python -def verify_block_state_root(state: BeaconState, block: BeaconBlock) -> None: - assert block.state_root == hash_tree_root(state) -``` diff --git a/specs/core/0_deposit-contract.md b/specs/core/0_deposit-contract.md index 6843e407e..e80dad1c5 100644 --- a/specs/core/0_deposit-contract.md +++ b/specs/core/0_deposit-contract.md @@ -1,6 +1,6 @@ # Ethereum 2.0 Phase 0 -- Deposit Contract -**NOTICE**: This document is a work in progress for researchers and implementers. +**Notice**: This document is a work-in-progress for researchers and implementers. ## Table of contents @@ -24,7 +24,7 @@ ## Introduction -This document represents is the specification for the beacon chain deposit contract, part of Ethereum 2.0 phase 0. +This document represents the specification for the beacon chain deposit contract, part of Ethereum 2.0 Phase 0. 
## Constants @@ -40,11 +40,11 @@ This document represents is the specification for the beacon chain deposit contr | - | - | | `DEPOSIT_CONTRACT_ADDRESS` | **TBD** | | `DEPOSIT_CONTRACT_TREE_DEPTH` | `2**5` (= 32) | -| `CHAIN_START_FULL_DEPOSIT_THRESHOLD` | `2**16` (=65,536) | +| `CHAIN_START_FULL_DEPOSIT_THRESHOLD` | `2**16` (= 65,536) | ## Ethereum 1.0 deposit contract -The initial deployment phases of Ethereum 2.0 are implemented without consensus changes to Ethereum 1.0. A deposit contract at address `DEPOSIT_CONTRACT_ADDRESS` is added to Ethereum 1.0 for deposits of ETH to the beacon chain. Validator balances will be withdrawable to the shards in phase 2, i.e. when the EVM2.0 is deployed and the shards have state. +The initial deployment phases of Ethereum 2.0 are implemented without consensus changes to Ethereum 1.0. A deposit contract at address `DEPOSIT_CONTRACT_ADDRESS` is added to Ethereum 1.0 for deposits of ETH to the beacon chain. Validator balances will be withdrawable to the shards in Phase 2 (i.e. when the EVM 2.0 is deployed and the shards have state). ### Arguments @@ -52,7 +52,7 @@ The deposit contract has a `deposit` function which takes the amount in Ethereum #### Withdrawal credentials -One of the `DepositData` fields is `withdrawal_credentials`. It is a commitment to credentials for withdrawals to shards. The first byte of `withdrawal_credentials` is a version number. As of now the only expected format is as follows: +One of the `DepositData` fields is `withdrawal_credentials`. It is a commitment to credentials for withdrawals to shards. The first byte of `withdrawal_credentials` is a version number. 
As of now, the only expected format is as follows: * `withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX_BYTE` * `withdrawal_credentials[1:] == hash(withdrawal_pubkey)[1:]` where `withdrawal_pubkey` is a BLS pubkey @@ -72,7 +72,7 @@ Every Ethereum 1.0 deposit, of size at least `MIN_DEPOSIT_AMOUNT`, emits a `Depo ### `Eth2Genesis` log -When `CHAIN_START_FULL_DEPOSIT_THRESHOLD` of full deposits have been made, the deposit contract emits the `Eth2Genesis` log. The beacon chain state may then be initialized by calling the `get_genesis_beacon_state` function (defined below) where: +When `CHAIN_START_FULL_DEPOSIT_THRESHOLD` of full deposits have been made, the deposit contract emits the `Eth2Genesis` log. The beacon chain state may then be initialized by calling the `get_genesis_beacon_state` function (defined [here](./0_beacon-chain.md#genesis-state)) where: * `genesis_time` equals `time` in the `Eth2Genesis` log * `latest_eth1_data.deposit_root` equals `deposit_root` in the `Eth2Genesis` log @@ -82,12 +82,12 @@ When `CHAIN_START_FULL_DEPOSIT_THRESHOLD` of full deposits have been made, the d ## Vyper code -The source for the Vyper contract lives in a [separate repository](https://github.com/ethereum/deposit_contract) at [https://github.com/ethereum/deposit_contract/blob/master/deposit_contract/contracts/validator_registration.v.py](https://github.com/ethereum/deposit_contract/blob/master/deposit_contract/contracts/validator_registration.v.py). +The source for the Vyper contract lives [here](./../../deposit_contract/contracts/validator_registration.v.py). -Note: to save ~10x on gas this contract uses a somewhat unintuitive progressive Merkle root calculation algo that requires only O(log(n)) storage. See https://github.com/ethereum/research/blob/master/beacon_chain_impl/progressive_merkle_tree.py for an implementation of the same algo in python tested for correctness. 
+*Note*: To save ~10x on gas, this contract uses a somewhat unintuitive progressive Merkle root calculation algo that requires only O(log(n)) storage. See https://github.com/ethereum/research/blob/master/beacon_chain_impl/progressive_merkle_tree.py for an implementation of the same algo in Python tested for correctness. For convenience, we provide the interface to the contract here: * `__init__()`: initializes the contract * `get_deposit_root() -> bytes32`: returns the current root of the deposit tree -* `deposit(pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96])`: adds a deposit instance to the deposit tree, incorporating the input arguments and the value transferred in the given call. Note: the amount of value transferred *must* be at least `MIN_DEPOSIT_AMOUNT`. Each of these constants are specified in units of Gwei. +* `deposit(pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96])`: adds a deposit instance to the deposit tree, incorporating the input arguments and the value transferred in the given call. *Note*: The amount of value transferred *must* be at least `MIN_DEPOSIT_AMOUNT`. Each of these constants are specified in units of Gwei. diff --git a/specs/core/0_fork-choice.md b/specs/core/0_fork-choice.md index 6936df55c..91c3e27ee 100644 --- a/specs/core/0_fork-choice.md +++ b/specs/core/0_fork-choice.md @@ -1,6 +1,6 @@ # Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice -**NOTICE**: This document is a work in progress for researchers and implementers. +**Notice**: This document is a work-in-progress for researchers and implementers. 
## Table of contents @@ -13,12 +13,14 @@ - [Time parameters](#time-parameters) - [Beacon chain processing](#beacon-chain-processing) - [Beacon chain fork choice rule](#beacon-chain-fork-choice-rule) + - [Implementation notes](#implementation-notes) + - [Justification and finality at genesis](#justification-and-finality-at-genesis) ## Introduction -This document represents is the specification for the beacon chain fork choice rule, part of Ethereum 2.0 phase 0. +This document represents the specification for the beacon chain fork choice rule, part of Ethereum 2.0 Phase 0. ## Prerequisites @@ -36,21 +38,21 @@ All terminology, constants, functions, and protocol mechanics defined in the [Ph Processing the beacon chain is similar to processing the Ethereum 1.0 chain. Clients download and process blocks and maintain a view of what is the current "canonical chain", terminating at the current "head". For a beacon block, `block`, to be processed by a node, the following conditions must be met: -* The parent block with root `block.previous_block_root` has been processed and accepted. +* The parent block with root `block.parent_root` has been processed and accepted. * An Ethereum 1.0 block pointed to by the `state.latest_eth1_data.block_hash` has been processed and accepted. * The node's Unix time is greater than or equal to `state.genesis_time + block.slot * SECONDS_PER_SLOT`. -Note: Leap seconds mean that slots will occasionally last `SECONDS_PER_SLOT + 1` or `SECONDS_PER_SLOT - 1` seconds, possibly several times a year. +*Note*: Leap seconds mean that slots will occasionally last `SECONDS_PER_SLOT + 1` or `SECONDS_PER_SLOT - 1` seconds, possibly several times a year. -Note: Nodes needs to have a clock that is roughly (i.e. within `SECONDS_PER_SLOT` seconds) synchronized with the other nodes. +*Note*: Nodes need to have a clock that is roughly (i.e. within `SECONDS_PER_SLOT` seconds) synchronized with the other nodes.
### Beacon chain fork choice rule -The beacon chain fork choice rule is a hybrid that combines justification and finality with Latest Message Driven (LMD) Greediest Heaviest Observed SubTree (GHOST). At any point in time a validator `v` subjectively calculates the beacon chain head as follows. +The beacon chain fork choice rule is a hybrid that combines justification and finality with Latest Message Driven (LMD) Greediest Heaviest Observed SubTree (GHOST). At any point in time, a validator `v` subjectively calculates the beacon chain head as follows. -* Abstractly define `Store` as the type of storage object for the chain data and `store` be the set of attestations and blocks that the validator `v` has observed and verified (in particular, block ancestors must be recursively verified). Attestations not yet included in any chain are still included in `store`. -* Let `finalized_head` be the finalized block with the highest epoch. (A block `B` is finalized if there is a descendant of `B` in `store` the processing of which sets `B` as finalized.) -* Let `justified_head` be the descendant of `finalized_head` with the highest epoch that has been justified for at least 1 epoch. (A block `B` is justified if there is a descendant of `B` in `store` the processing of which sets `B` as justified.) If no such descendant exists set `justified_head` to `finalized_head`. +* Abstractly define `Store` as the type of storage object for the chain data, and let `store` be the set of attestations and blocks that the validator `v` has observed and verified (in particular, block ancestors must be recursively verified). Attestations not yet included in any chain are still included in `store`. +* Let `finalized_head` be the finalized block with the highest epoch. (A block `B` is finalized if there is a descendant of `B` in `store`, the processing of which sets `B` as finalized.) 
+* Let `justified_head` be the descendant of `finalized_head` with the highest epoch that has been justified for at least 1 epoch. (A block `B` is justified if there is a descendant of `B` in `store`, the processing of which sets `B` as justified.) If no such descendant exists, set `justified_head` to `finalized_head`. * Let `get_ancestor(store: Store, block: BeaconBlock, slot: Slot) -> BeaconBlock` be the ancestor of `block` with slot number `slot`. The `get_ancestor` function can be defined recursively as: ```python @@ -68,7 +70,7 @@ def get_ancestor(store: Store, block: BeaconBlock, slot: Slot) -> BeaconBlock: * Let `get_latest_attestation(store: Store, index: ValidatorIndex) -> Attestation` be the attestation with the highest slot number in `store` from the validator with the given `index`. If several such attestations exist, use the one the validator `v` observed first. * Let `get_latest_attestation_target(store: Store, index: ValidatorIndex) -> BeaconBlock` be the target block in the attestation `get_latest_attestation(store, index)`. -* Let `get_children(store: Store, block: BeaconBlock) -> List[BeaconBlock]` returns the child blocks of the given `block`. +* Let `get_children(store: Store, block: BeaconBlock) -> List[BeaconBlock]` return the child blocks of the given `block`. * Let `justified_head_state` be the resulting `BeaconState` object from processing the chain up to the `justified_head`. * The `head` is `lmd_ghost(store, justified_head_state, justified_head)` where the function `lmd_ghost` is defined below. Note that the implementation below is suboptimal; there are implementations that compute the head in time logarithmic in slot count.
@@ -99,3 +101,9 @@ def lmd_ghost(store: Store, start_state: BeaconState, start_block: BeaconBlock) # Ties broken by favoring block with lexicographically higher root head = max(children, key=lambda x: (get_vote_count(x), hash_tree_root(x))) ``` + +## Implementation notes + +### Justification and finality at genesis + +During genesis, justification and finality root fields within the `BeaconState` reference `ZERO_HASH` rather than a known block. `ZERO_HASH` in `previous_justified_root`, `current_justified_root`, and `finalized_root` should be considered as an alias to the root of the genesis block. diff --git a/specs/core/1_custody-game.md b/specs/core/1_custody-game.md index 9f1c0e66a..6c89ef853 100644 --- a/specs/core/1_custody-game.md +++ b/specs/core/1_custody-game.md @@ -1,6 +1,6 @@ # Ethereum 2.0 Phase 1 -- Custody Game -**NOTICE**: This spec is a work-in-progress for researchers and implementers. +**Notice**: This document is a work-in-progress for researchers and implementers. ## Table of contents @@ -14,6 +14,7 @@ - [Misc](#misc) - [Time parameters](#time-parameters) - [Max operations per block](#max-operations-per-block) + - [Reward and penalty quotients](#reward-and-penalty-quotients) - [Signature domains](#signature-domains) - [Data structures](#data-structures) - [Custody objects](#custody-objects) @@ -22,47 +23,51 @@ - [`CustodyChunkChallengeRecord`](#custodychunkchallengerecord) - [`CustodyBitChallengeRecord`](#custodybitchallengerecord) - [`CustodyResponse`](#custodyresponse) + - [New beacon operations](#new-beacon-operations) - [`CustodyKeyReveal`](#custodykeyreveal) + - [`EarlyDerivedSecretReveal`](#earlyderivedsecretreveal) - [Phase 0 container updates](#phase-0-container-updates) - [`Validator`](#validator) - [`BeaconState`](#beaconstate) - [`BeaconBlockBody`](#beaconblockbody) - [Helpers](#helpers) - - [`typeof`](#typeof) - - [`empty`](#empty) + - [`ceillog2`](#ceillog2) - [`get_crosslink_chunk_count`](#get_crosslink_chunk_count) - 
[`get_custody_chunk_bit`](#get_custody_chunk_bit) - - [`epoch_to_custody_period`](#epoch_to_custody_period) + - [`get_chunk_bits_root`](#get_chunk_bits_root) + - [`get_randao_epoch_for_custody_period`](#get_randao_epoch_for_custody_period) + - [`get_validators_custody_reveal_period`](#get_validators_custody_reveal_period) - [`replace_empty_or_append`](#replace_empty_or_append) - - [`verify_custody_key`](#verify_custody_key) - [Per-block processing](#per-block-processing) - [Operations](#operations) - - [Custody reveals](#custody-reveals) + - [Custody key reveals](#custody-key-reveals) + - [Early derived secret reveals](#early-derived-secret-reveals) - [Chunk challenges](#chunk-challenges) - [Bit challenges](#bit-challenges) - [Custody responses](#custody-responses) - [Per-epoch processing](#per-epoch-processing) + - [Handling of custody-related deadlines](#handling-of-custody-related-deadlines) ## Introduction -This document details the beacon chain additions and changes in Phase 1 of Ethereum 2.0 to support the shard data custody game, building upon the [phase 0](0_beacon-chain.md) specification. +This document details the beacon chain additions and changes in Phase 1 of Ethereum 2.0 to support the shard data custody game, building upon the [Phase 0](0_beacon-chain.md) specification. 
## Terminology -* **Custody game**: -* **Custody period**: -* **Custody chunk**: -* **Custody chunk bit**: -* **Custody chunk challenge**: -* **Custody bit**: -* **Custody bit challenge**: -* **Custody key**: -* **Custody key reveal**: -* **Custody key mask**: -* **Custody response**: -* **Custody response deadline**: +* **Custody game**— +* **Custody period**— +* **Custody chunk**— +* **Custody chunk bit**— +* **Custody chunk challenge**— +* **Custody bit**— +* **Custody bit challenge**— +* **Custody key**— +* **Custody key reveal**— +* **Custody key mask**— +* **Custody response**— +* **Custody response deadline**— ## Constants @@ -79,24 +84,34 @@ This document details the beacon chain additions and changes in Phase 1 of Ether | Name | Value | Unit | Duration | | - | - | :-: | :-: | | `MAX_CHUNK_CHALLENGE_DELAY` | `2**11` (= 2,048) | epochs | ~9 days | -| `EPOCHS_PER_CUSTODY_PERIOD` | `2**11` (= 2,048) | epochs | ~9 days | | `CUSTODY_RESPONSE_DEADLINE` | `2**14` (= 16,384) | epochs | ~73 days | +| `RANDAO_PENALTY_EPOCHS` | `2**1` (= 2) | epochs | 12.8 minutes | +| `EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS` | `2**14` | epochs | ~73 days | +| `EPOCHS_PER_CUSTODY_PERIOD` | `2**11` (= 2,048) | epochs | ~9 days | +| `CUSTODY_PERIOD_TO_RANDAO_PADDING` | `2**11` (= 2,048) | epochs | ~9 days | +| `MAX_REVEAL_LATENESS_DECREMENT` | `2**7` (= 128) | epochs | ~14 hours | ### Max operations per block | Name | Value | | - | - | | `MAX_CUSTODY_KEY_REVEALS` | `2**4` (= 16) | +| `MAX_EARLY_DERIVED_SECRET_REVEALS` | `1` | | `MAX_CUSTODY_CHUNK_CHALLENGES` | `2**2` (= 4) | | `MAX_CUSTODY_BIT_CHALLENGES` | `2**2` (= 4) | | `MAX_CUSTODY_RESPONSES` | `2**5` (= 32) | +### Reward and penalty quotients + +| Name | Value | +| - | - | +| `EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE` | `2**1` (= 2) | + ### Signature domains | Name | Value | | - | - | -| `DOMAIN_CUSTODY_KEY_REVEAL` | `6` | -| `DOMAIN_CUSTODY_BIT_CHALLENGE` | `7` | +| `DOMAIN_CUSTODY_BIT_CHALLENGE` | `6` | ## Data 
structures @@ -105,75 +120,91 @@ This document details the beacon chain additions and changes in Phase 1 of Ether #### `CustodyChunkChallenge` ```python -{ - 'responder_index': ValidatorIndex, - 'attestation': Attestation, - 'chunk_index': 'uint64', -} +class CustodyChunkChallenge(Container): + responder_index: ValidatorIndex + attestation: Attestation + chunk_index: uint64 ``` #### `CustodyBitChallenge` ```python -{ - 'responder_index': ValidatorIndex, - 'attestation': Attestation, - 'challenger_index': ValidatorIndex, - 'responder_key': BLSSignature, - 'chunk_bits': Bitfield, - 'signature': BLSSignature, -} +class CustodyBitChallenge(Container): + responder_index: ValidatorIndex + attestation: Attestation + challenger_index: ValidatorIndex + responder_key: Bytes96 + chunk_bits: bytes + signature: Bytes96 ``` #### `CustodyChunkChallengeRecord` ```python -{ - 'challenge_index': 'uint64', - 'challenger_index': ValidatorIndex, - 'responder_index': ValidatorIndex, - 'deadline': Epoch, - 'crosslink_data_root': Hash, - 'depth': 'uint64', - 'chunk_index': 'uint64', -} +class CustodyChunkChallengeRecord(Container): + challenge_index: uint64 + challenger_index: ValidatorIndex + responder_index: ValidatorIndex + inclusion_epoch: Epoch + data_root: Bytes32 + depth: uint64 + chunk_index: uint64 ``` #### `CustodyBitChallengeRecord` ```python -{ - 'challenge_index': 'uint64', - 'challenger_index': ValidatorIndex, - 'responder_index': ValidatorIndex, - 'deadline': Epoch, - 'crosslink_data_root': Hash, - 'chunk_bits': Bitfield, - 'responder_key': BLSSignature, -} +class CustodyBitChallengeRecord(Container): + challenge_index: uint64 + challenger_index: ValidatorIndex + responder_index: ValidatorIndex + inclusion_epoch: Epoch + data_root: Bytes32 + chunk_count: uint64 + chunk_bits_merkle_root: Bytes32 + responder_key: Bytes96 ``` #### `CustodyResponse` ```python -{ - 'challenge_index': 'uint64', - 'chunk_index': 'uint64', - 'chunk': ['byte', BYTES_PER_CUSTODY_CHUNK], - 'branch': 
[Hash], -} +class CustodyResponse(Container): + challenge_index: uint64 + chunk_index: uint64 + chunk: Vector[bytes, BYTES_PER_CUSTODY_CHUNK] + data_branch: List[Bytes32] + chunk_bits_branch: List[Bytes32] + chunk_bits_leaf: Bytes32 ``` +### New beacon operations + #### `CustodyKeyReveal` ```python -{ - 'revealer_index': ValidatorIndex, - 'period': 'uint64', - 'key': BLSSignature, - 'masker_index': ValidatorIndex, - 'mask': Hash, -} +class CustodyKeyReveal(Container): + # Index of the validator whose key is being revealed + revealer_index: uint64 + # Reveal (masked signature) + reveal: Bytes96 +``` + +#### `EarlyDerivedSecretReveal` + +Represents an early (punishable) reveal of one of the derived secrets, where derived secrets are RANDAO reveals and custody reveals (both are part of the same domain). + +```python +class EarlyDerivedSecretReveal(Container): + # Index of the validator whose key is being revealed + revealed_index: uint64 + # RANDAO epoch of the key that is being revealed + epoch: uint64 + # Reveal (masked signature) + reveal: Bytes96 + # Index of the validator who revealed (whistleblower) + masker_index: uint64 + # Mask used to hide the actual reveal signature (prevent reveal from being stolen) + mask: Bytes32 ``` ### Phase 0 container updates @@ -183,61 +214,97 @@ Add the following fields to the end of the specified container objects. Fields w #### `Validator` ```python - 'custody_reveal_index': 'uint64', - 'max_reveal_lateness': 'uint64', +class Validator(Container): + # next_custody_reveal_period is initialised to the custody period + # (of the particular validator) in which the validator is activated + # = get_validators_custody_reveal_period(...) 
+ next_custody_reveal_period: uint64 + max_reveal_lateness: uint64 ``` #### `BeaconState` ```python - 'custody_chunk_challenge_records': [CustodyChunkChallengeRecord], - 'custody_bit_challenge_records': [CustodyBitChallengeRecord], - 'custody_challenge_index': 'uint64', +class BeaconState(Container): + custody_chunk_challenge_records: List[CustodyChunkChallengeRecord] + custody_bit_challenge_records: List[CustodyBitChallengeRecord] + custody_challenge_index: uint64 + + # Future derived secrets already exposed; contains the indices of the exposed validator + # at RANDAO reveal period % EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS + exposed_derived_secrets: Vector[List[uint64], EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS] ``` #### `BeaconBlockBody` ```python - 'custody_key_reveals': [CustodyKeyReveal], - 'custody_chunk_challenges': [CustodyChunkChallenge], - 'custody_bit_challenges': [CustodyBitChallenge], - 'custody_responses': [CustodyResponse], +class BeaconBlockBody(Container): + custody_chunk_challenges: List[CustodyChunkChallenge] + custody_bit_challenges: List[CustodyBitChallenge] + custody_responses: List[CustodyResponse] + custody_key_reveals: List[CustodyKeyReveal] + early_derived_secret_reveals: List[EarlyDerivedSecretReveal] ``` ## Helpers -### `typeof` +### `ceillog2` -The `typeof` function accepts and SSZ object as a single input and returns the corresponding SSZ type. - -### `empty` - -The `empty` function accepts and SSZ type as input and returns an object of that type with all fields initialized to default values. 
+```python +def ceillog2(x): + return x.bit_length() +``` ### `get_crosslink_chunk_count` ```python -def get_custody_chunk_count(attestation: Attestation) -> int: - crosslink_start_epoch = attestation.data.latest_crosslink.epoch - crosslink_end_epoch = slot_to_epoch(attestation.data.slot) - crosslink_crosslink_length = min(MAX_CROSSLINK_EPOCHS, end_epoch - start_epoch) +def get_custody_chunk_count(crosslink: Crosslink) -> int: + crosslink_length = min(MAX_EPOCHS_PER_CROSSLINK, crosslink.end_epoch - crosslink.start_epoch) chunks_per_epoch = 2 * BYTES_PER_SHARD_BLOCK * SLOTS_PER_EPOCH // BYTES_PER_CUSTODY_CHUNK - return crosslink_crosslink_length * chunks_per_epoch + return crosslink_length * chunks_per_epoch ``` ### `get_custody_chunk_bit` ```python -def get_custody_chunk_bit(key: BLSSignature, chunk: bytes) -> bool: +def get_custody_chunk_bit(key: Bytes96, chunk: bytes) -> bool: # TODO: Replace with something MPC-friendly, e.g. the Legendre symbol - return get_bitfield_bit(hash(challenge.responder_key + chunk), 0) + return get_bitfield_bit(hash(key + chunk), 0) ``` -### `epoch_to_custody_period` +### `get_chunk_bits_root` ```python -def epoch_to_custody_period(epoch: Epoch) -> int: - return epoch // EPOCHS_PER_CUSTODY_PERIOD +def get_chunk_bits_root(chunk_bitfield: bytes) -> Bytes32: + aggregated_bits = bytearray([0] * 32) + for i in range(0, len(chunk_bitfield), 32): + for j in range(32): + aggregated_bits[j] ^= chunk_bitfield[i + j] + return hash(aggregated_bits) +``` + +### `get_randao_epoch_for_custody_period` + +```python +def get_randao_epoch_for_custody_period(period: int, validator_index: ValidatorIndex) -> Epoch: + next_period_start = (period + 1) * EPOCHS_PER_CUSTODY_PERIOD - validator_index % EPOCHS_PER_CUSTODY_PERIOD + return next_period_start + CUSTODY_PERIOD_TO_RANDAO_PADDING +``` + +### `get_validators_custody_reveal_period` + +```python +def get_validators_custody_reveal_period(state: BeaconState, + validator_index: ValidatorIndex, + epoch: 
Epoch=None) -> int: + ''' + This function returns the reveal period for a given validator. + If no epoch is supplied, the current epoch is assumed. + Note: This function implicitly requires that validators are not removed from the + validator set in fewer than EPOCHS_PER_CUSTODY_PERIOD epochs + ''' + epoch = get_current_epoch(state) if epoch is None else epoch + return (epoch + validator_index % EPOCHS_PER_CUSTODY_PERIOD) // EPOCHS_PER_CUSTODY_PERIOD ``` ### `replace_empty_or_append` @@ -245,75 +312,144 @@ def epoch_to_custody_period(epoch: Epoch) -> int: ```python def replace_empty_or_append(list: List[Any], new_element: Any) -> int: for i in range(len(list)): - if list[i] == empty(typeof(new_element)): + if is_empty(list[i]): list[i] = new_element return i list.append(new_element) return len(list) - 1 ``` -### `verify_custody_key` - -```python -def verify_custody_key(state: BeaconState, reveal: CustodyKeyReveal) -> bool: - # Case 1: non-masked non-punitive non-early reveal - pubkeys = [state.validator_registry[reveal.revealer_index].pubkey] - message_hashes = [hash_tree_root(reveal.period)] - - # Case 2: masked punitive early reveal - # Masking prevents proposer stealing the whistleblower reward - # Secure under the aggregate extraction infeasibility assumption - # See pages 11-12 of https://crypto.stanford.edu/~dabo/pubs/papers/aggreg.pdf - if reveal.mask != ZERO_HASH: - pubkeys.append(state.validator_registry[reveal.masker_index].pubkey) - message_hashes.append(reveal.mask) - - return bls_verify_multiple( - pubkeys=pubkeys, - message_hashes=message_hashes, - signature=reveal.key, - domain=get_domain( - fork=state.fork, - epoch=reveal.period * EPOCHS_PER_CUSTODY_PERIOD, - domain_type=DOMAIN_CUSTODY_KEY_REVEAL, - ), - ) -``` - ## Per-block processing ### Operations -Add the following operations to the per-block processing, in order the given below and after all other operations in phase 0. 
+Add the following operations to the per-block processing, in the order given below and after all other operations in Phase 0. -#### Custody reveals +#### Custody key reveals Verify that `len(block.body.custody_key_reveals) <= MAX_CUSTODY_KEY_REVEALS`. For each `reveal` in `block.body.custody_key_reveals`, run the following function: ```python -def process_custody_reveal(state: BeaconState, - reveal: CustodyKeyReveal) -> None: - assert verify_custody_key(state, reveal) +def process_custody_key_reveal(state: BeaconState, + reveal: CustodyKeyReveal) -> None: + """ + Process ``CustodyKeyReveal`` operation. + Note that this function mutates ``state``. + """ + revealer = state.validator_registry[reveal.revealer_index] - current_custody_period = epoch_to_custody_period(get_current_epoch(state)) + epoch_to_sign = get_randao_epoch_for_custody_period(revealer.next_custody_reveal_period, reveal.revealed_index) - # Case 1: non-masked non-punitive non-early reveal - if reveal.mask == ZERO_HASH: - assert reveal.period == epoch_to_custody_period(revealer.activation_epoch) + revealer.custody_reveal_index - # Revealer is active or exited - assert is_active_validator(revealer, get_current_epoch(state)) or revealer.exit_epoch > get_current_epoch(state) - revealer.custody_reveal_index += 1 - revealer.max_reveal_lateness = max(revealer.max_reveal_lateness, current_custody_period - reveal.period) - proposer_index = get_beacon_proposer_index(state) - increase_balance(state, proposer_index, base_reward(state, index) // MINOR_REWARD_QUOTIENT) + assert revealer.next_custody_reveal_period < get_validators_custody_reveal_period(state, reveal.revealed_index) - # Case 2: masked punitive early reveal + # Revealed validator is active or exited, but not withdrawn + assert is_slashable_validator(revealer, get_current_epoch(state)) + + # Verify signature + assert bls_verify( + pubkey=revealer.pubkey, + message_hash=hash_tree_root(epoch_to_sign), + signature=reveal.reveal, + domain=get_domain( + 
state=state, + domain_type=DOMAIN_RANDAO, + message_epoch=epoch_to_sign, + ), + ) + + # Decrement max reveal lateness if response is timely + if revealer.next_custody_reveal_period == get_validators_custody_reveal_period(state, reveal.revealer_index) - 2: + revealer.max_reveal_lateness -= MAX_REVEAL_LATENESS_DECREMENT + revealer.max_reveal_lateness = max( + revealer.max_reveal_lateness, + get_validators_custody_reveal_period(state, reveal.revealed_index) - revealer.next_custody_reveal_period + ) + + # Process reveal + revealer.next_custody_reveal_period += 1 + + # Reward Block Preposer + proposer_index = get_beacon_proposer_index(state) + increase_balance(state, proposer_index, get_base_reward(state, reveal.revealer_index) // MINOR_REWARD_QUOTIENT) +``` + +#### Early derived secret reveals + +Verify that `len(block.body.early_derived_secret_reveals) <= MAX_EARLY_DERIVED_SECRET_REVEALS`. + +For each `reveal` in `block.body.early_derived_secret_reveals`, run the following function: + +```python +def process_early_derived_secret_reveal(state: BeaconState, + reveal: EarlyDerivedSecretReveal) -> None: + """ + Process ``EarlyDerivedSecretReveal`` operation. + Note that this function mutates ``state``. 
+ """ + + revealed_validator = state.validator_registry[reveal.revealed_index] + masker = state.validator_registry[reveal.masker_index] + derived_secret_location = reveal.epoch % EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS + + assert reveal.epoch >= get_current_epoch(state) + RANDAO_PENALTY_EPOCHS + assert reveal.epoch < get_current_epoch(state) + EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS + assert revealed_validator.slashed is False + assert reveal.revealed_index not in state.exposed_derived_secrets[derived_secret_location] + + # Verify signature correctness + masker = state.validator_registry[reveal.masker_index] + pubkeys = [revealed_validator.pubkey, masker.pubkey] + message_hashes = [ + hash_tree_root(reveal.epoch), + reveal.mask, + ] + + assert bls_verify_multiple( + pubkeys=pubkeys, + message_hashes=message_hashes, + signature=reveal.reveal, + domain=get_domain( + state=state, + domain_type=DOMAIN_RANDAO, + message_epoch=reveal.epoch, + ), + ) + + if reveal.epoch >= get_current_epoch(state) + CUSTODY_PERIOD_TO_RANDAO_PADDING: + # Full slashing when the secret was revealed so early it may be a valid custody + # round key + slash_validator(state, reveal.revealed_index, reveal.masker_index) else: - assert reveal.period > current_custody_period - assert revealer.slashed is False - slash_validator(state, reveal.revealer_index, reveal.masker_index) + # Only a small penalty proportional to proposer slot reward for RANDAO reveal + # that does not interfere with the custody period + # The penalty is proportional to the max proposer reward + + # Calculate penalty + max_proposer_slot_reward = ( + get_base_reward(state, reveal.revealed_index) + * SLOTS_PER_EPOCH + // len(get_active_validator_indices(state, get_current_epoch(state))) + // PROPOSER_REWARD_QUOTIENT + ) + penalty = ( + max_proposer_slot_reward + * EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE + * (len(state.exposed_derived_secrets[derived_secret_location]) + 1) + ) + + # Apply penalty + 
proposer_index = get_beacon_proposer_index(state) + whistleblower_index = reveal.masker_index + whistleblowing_reward = penalty // WHISTLEBLOWING_REWARD_QUOTIENT + proposer_reward = whistleblowing_reward // PROPOSER_REWARD_QUOTIENT + increase_balance(state, proposer_index, proposer_reward) + increase_balance(state, whistleblower_index, whistleblowing_reward - proposer_reward) + decrease_balance(state, reveal.revealed_index, penalty) + + # Mark this derived secret as exposed so validator cannot be punished repeatedly + state.exposed_derived_secrets[derived_secret_location].append(reveal.revealed_index) ``` #### Chunk challenges @@ -326,30 +462,30 @@ For each `challenge` in `block.body.custody_chunk_challenges`, run the following def process_chunk_challenge(state: BeaconState, challenge: CustodyChunkChallenge) -> None: # Verify the attestation - assert verify_standalone_attestation(state, convert_to_standalone(state, challenge.attestation)) + validate_indexed_attestation(state, convert_to_indexed(state, challenge.attestation)) # Verify it is not too late to challenge assert slot_to_epoch(challenge.attestation.data.slot) >= get_current_epoch(state) - MAX_CHUNK_CHALLENGE_DELAY responder = state.validator_registry[challenge.responder_index] assert responder.exit_epoch >= get_current_epoch(state) - MAX_CHUNK_CHALLENGE_DELAY # Verify the responder participated in the attestation - attesters = get_attesting_indices(state, attestation.data, attestation.aggregation_bitfield) + attesters = get_attesting_indices(state, challenge.attestation.data, challenge.attestation.aggregation_bitfield) assert challenge.responder_index in attesters # Verify the challenge is not a duplicate for record in state.custody_chunk_challenge_records: assert ( - record.crosslink_data_root != challenge.attestation.data.crosslink_data_root or + record.data_root != challenge.attestation.data.crosslink.data_root or record.chunk_index != challenge.chunk_index ) # Verify depth - depth = 
math.log2(next_power_of_two(get_custody_chunk_count(challenge.attestation))) + depth = ceillog2(get_custody_chunk_count(challenge.attestation.data.crosslink)) assert challenge.chunk_index < 2**depth # Add new chunk challenge record new_record = CustodyChunkChallengeRecord( challenge_index=state.custody_challenge_index, challenger_index=get_beacon_proposer_index(state), - responder_index=challenge.responder_index - deadline=get_current_epoch(state) + CUSTODY_RESPONSE_DEADLINE, - crosslink_data_root=challenge.attestation.data.crosslink_data_root, + responder_index=challenge.responder_index, + inclusion_epoch=get_current_epoch(state), + data_root=challenge.attestation.data.crosslink.data_root, depth=depth, chunk_index=challenge.chunk_index, ) @@ -369,6 +505,7 @@ For each `challenge` in `block.body.custody_bit_challenges`, run the following f ```python def process_bit_challenge(state: BeaconState, challenge: CustodyBitChallenge) -> None: + # Verify challenge signature challenger = state.validator_registry[challenge.challenger_index] assert bls_verify( @@ -377,50 +514,63 @@ def process_bit_challenge(state: BeaconState, signature=challenge.signature, domain=get_domain(state, get_current_epoch(state), DOMAIN_CUSTODY_BIT_CHALLENGE), ) - # Verify the challenger is not slashed - assert challenger.slashed is False + assert is_slashable_validator(challenger, get_current_epoch(state)) + # Verify the attestation - assert verify_standalone_attestation(state, convert_to_standalone(state, challenge.attestation)) + attestation = challenge.attestation + validate_indexed_attestation(state, convert_to_indexed(state, attestation)) # Verify the attestation is eligible for challenging responder = state.validator_registry[challenge.responder_index] - min_challengeable_epoch = responder.exit_epoch - EPOCHS_PER_CUSTODY_PERIOD * (1 + responder.max_reveal_lateness) - assert min_challengeable_epoch <= slot_to_epoch(challenge.attestation.data.slot) + assert (slot_to_epoch(attestation.data.slot) 
+ responder.max_reveal_lateness <= + get_validators_custody_reveal_period(state, challenge.responder_index)) + # Verify the responder participated in the attestation attesters = get_attesting_indices(state, attestation.data, attestation.aggregation_bitfield) assert challenge.responder_index in attesters - # A validator can be the challenger or responder for at most one challenge at a time + + # A validator can be the challenger for at most one challenge at a time for record in state.custody_bit_challenge_records: assert record.challenger_index != challenge.challenger_index - assert record.responder_index != challenge.responder_index - # Verify the responder key - assert verify_custody_key(state, CustodyKeyReveal( - revealer_index=challenge.responder_index, - period=epoch_to_custody_period(slot_to_epoch(attestation.data.slot)), - key=challenge.responder_key, - masker_index=0, - mask=ZERO_HASH, - )) + + # Verify the responder is a valid custody key + epoch_to_sign = get_randao_epoch_for_custody_period( + get_validators_custody_reveal_period( + state=state, + index=challenge.responder_index, + epoch=slot_to_epoch(attestation.data.slot)), + challenge.responder_index + ) + assert bls_verify( + pubkey=responder.pubkey, + message_hash=hash_tree_root(epoch_to_sign), + signature=challenge.responder_key, + domain=get_domain( + state=state, + domain_type=DOMAIN_RANDAO, + message_epoch=epoch_to_sign, + ), + ) + # Verify the chunk count - chunk_count = get_custody_chunk_count(challenge.attestation) + chunk_count = get_custody_chunk_count(attestation.data.crosslink) assert verify_bitfield(challenge.chunk_bits, chunk_count) - # Verify the xor of the chunk bits does not equal the custody bit - chunk_bits_xor = 0b0 - for i in range(chunk_count): - chunk_bits_xor ^ get_bitfield_bit(challenge.chunk_bits, i) - custody_bit = get_bitfield_bit(attestation.custody_bitfield, attesters.index(responder_index)) - assert custody_bit != chunk_bits_xor + # Verify the first bit of the hash of the 
chunk bits does not equal the custody bit + custody_bit = get_bitfield_bit(attestation.custody_bitfield, attesters.index(challenge.responder_index)) + assert custody_bit != get_bitfield_bit(get_chunk_bits_root(challenge.chunk_bits), 0) # Add new bit challenge record new_record = CustodyBitChallengeRecord( challenge_index=state.custody_challenge_index, challenger_index=challenge.challenger_index, responder_index=challenge.responder_index, - deadline=get_current_epoch(state) + CUSTODY_RESPONSE_DEADLINE - crosslink_data_root=challenge.attestation.crosslink_data_root, - chunk_bits=challenge.chunk_bits, + inclusion_epoch=get_current_epoch(state), + data_root=attestation.data.crosslink.data_root, + chunk_count=chunk_count, + chunk_bits_merkle_root=hash_tree_root(challenge.chunk_bits), responder_key=challenge.responder_key, ) replace_empty_or_append(state.custody_bit_challenge_records, new_record) state.custody_challenge_index += 1 + # Postpone responder withdrawability responder.withdrawable_epoch = FAR_FUTURE_EPOCH ``` @@ -434,11 +584,13 @@ For each `response` in `block.body.custody_responses`, run the following functio ```python def process_custody_response(state: BeaconState, response: CustodyResponse) -> None: - chunk_challenge = next(record for record in state.custody_chunk_challenge_records if record.challenge_index == response.challenge_index, None) + chunk_challenge = next((record for record in state.custody_chunk_challenge_records + if record.challenge_index == response.challenge_index), None) if chunk_challenge is not None: return process_chunk_challenge_response(state, response, chunk_challenge) - bit_challenge = next(record for record in state.custody_bit_challenge_records if record.challenge_index == response.challenge_index, None) + bit_challenge = next((record for record in state.custody_bit_challenge_records + if record.challenge_index == response.challenge_index), None) if bit_challenge is not None: return process_bit_challenge_response(state, response, 
bit_challenge) @@ -451,20 +603,24 @@ def process_chunk_challenge_response(state: BeaconState, challenge: CustodyChunkChallengeRecord) -> None: # Verify chunk index assert response.chunk_index == challenge.chunk_index + # Verify bit challenge data is null + assert response.chunk_bits_branch == [] and response.chunk_bits_leaf == ZERO_HASH + # Verify minimum delay + assert get_current_epoch(state) >= challenge.inclusion_epoch + ACTIVATION_EXIT_DELAY # Verify the chunk matches the crosslink data root assert verify_merkle_branch( leaf=hash_tree_root(response.chunk), - branch=response.branch, + branch=response.data_branch, depth=challenge.depth, index=response.chunk_index, - root=challenge.crosslink_data_root, + root=challenge.data_root, ) # Clear the challenge records = state.custody_chunk_challenge_records records[records.index(challenge)] = CustodyChunkChallengeRecord() # Reward the proposer proposer_index = get_beacon_proposer_index(state) - increase_balance(state, proposer_index, base_reward(state, index) // MINOR_REWARD_QUOTIENT) + increase_balance(state, proposer_index, get_base_reward(state, proposer_index) // MINOR_REWARD_QUOTIENT) ``` ```python @@ -472,17 +628,29 @@ def process_bit_challenge_response(state: BeaconState, response: CustodyResponse, challenge: CustodyBitChallengeRecord) -> None: # Verify chunk index - assert response.chunk_index < len(challenge.chunk_bits) + assert response.chunk_index < challenge.chunk_count + # Verify responder has not been slashed + responder = state.validator_registry[challenge.responder_index] + assert not responder.slashed # Verify the chunk matches the crosslink data root assert verify_merkle_branch( leaf=hash_tree_root(response.chunk), - branch=response.branch, - depth=math.log2(next_power_of_two(len(challenge.chunk_bits))), + branch=response.data_branch, + depth=ceillog2(challenge.chunk_count), index=response.chunk_index, - root=challenge.crosslink_data_root, + root=challenge.data_root, + ) + # Verify the chunk bit leaf 
matches the challenge data + assert verify_merkle_branch( + leaf=response.chunk_bits_leaf, + branch=response.chunk_bits_branch, + depth=ceillog2(challenge.chunk_count) >> 8, + index=response.chunk_index // 256, + root=challenge.chunk_bits_merkle_root ) # Verify the chunk bit does not match the challenge chunk bit - assert get_custody_chunk_bit(challenge.responder_key, response.chunk) != get_bitfield_bit(challenge.chunk_bits, response.chunk_index) + assert (get_custody_chunk_bit(challenge.responder_key, response.chunk) + != get_bitfield_bit(challenge.chunk_bits_leaf, response.chunk_index % 256)) # Clear the challenge records = state.custody_bit_challenge_records records[records.index(challenge)] = CustodyBitChallengeRecord() @@ -492,38 +660,58 @@ def process_bit_challenge_response(state: BeaconState, ## Per-epoch processing -Run `process_challenge_deadlines(state)` immediately after `process_ejections(state)`: +### Handling of custody-related deadlines + +Run `process_reveal_deadlines(state)` immediately after `process_registry_updates(state)`: ```python +# begin insert @process_reveal_deadlines + process_reveal_deadlines(state) +# end insert @process_reveal_deadlines +def process_reveal_deadlines(state: BeaconState) -> None: + for index, validator in enumerate(state.validator_registry): + deadline = validator.next_custody_reveal_period + (CUSTODY_RESPONSE_DEADLINE // EPOCHS_PER_CUSTODY_PERIOD) + if get_validators_custody_reveal_period(state, index) > deadline: + slash_validator(state, index) +``` + +Run `process_challenge_deadlines(state)` immediately after `process_reveal_deadlines(state)`: + +```python +# begin insert @process_challenge_deadlines + process_challenge_deadlines(state) +# end insert @process_challenge_deadlines def process_challenge_deadlines(state: BeaconState) -> None: for challenge in state.custody_chunk_challenge_records: - if get_current_epoch(state) > challenge.deadline: + if get_current_epoch(state) > challenge.inclusion_epoch + 
CUSTODY_RESPONSE_DEADLINE: slash_validator(state, challenge.responder_index, challenge.challenger_index) records = state.custody_chunk_challenge_records records[records.index(challenge)] = CustodyChunkChallengeRecord() for challenge in state.custody_bit_challenge_records: - if get_current_epoch(state) > challenge.deadline: + if get_current_epoch(state) > challenge.inclusion_epoch + CUSTODY_RESPONSE_DEADLINE: slash_validator(state, challenge.responder_index, challenge.challenger_index) records = state.custody_bit_challenge_records records[records.index(challenge)] = CustodyBitChallengeRecord() ``` -In `process_penalties_and_exits`, change the definition of `eligible` to the following (note that it is not a pure function because `state` is declared in the surrounding scope): +Append this to `process_final_updates(state)`: ```python -def eligible(index): - validator = state.validator_registry[index] - # Cannot exit if there are still open chunk challenges - if len([record for record in state.custody_chunk_challenge_records if record.responder_index == index]) > 0: - return False - # Cannot exit if you have not revealed all of your custody keys - elif epoch_to_custody_period(revealer.activation_epoch) + validator.custody_reveal_index <= epoch_to_custody_period(validator.exit_epoch): - return False - # Cannot exit if you already have - elif validator.withdrawable_epoch < FAR_FUTURE_EPOCH: - return False - # Return minimum time - else: - return current_epoch >= validator.exit_epoch + MIN_VALIDATOR_WITHDRAWAL_EPOCHS +# begin insert @after_process_final_updates + after_process_final_updates(state) +# end insert @after_process_final_updates +def after_process_final_updates(state: BeaconState) -> None: + current_epoch = get_current_epoch(state) + # Clean up exposed RANDAO key reveals + state.exposed_derived_secrets[current_epoch % EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS] = [] + # Reset withdrawable epochs if challenge records are empty + records = 
state.custody_chunk_challenge_records + state.custody_bit_challenge_records + validator_indices_in_records = set( + [record.challenger_index for record in records] + [record.responder_index for record in records] + ) + for index, validator in enumerate(state.validator_registry): + if index not in validator_indices_in_records: + if validator.exit_epoch != FAR_FUTURE_EPOCH and validator.withdrawable_epoch == FAR_FUTURE_EPOCH: + validator.withdrawable_epoch = validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY ``` diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 33ef8632b..21e08e7c9 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -1,13 +1,13 @@ # Ethereum 2.0 Phase 1 -- Shard Data Chains -**NOTICE**: This document is a work-in-progress for researchers and implementers. +**Notice**: This document is a work-in-progress for researchers and implementers. -## Table of Contents +## Table of contents -- [Ethereum 2.0 Phase 1 -- Shards Data Chains](#ethereum-20-phase-1----shard-data-chains) - - [Table of Contents](#table-of-contents) +- [Ethereum 2.0 Phase 1 -- Shard Data Chains](#ethereum-20-phase-1----shard-data-chains) + - [Table of contents](#table-of-contents) - [Introduction](#introduction) - [Constants](#constants) - [Misc](#misc) @@ -15,9 +15,9 @@ - [Signature domains](#signature-domains) - [Data structures](#data-structures) - [`ShardBlockBody`](#shardblockbody) + - [`ShardAttestation`](#shardattestation) - [`ShardBlock`](#shardblock) - [`ShardBlockHeader`](#shardblockheader) - - [`ShardAttestation`](#shardattestation) - [Helper functions](#helper-functions) - [`get_period_committee`](#get_period_committee) - [`get_switchover_epoch`](#get_switchover_epoch) @@ -46,15 +46,17 @@ This document describes the shard data layer and the shard fork choice rule in P | - | - | | `BYTES_PER_SHARD_BLOCK_BODY` | `2**14` (= 16,384) | | `MAX_SHARD_ATTESTIONS` | `2**4` (= 16) | -| 
`PHASE_1_GENESIS_EPOCH` | **TBD** | -| `PHASE_1_GENESIS_SLOT` | get_epoch_start_slot(PHASE_1_GENESIS_EPOCH) | +| `PHASE_1_FORK_EPOCH` | **TBD** | +| `PHASE_1_FORK_SLOT` | **TBD** | +| `GENESIS_SHARD_SLOT` | 0 | ### Time parameters | Name | Value | Unit | Duration | | - | - | :-: | :-: | -| `CROSSLINK_LOOKBACK` | 2**0 (= 1) | epochs | 6.2 minutes | -| `PERSISTENT_COMMITTEE_PERIOD` | 2**11 (= 2,048) | epochs | ~9 days | +| `CROSSLINK_LOOKBACK` | `2**0` (= 1) | epochs | 6.2 minutes | +| `PERSISTENT_COMMITTEE_PERIOD` | `2**11` (= 2,048) | epochs | ~9 days | +| `SECONDS_PER_SLOT` | `2**1 * 3**1` (= 6) | seconds | 6 seconds | ### Signature domains @@ -68,51 +70,48 @@ This document describes the shard data layer and the shard fork choice rule in P ### `ShardBlockBody` ```python -['byte', BYTES_PER_SHARD_BLOCK_BODY] -``` - -### `ShardBlock` - -```python -{ - 'slot': Slot, - 'shard': Shard, - 'beacon_chain_root': Hash, - 'previous_block_root': Hash, - 'data': ShardBlockBody, - 'state_root': Hash, - 'attestations': [ShardAttestation], - 'signature': BLSSignature, -} -``` - -### `ShardBlockHeader` - -```python -{ - 'slot': Slot, - 'shard': Shard, - 'beacon_chain_root': Hash, - 'previous_block_root': Hash, - 'body_root': Hash, - 'state_root': Hash, - 'attestations': [ShardAttestation], - 'signature': BLSSignature, -} +class ShardBlockBody(Container): + data: Vector[bytes, BYTES_PER_SHARD_BLOCK_BODY] ``` ### `ShardAttestation` ```python -{ - 'data': { - 'slot': Slot, - 'shard': Shard, - 'shard_block_root': Hash, - }, - 'aggregation_bitfield': Bitfield, - 'aggregate_signature': BLSSignature, -} +class ShardAttestation(Container): + class data(Container): + slot: uint64 + shard: uint64 + shard_block_root: Bytes32 + aggregation_bitfield: bytes + aggregate_signature: Bytes96 +``` + +### `ShardBlock` + +```python +class ShardBlock(Container): + slot: uint64 + shard: uint64 + beacon_chain_root: Bytes32 + parent_root: Bytes32 + data: ShardBlockBody + state_root: Bytes32 + attestations:
List[ShardAttestation] + signature: Bytes96 +``` + +### `ShardBlockHeader` + +```python +class ShardBlockHeader(Container): + slot: uint64 + shard: uint64 + beacon_chain_root: Bytes32 + parent_root: Bytes32 + body_root: Bytes32 + state_root: Bytes32 + attestations: List[ShardAttestation] + signature: Bytes96 ``` ## Helper functions @@ -120,7 +119,11 @@ This document describes the shard data layer and the shard fork choice rule in P ### `get_period_committee` ```python -def get_period_committee(state: BeaconState, epoch: Epoch, shard: Shard, index: int, count: int) -> List[ValidatorIndex]: +def get_period_committee(state: BeaconState, + epoch: Epoch, + shard: Shard, + index: int, + count: int) -> List[ValidatorIndex]: """ Return committee for a period. Used to construct persistent committees. """ @@ -137,7 +140,8 @@ def get_switchover_epoch(state: BeaconState, epoch: Epoch, index: ValidatorIndex): earlier_start_epoch = epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD * 2 - return bytes_to_int(hash(generate_seed(state, earlier_start_epoch) + bytes3(index))[0:8]) % PERSISTENT_COMMITTEE_PERIOD + return (bytes_to_int(hash(generate_seed(state, earlier_start_epoch) + int_to_bytes(index, length=3))[0:8]) + % PERSISTENT_COMMITTEE_PERIOD) ``` ### `get_persistent_committee` @@ -180,7 +184,7 @@ def get_shard_proposer_index(state: BeaconState, slot: Slot) -> ValidatorIndex: # Randomly shift persistent committee persistent_committee = get_persistent_committee(state, shard, slot) - seed = hash(state.current_shuffling_seed + int_to_bytes8(shard) + int_to_bytes8(slot)) + seed = hash(state.current_shuffling_seed + int_to_bytes(shard, length=8) + int_to_bytes(slot, length=8)) random_index = bytes_to_int(seed[0:8]) % len(persistent_committee) persistent_committee = persistent_committee[random_index:] + persistent_committee[:random_index] @@ -198,14 +202,14 @@ def
get_shard_proposer_index(state: BeaconState, ```python def get_shard_header(block: ShardBlock) -> ShardBlockHeader: return ShardBlockHeader( - slot: block.slot, - shard: block.shard, - beacon_chain_root: block.beacon_chain_root, - previous_block_root: block.previous_block_root, - body_root: hash_tree_root(block.body), - state_root: block.state_root, - attestations: block.attestations, - signature: block.signature, + slot=block.slot, + shard=block.shard, + beacon_chain_root=block.beacon_chain_root, + parent_root=block.parent_root, + body_root=hash_tree_root(block.body), + state_root=block.state_root, + attestations=block.attestations, + signature=block.signature, ) ``` @@ -219,7 +223,7 @@ def verify_shard_attestation_signature(state: BeaconState, assert verify_bitfield(attestation.aggregation_bitfield, len(persistent_committee)) pubkeys = [] for i, index in enumerate(persistent_committee): - if get_bitfield_bit(attestation.aggregation_bitfield, i) == 0b1 + if get_bitfield_bit(attestation.aggregation_bitfield, i) == 0b1: validator = state.validator_registry[index] assert is_active_validator(validator, get_current_epoch(state)) pubkeys.append(validator.pubkey) @@ -234,7 +238,7 @@ def verify_shard_attestation_signature(state: BeaconState, ### `compute_crosslink_data_root` ```python -def compute_crosslink_data_root(blocks: List[ShardBlock]) -> Hash: +def compute_crosslink_data_root(blocks: List[ShardBlock]) -> Bytes32: def is_power_of_two(value: int) -> bool: return (value > 0) and (value & (value - 1) == 0) @@ -243,15 +247,20 @@ def compute_crosslink_data_root(blocks: List[ShardBlock]) -> Hash: values += [b'\x00' * BYTES_PER_SHARD_BLOCK_BODY] return values - def merkle_root_of_bytes(data: bytes) -> bytes: - return merkle_root([data[i:i + 32] for i in range(0, len(data), 32)]) + def hash_tree_root_of_bytes(data: bytes) -> bytes: + return hash_tree_root([data[i:i + 32] for i in range(0, len(data), 32)]) + + def zpad(data: bytes, length: int) -> bytes: + return data + 
b'\x00' * (length - len(data)) return hash( - merkle_root(pad_to_power_of_2([ - merkle_root_of_bytes(zpad(serialize(get_shard_header(block)), BYTES_PER_SHARD_BLOCK_BODY)) for block in blocks - ])) + - merkle_root(pad_to_power_of_2([ - merkle_root_of_bytes(block.body) for block in blocks + hash_tree_root(pad_to_power_of_2([ + hash_tree_root_of_bytes( + zpad(serialize(get_shard_header(block)), BYTES_PER_SHARD_BLOCK_BODY) + ) for block in blocks + ])) + + hash_tree_root(pad_to_power_of_2([ + hash_tree_root_of_bytes(block.body) for block in blocks ])) ) ``` @@ -265,64 +274,61 @@ Let: * `beacon_blocks` be the `BeaconBlock` list such that `beacon_blocks[slot]` is the canonical `BeaconBlock` at slot `slot` * `beacon_state` be the canonical `BeaconState` after processing `beacon_blocks[-1]` * `valid_shard_blocks` be the list of valid `ShardBlock`, recursively defined -* `unix_time` be the current unix time * `candidate` be a candidate `ShardBlock` for which validity is to be determined by running `is_valid_shard_block` ```python def is_valid_shard_block(beacon_blocks: List[BeaconBlock], beacon_state: BeaconState, valid_shard_blocks: List[ShardBlock], - unix_time: uint64, - candidate: ShardBlock) -> bool + candidate: ShardBlock) -> bool: # Check if block is already determined valid for _, block in enumerate(valid_shard_blocks): if candidate == block: return True # Check slot number - assert block.slot >= PHASE_1_GENESIS_SLOT - assert unix_time >= beacon_state.genesis_time + (block.slot - GENESIS_SLOT) * SECONDS_PER_SLOT + assert candidate.slot >= PHASE_1_FORK_SLOT # Check shard number - assert block.shard <= SHARD_COUNT + assert candidate.shard <= SHARD_COUNT # Check beacon block - beacon_block = beacon_blocks[block.slot] - assert block.beacon_block_root == signing_root(beacon_block) - assert beacon_block.slot <= block.slot: + beacon_block = beacon_blocks[candidate.slot] + assert candidate.beacon_block_root == signing_root(beacon_block) + assert beacon_block.slot <= 
candidate.slot # Check state root - assert block.state_root == ZERO_HASH # [to be removed in phase 2] + assert candidate.state_root == ZERO_HASH # [to be removed in phase 2] # Check parent block - if block.slot == PHASE_1_GENESIS_SLOT: - assert candidate.previous_block_root == ZERO_HASH + if candidate.slot == PHASE_1_FORK_SLOT: + assert candidate.parent_root == ZERO_HASH else: parent_block = next( - block for block in valid_shard_blocks if - signing_root(block) == candidate.previous_block_root - , None) - assert parent_block != None - assert parent_block.shard == block.shard - assert parent_block.slot < block.slot + (block for block in valid_shard_blocks if signing_root(block) == candidate.parent_root), + None + ) + assert parent_block is not None + assert parent_block.shard == candidate.shard + assert parent_block.slot < candidate.slot assert signing_root(beacon_blocks[parent_block.slot]) == parent_block.beacon_chain_root # Check attestations - assert len(block.attestations) <= MAX_SHARD_ATTESTIONS - for _, attestation in enumerate(block.attestations): - assert max(GENESIS_SHARD_SLOT, block.slot - SLOTS_PER_EPOCH) <= attestation.data.slot - assert attestation.data.slot <= block.slot - MIN_ATTESTATION_INCLUSION_DELAY - assert attestation.data.shard == block.shard + assert len(candidate.attestations) <= MAX_SHARD_ATTESTIONS + for _, attestation in enumerate(candidate.attestations): + assert max(GENESIS_SHARD_SLOT, candidate.slot - SLOTS_PER_EPOCH) <= attestation.data.slot + assert attestation.data.slot <= candidate.slot - MIN_ATTESTATION_INCLUSION_DELAY + assert attestation.data.crosslink.shard == candidate.shard verify_shard_attestation_signature(beacon_state, attestation) # Check signature - proposer_index = get_shard_proposer_index(beacon_state, block.shard, block.slot) + proposer_index = get_shard_proposer_index(beacon_state, candidate.shard, candidate.slot) assert proposer_index is not None assert bls_verify( - pubkey=validators[proposer_index].pubkey, + 
pubkey=beacon_state.validator_registry[proposer_index].pubkey, message_hash=signing_root(block), - signature=block.signature, - domain=get_domain(beacon_state, slot_to_epoch(block.slot), DOMAIN_SHARD_PROPOSER) + signature=candidate.signature, + domain=get_domain(beacon_state, slot_to_epoch(candidate.slot), DOMAIN_SHARD_PROPOSER), ) return True @@ -339,18 +345,18 @@ Let: ```python def is_valid_shard_attestation(valid_shard_blocks: List[ShardBlock], beacon_state: BeaconState, - candidate: Attestation) -> bool: + candidate: ShardAttestation) -> bool: # Check shard block shard_block = next( - block for block in valid_shard_blocks if - signing_root(block) == candidate.attestation.data.shard_block_root - , None) - assert shard_block != None - assert shard_block.slot == attestation.data.slot - assert shard_block.shard == attestation.data.shard + (block for block in valid_shard_blocks if signing_root(block) == candidate.data.shard_block_root), + None, + ) + assert shard_block is not None + assert shard_block.slot == candidate.data.slot + assert shard_block.shard == candidate.data.shard # Check signature - verify_shard_attestation_signature(beacon_state, attestation) + verify_shard_attestation_signature(beacon_state, candidate) return True ``` @@ -363,7 +369,7 @@ Let: * `shard_blocks` be the `ShardBlock` list such that `shard_blocks[slot]` is the canonical `ShardBlock` for shard `shard` at slot `slot` * `beacon_state` be the canonical `BeaconState` * `valid_attestations` be the list of valid `Attestation`, recursively defined -* `candidate` be a candidate `Attestation` which is valid under phase 0 rules, and for which validity is to be determined under phase 1 rules by running `is_valid_beacon_attestation` +* `candidate` be a candidate `Attestation` which is valid under Phase 0 rules, and for which validity is to be determined under Phase 1 rules by running `is_valid_beacon_attestation` ```python def is_valid_beacon_attestation(shard: Shard, @@ -377,23 +383,24 @@ def 
is_valid_beacon_attestation(shard: Shard, return True # Check previous attestation - if candidate.data.previous_crosslink.epoch <= PHASE_1_GENESIS_EPOCH: - assert candidate.data.previous_crosslink.crosslink_data_root == ZERO_HASH + if candidate.data.previous_crosslink.epoch <= PHASE_1_FORK_EPOCH: + assert candidate.data.previous_crosslink.data_root == ZERO_HASH else: previous_attestation = next( - attestation for attestation in valid_attestations if - attestation.data.crosslink_data_root == candidate.data.previous_crosslink.crosslink_data_root - , None) - assert previous_attestation != None + (attestation for attestation in valid_attestations if + attestation.data.crosslink.data_root == candidate.data.previous_crosslink.data_root), + None, + ) + assert previous_attestation is not None assert candidate.data.previous_attestation.epoch < slot_to_epoch(candidate.data.slot) # Check crosslink data root - start_epoch = state.latest_crosslinks[shard].epoch - end_epoch = min(slot_to_epoch(candidate.data.slot) - CROSSLINK_LOOKBACK, start_epoch + MAX_CROSSLINK_EPOCHS) + start_epoch = beacon_state.latest_crosslinks[shard].epoch + end_epoch = min(slot_to_epoch(candidate.data.slot) - CROSSLINK_LOOKBACK, start_epoch + MAX_EPOCHS_PER_CROSSLINK) blocks = [] for slot in range(start_epoch * SLOTS_PER_EPOCH, end_epoch * SLOTS_PER_EPOCH): blocks.append(shard_blocks[slot]) - assert candidate.data.crosslink_data_root == compute_crosslink_data_root(blocks) + assert candidate.data.crosslink.data_root == compute_crosslink_data_root(blocks) return True ``` diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index b38167bb5..f009d9737 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -1,16 +1,19 @@ -**NOTICE**: This document is a work-in-progress for researchers and implementers. +# Merkle proof formats -## Table of Contents +**Notice**: This document is a work-in-progress for researchers and implementers. 
+ +## Table of contents -- [Table of Contents](#table-of-contents) -- [Constants](#constants) -- [Generalized Merkle tree index](#generalized-merkle-tree-index) -- [SSZ object to index](#ssz-object-to-index) -- [Merkle multiproofs](#merkle-multiproofs) -- [MerklePartial](#merklepartial) - - [`SSZMerklePartial`](#sszmerklepartial) - - [Proofs for execution](#proofs-for-execution) +- [Merkle proof formats](#merkle-proof-formats) + - [Table of contents](#table-of-contents) + - [Constants](#constants) + - [Generalized Merkle tree index](#generalized-merkle-tree-index) + - [SSZ object to index](#ssz-object-to-index) + - [Merkle multiproofs](#merkle-multiproofs) + - [MerklePartial](#merklepartial) + - [`SSZMerklePartial`](#sszmerklepartial) + - [Proofs for execution](#proofs-for-execution) diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md index 7cb1f6928..8501c5869 100644 --- a/specs/light_client/sync_protocol.md +++ b/specs/light_client/sync_protocol.md @@ -1,13 +1,13 @@ # Beacon Chain Light Client Syncing -__NOTICE__: This document is a work-in-progress for researchers and implementers. One of the design goals of the eth2 beacon chain is light-client friendliness, both to allow low-resource clients (mobile phones, IoT, etc) to maintain access to the blockchain in a reasonably safe way, but also to facilitate the development of "bridges" between the eth2 beacon chain and other chains. +**Notice**: This document is a work-in-progress for researchers and implementers. One of the design goals of the Eth 2.0 beacon chain is light-client friendliness, not only to allow low-resource clients (mobile phones, IoT, etc.) to maintain access to the blockchain in a reasonably safe way, but also to facilitate the development of "bridges" between the Eth 2.0 beacon chain and other chains. 
-## Table of Contents +## Table of contents - [Beacon Chain Light Client Syncing](#beacon-chain-light-client-syncing) - - [Table of Contents](#table-of-contents) + - [Table of contents](#table-of-contents) - [Preliminaries](#preliminaries) - [Expansions](#expansions) - [`get_active_validator_indices`](#get_active_validator_indices) @@ -146,7 +146,7 @@ def compute_committee(header: BeaconBlockHeader, ] def get_switchover_epoch(index): return ( - bytes_to_int(hash(validator_memory.earlier_period_data.seed + int_to_bytes3(index))[0:8]) % + bytes_to_int(hash(validator_memory.earlier_period_data.seed + int_to_bytes(index, length=3))[0:8]) % PERSISTENT_COMMITTEE_PERIOD ) diff --git a/specs/networking/libp2p-standardization.md b/specs/networking/libp2p-standardization.md new file mode 100644 index 000000000..d1ba07e65 --- /dev/null +++ b/specs/networking/libp2p-standardization.md @@ -0,0 +1,158 @@ +ETH 2.0 Networking Spec - Libp2p standard protocols +=== + +# Abstract + +Ethereum 2.0 clients plan to use the libp2p protocol networking stack for +mainnet release. This document aims to standardize the libp2p client protocols, +configuration and messaging formats. + +# Libp2p Components + +## Transport + +This section details the libp2p transport layer that underlies the +[protocols](#protocols) that are listed in this document. + +Libp2p allows composition of multiple transports. Eth2.0 clients should support +TCP/IP and optionally websockets. Websockets are useful for implementations +running in the browser and therefore native clients would ideally support these implementations +by supporting websockets. + +An ideal libp2p transport would therefore support both TCP/IP and websockets. 
+ +*Note: There is active development in libp2p to facilitate the +[QUIC](https://github.com/libp2p/go-libp2p-quic-transport) transport, which may +be adopted in the future* + +### Encryption + +Libp2p currently offers [Secio](https://github.com/libp2p/specs/pull/106) which +can upgrade a transport which will then encrypt all future communication. Secio +generates a symmetric ephemeral key which peers use to encrypt their +communication. It can support a range of ciphers and currently supports key +derivation for elliptic curve-based public keys. + +Current defaults are: +- Key agreement: `ECDH-P256` (also supports `ECDH-P384`) +- Cipher: `AES-128` (also supports `AES-256`, `TwofishCTR`) +- Digests: `SHA256` (also supports `SHA512`) + +*Note: Secio is being deprecated in favour of [TLS +1.3](https://github.com/libp2p/specs/blob/master/tls/tls.md). It is our +intention to transition to use TLS 1.3 for encryption between nodes, rather +than Secio.* + + +## Protocols + +This section lists the necessary libp2p protocols required by Ethereum 2.0 +running a libp2p network stack. + +## Multistream-select + +#### Protocol id: `/multistream/1.0.0` + +Clients running libp2p should support the +[multistream-select](https://github.com/multiformats/multistream-select/) +protocol which allows clients to negotiate libp2p protocols and establish streams +per protocol. + +## Multiplexing + +Libp2p allows clients to compose multiple multiplexing methods. Clients should +support [mplex](https://github.com/libp2p/specs/tree/master/mplex) and +optionally [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md) +(these can be composed).
+ +**Mplex protocol id: `/mplex/6.7.0`** + +**Yamux protocol id: `/yamux/1.0.0`** + +## Gossipsub + +#### Protocol id: `/eth/serenity/gossipsub/1.0.0` + +*Note: Parameters listed here are subject to a large-scale network feasibility +study* + +The [Gossipsub](https://github.com/libp2p/specs/tree/master/pubsub/gossipsub) +protocol is used for block and attestation propagation across the +network. + +### Configuration Parameters + +Gossipsub has a number of internal configuration parameters which directly +affect the network performance. Clients can implement independently, however +we aim to standardize these across clients to optimize the gossip network for +propagation times and message duplication. Current network-related defaults are: + +``` +( + // The target number of peers in the overlay mesh network (D in the libp2p specs). + mesh_size: 6 + // The minimum number of peers in the mesh network before adding more (D_lo in the libp2p specs). + mesh_lo: 4 + // The maximum number of peers in the mesh network before removing some (D_high in the libp2p specs). + mesh_high: 12 + // The number of peers to gossip to during a heartbeat (D_lazy in the libp2p specs). + gossip_lazy: 6 // defaults to `mesh_size` + // Time to live for fanout peers (seconds). + fanout_ttl: 60 + // The number of heartbeats to gossip about. + gossip_history: 3 + // Time between each heartbeat (seconds). + heartbeat_interval: 1 +) +``` + +### Topics + +*The Go and Js implementations use string topics - This is likely to be +updated to topic hashes in later versions - https://github.com/libp2p/rust-libp2p/issues/473* + +For Eth2.0 clients, topics are sent as `SHA2-256` hashes of the topic string. + +There are two main topics used to propagate attestations and beacon blocks to +all nodes on the network. + +- The `beacon_block` topic - This topic is used solely for propagating new + beacon blocks to all nodes on the network.
+- The `beacon_attestation` topic - This topic is used to propagate + aggregated attestations to subscribing nodes (typically block proposers) to + be included into future blocks. Attestations are aggregated in their + respective subnets before publishing on this topic. + +Shards are grouped into their own subnets (defined by a shard topic). The +number of shard subnets is defined via `SHARD_SUBNET_COUNT` and the shard +`shard_number % SHARD_SUBNET_COUNT` is assigned to the topic: +`shard{shard_number % SHARD_SUBNET_COUNT}_attestation`. + +### Messages + +*Note: The message format here is Eth2.0-specific* + +Each Gossipsub +[Message](https://github.com/libp2p/go-libp2p-pubsub/blob/master/pb/rpc.proto#L17-L24) +has a maximum size of 512KB (estimated from expected largest uncompressed block +size). + +The `data` field of a Gossipsub `Message` is an SSZ-encoded object. For the `beacon_block` topic, +this is a `beacon_block`. For the `beacon_attestation` topic, this is +an `attestation`. + +## Eth-2 RPC + +#### Protocol Id: `/eth/serenity/beacon/rpc/1` + +The [RPC Interface](./rpc-interface.md) is specified in this repository. + +## Discovery + +Discovery Version 5 +([discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md)) +will be used for discovery. This protocol uses a UDP transport and specifies +its own encryption, ip-discovery and topic advertisement. Therefore, it has no +need to establish streams through `multistream-select`; rather, it acts +as a standalone implementation that feeds discovered peers/topics (ENR-records) as +`multiaddrs` into the libp2p service.
diff --git a/specs/networking/messaging.md b/specs/networking/messaging.md index b64e1d5d8..d7cb5bb5b 100644 --- a/specs/networking/messaging.md +++ b/specs/networking/messaging.md @@ -1,23 +1,22 @@ -ETH 2.0 Networking Spec - Messaging -=== +# Eth 2.0 Networking Spec - Messaging -# Abstract +## Abstract This specification describes how individual Ethereum 2.0 messages are represented on the wire. -The key words “MUST”, “MUST NOT”, “REQUIRED”, “SHALL”, “SHALL”, NOT", “SHOULD”, “SHOULD NOT”, “RECOMMENDED”, “MAY”, and “OPTIONAL” in this document are to be interpreted as described in RFC 2119. +The key words “MUST”, “MUST NOT”, “REQUIRED”, “SHALL”, “SHALL NOT”, “SHOULD”, “SHOULD NOT”, “RECOMMENDED”, “MAY”, and “OPTIONAL” in this document are to be interpreted as described in [RFC 2119](https://tools.ietf.org/html/rfc2119). -# Motivation +## Motivation -This specification seeks to define a messaging protocol that is flexible enough to be changed easily as the ETH 2.0 specification evolves. +This specification seeks to define a messaging protocol that is flexible enough to be changed easily as the Eth 2.0 specification evolves. Note that while `libp2p` is the chosen networking stack for Ethereum 2.0, as of this writing some clients do not have workable `libp2p` implementations. To allow those clients to communicate, we define a message envelope that includes the body's compression, encoding, and body length. Once `libp2p` is available across all implementations, this message envelope will be removed because `libp2p` will negotiate the values defined in the envelope upfront. -# Specification +## Specification -## Message Structure +### Message structure -An ETH 2.0 message consists of an envelope that defines the message's compression, encoding, and length followed by the body itself. +An Eth 2.0 message consists of an envelope that defines the message's compression, encoding, and length followed by the body itself.
Visually, a message looks like this: @@ -35,12 +34,12 @@ Visually, a message looks like this: +--------------------------+ ``` -Clients MUST ignore messages with mal-formed bodies. The compression/encoding nibbles MUST be one of the following values: +Clients MUST ignore messages with malformed bodies. The compression/encoding nibbles MUST be one of the following values: -## Compression Nibble Values +### Compression nibble values - `0x0`: no compression -## Encoding Nibble Values +### Encoding nibble values - `0x1`: SSZ diff --git a/specs/networking/node-identification.md b/specs/networking/node-identification.md index 0f1f9832b..32ec4dfad 100644 --- a/specs/networking/node-identification.md +++ b/specs/networking/node-identification.md @@ -1,13 +1,12 @@ -ETH 2.0 Networking Spec - Node Identification -=== +# Eth 2.0 Networking Spec - Node Identification -# Abstract +## Abstract This specification describes how Ethereum 2.0 nodes identify and address each other on the network. -The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL", NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in RFC 2119. +The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in [RFC 2119](https://tools.ietf.org/html/rfc2119). -# Specification +## Specification Clients use Ethereum Node Records (as described in [EIP-778](http://eips.ethereum.org/EIPS/eip-778)) to discover one another. Each ENR includes, among other things, the following keys: @@ -21,11 +20,11 @@ The keys above are enough to construct a [multiaddr](https://github.com/multifor It is RECOMMENDED that clients set their TCP port to the default of `9000`. -## Peer ID Generation +### Peer ID generation The `libp2p` networking stack identifies peers via a "peer ID."
Simply put, a node's Peer ID is the SHA2-256 `multihash` of the node's public key struct (serialized in protobuf, refer to the [Peer ID spec](https://github.com/libp2p/specs/pull/100)). `go-libp2p-crypto` contains the canonical implementation of how to hash `secp256k1` keys for use as a peer ID. -# See Also +## See also - [multiaddr](https://github.com/multiformats/multiaddr) - [multihash](https://multiformats.io/multihash/) diff --git a/specs/networking/rpc-interface.md b/specs/networking/rpc-interface.md index f1da8f7e3..b81f78408 100644 --- a/specs/networking/rpc-interface.md +++ b/specs/networking/rpc-interface.md @@ -1,19 +1,18 @@ -ETH 2.0 Networking Spec - RPC Interface -=== +# Eth 2.0 Networking Spec - RPC Interface -# Abstract +## Abstract The Ethereum 2.0 networking stack uses two modes of communication: a broadcast protocol that gossips information to interested parties via GossipSub, and an RPC protocol that retrieves information from specific clients. This specification defines the RPC protocol. -The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL", NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in RFC 2119. +The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in [RFC 2119](https://tools.ietf.org/html/rfc2119). -# Dependencies +## Dependencies This specification assumes familiarity with the [Messaging](./messaging.md), [Node Identification](./node-identification.md), and [Beacon Chain](../core/0_beacon-chain.md) specifications. # Specification -## Message Schemas +## Message schemas Message body schemas are notated like this: @@ -26,13 +25,13 @@ Message body schemas are notated like this: Embedded types are serialized as SSZ Containers unless otherwise noted.
-All referenced data structures can be found in the [0-beacon-chain](../core/0_beacon-chain.md#data-structures) specification. +All referenced data structures can be found in the [Beacon Chain](../core/0_beacon-chain.md#data-structures) specification. -## `libp2p` Protocol Names +## `libp2p` protocol names -A "Protocol ID" in `libp2p` parlance refers to a human-readable identifier `libp2p` uses in order to identify sub-protocols and stream messages of different types over the same connection. Peers exchange supported protocol IDs via the `Identify` protocol upon connection. When opening a new stream, peers pin a particular protocol ID to it, and the stream remains contextualised thereafter. Since messages are sent inside a stream, they do not need to bear the protocol ID. +A "Protocol ID" in `libp2p` parlance refers to a human-readable identifier `libp2p` uses in order to identify sub-protocols and stream messages of different types over the same connection. Peers exchange supported protocol IDs via the `Identify` protocol upon connection. When opening a new stream, peers pin a particular protocol ID to it, and the stream remains contextualized thereafter. Since messages are sent inside a stream, they do not need to bear the protocol ID. -## RPC-Over-`libp2p` +## RPC-over-`libp2p` To facilitate RPC-over-`libp2p`, a single protocol name is used: `/eth/serenity/beacon/rpc/1`. The version number in the protocol name is neither backwards or forwards compatible, and will be incremented whenever changes to the below structures are required. @@ -42,7 +41,7 @@ Remote method calls are wrapped in a "request" structure: ( id: uint64 method_id: uint16 - body: Request + body: (message_body...) 
) ``` @@ -56,15 +55,7 @@ and their corresponding responses are wrapped in a "response" structure: ) ``` -If an error occurs, a variant of the response structure is returned: - -``` -( - id: uint64 - response_code: uint16 - result: bytes -) -``` +A union type is used to determine the contents of the `body` field in the request structure. Each "body" entry in the RPC calls below corresponds to one subtype in the `body` type union. The details of the RPC-Over-`libp2p` protocol are similar to [JSON-RPC 2.0](https://www.jsonrpc.org/specification). Specifically: @@ -88,7 +79,7 @@ The first 1,000 values in `response_code` are reserved for system use. The follo 3. `30`: Method not found. 4. `40`: Server error. -### Alternative for Non-`libp2p` Clients +### Alternative for non-`libp2p` clients Since some clients are waiting for `libp2p` implementations in their respective languages. As such, they MAY listen for raw TCP messages on port `9000`. To distinguish RPC messages from other messages on that port, a byte prefix of `ETH` (`0x455448`) MUST be prepended to all messages. This option will be removed once `libp2p` is ready in all supported languages. @@ -145,7 +136,7 @@ Root B ^ +---+ ``` -Once the handshake completes, the client with the higher `latest_finalized_epoch` or `best_slot` (if the clients have equal `latest_finalized_epoch`s) SHOULD request beacon block roots from its counterparty via `beacon_block_roots` (i.e., RPC method `10`). +Once the handshake completes, the client with the higher `latest_finalized_epoch` or `best_slot` (if the clients have equal `latest_finalized_epoch`s) SHOULD request beacon block roots from its counterparty via `beacon_block_roots` (i.e. RPC method `10`). ### Goodbye @@ -167,11 +158,11 @@ Client MAY send `goodbye` messages upon disconnection. The reason field MAY be o Clients MAY define custom goodbye reasons as long as the value is larger than `1000`. 
-### Get Status +### Get status **Method ID:** `2` -**Request Body:** +**Request body:** ``` ( @@ -181,7 +172,7 @@ Clients MAY define custom goodbye reasons as long as the value is larger than `1 ) ``` -**Response Body:** +**Response body:** ``` ( @@ -193,11 +184,11 @@ Clients MAY define custom goodbye reasons as long as the value is larger than `1 Returns metadata about the remote node. -### Request Beacon Block Roots +### Request beacon block roots **Method ID:** `10` -**Request Body** +**Request body** ``` ( @@ -206,7 +197,7 @@ Returns metadata about the remote node. ) ``` -**Response Body:** +**Response body:** ``` # BlockRootSlot @@ -222,11 +213,11 @@ Returns metadata about the remote node. Requests a list of block roots and slots from the peer. The `count` parameter MUST be less than or equal to `32768`. The slots MUST be returned in ascending slot order. -### Beacon Block Headers +### Beacon block headers **Method ID:** `11` -**Request Body** +**Request body** ``` ( @@ -237,7 +228,7 @@ Requests a list of block roots and slots from the peer. The `count` parameter MU ) ``` -**Response Body:** +**Response body:** ``` ( @@ -245,15 +236,15 @@ Requests a list of block roots and slots from the peer. The `count` parameter MU ) ``` -Requests beacon block headers from the peer starting from `(start_root, start_slot)`. The response MUST contain no more than `max_headers` headers. `skip_slots` defines the maximum number of slots to skip between blocks. For example, requesting blocks starting at slots `2` a `skip_slots` value of `1` would return the blocks at `[2, 4, 6, 8, 10]`. In cases where a slot is empty for a given slot number, the closest previous block MUST be returned. For example, if slot `4` were empty in the previous example, the returned array would contain `[2, 3, 6, 8, 10]`. If slot three were further empty, the array would contain `[2, 6, 8, 10]` - i.e., duplicate blocks MUST be collapsed. A `skip_slots` value of `0` returns all blocks. 
+Requests beacon block headers from the peer starting from `(start_root, start_slot)`. The response MUST contain no more than `max_headers` headers. `skip_slots` defines the maximum number of slots to skip between blocks. For example, requesting blocks starting at slots `2` a `skip_slots` value of `1` would return the blocks at `[2, 4, 6, 8, 10]`. In cases where a slot is empty for a given slot number, the closest previous block MUST be returned. For example, if slot `4` were empty in the previous example, the returned array would contain `[2, 3, 6, 8, 10]`. If slot three were further empty, the array would contain `[2, 6, 8, 10]`—i.e. duplicate blocks MUST be collapsed. A `skip_slots` value of `0` returns all blocks. The function of the `skip_slots` parameter helps facilitate light client sync - for example, in [#459](https://github.com/ethereum/eth2.0-specs/issues/459) - and allows clients to balance the peers from whom they request headers. Clients could, for instance, request every 10th block from a set of peers where each peer has a different starting block in order to populate block data. -### Beacon Block Bodies +### Beacon block bodies **Method ID:** `12` -**Request Body:** +**Request body:** ``` ( @@ -261,7 +252,7 @@ The function of the `skip_slots` parameter helps facilitate light client sync - ) ``` -**Response Body:** +**Response body:** ``` ( @@ -269,15 +260,15 @@ The function of the `skip_slots` parameter helps facilitate light client sync - ) ``` -Requests the `block_bodies` associated with the provided `block_roots` from the peer. Responses MUST return `block_roots` in the order provided in the request. If the receiver does not have a particular `block_root`, it must return a zero-value `block_body` (i.e., a `block_body` container with all zero fields). +Requests the `block_bodies` associated with the provided `block_roots` from the peer. Responses MUST return `block_roots` in the order provided in the request. 
If the receiver does not have a particular `block_root`, it must return a zero-value `block_body` (i.e. a `block_body` container with all zero fields). -### Beacon Chain State +### Beacon chain state -**Note:** This section is preliminary, pending the definition of the data structures to be transferred over the wire during fast sync operations. +*Note*: This section is preliminary, pending the definition of the data structures to be transferred over the wire during fast sync operations. **Method ID:** `13` -**Request Body:** +**Request body:** ``` ( @@ -285,7 +276,7 @@ Requests the `block_bodies` associated with the provided `block_roots` from the ) ``` -**Response Body:** TBD +**Response body:** TBD Requests contain the hashes of Merkle tree nodes that when merkleized yield the block's `state_root`. diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 0080f2447..2adff2388 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -1,23 +1,30 @@ # SimpleSerialize (SSZ) -This is a **work in progress** describing typing, serialization and Merkleization of Ethereum 2.0 objects. +**Notice**: This document is a work-in-progress describing typing, serialization, and Merkleization of Eth 2.0 objects. 
## Table of contents + -- [Constants](#constants) -- [Typing](#typing) - - [Basic types](#basic-types) - - [Composite types](#composite-types) - - [Aliases](#aliases) - - [Default values](#default-values) -- [Serialization](#serialization) - - [`"uintN"`](#uintn) - - [`"bool"`](#bool) - - [Containers, vectors, lists](#containers-vectors-lists) -- [Deserialization](#deserialization) -- [Merkleization](#merkleization) -- [Self-signed containers](#self-signed-containers) -- [Implementations](#implementations) +- [SimpleSerialize (SSZ)](#simpleserialize-ssz) + - [Table of contents](#table-of-contents) + - [Constants](#constants) + - [Typing](#typing) + - [Basic types](#basic-types) + - [Composite types](#composite-types) + - [Aliases](#aliases) + - [Default values](#default-values) + - [Illegal types](#illegal-types) + - [Serialization](#serialization) + - [`"uintN"`](#uintn) + - [`"bool"`](#bool) + - [`"null"`](#null) + - [Vectors, containers, lists, unions](#vectors-containers-lists-unions) + - [Deserialization](#deserialization) + - [Merkleization](#merkleization) + - [Self-signed containers](#self-signed-containers) + - [Implementations](#implementations) + + ## Constants @@ -41,8 +48,12 @@ This is a **work in progress** describing typing, serialization and Merkleizatio * angle bracket notation `[type, N]`, e.g. `["uint64", N]` * **list**: ordered variable-length homogeneous collection of values * angle bracket notation `[type]`, e.g. `["uint64"]` +* **union**: union type containing one of the given subtypes + * round bracket notation `(type_1, type_2, ...)`, e.g. `("null", "uint64")` -We recursively define "variable-size" types to be lists and all types that contains a variable-size type. All other types are said to be "fixed-size". +### Variable-size and fixed-size +
+We recursively define "variable-size" types to be lists and unions and all types that contain a variable-size type. All other types are said to be "fixed-size".
### Aliases @@ -51,10 +62,19 @@ For convenience we alias: * `"byte"` to `"uint8"` (this is a basic type) * `"bytes"` to `["byte"]` (this is *not* a basic type) * `"bytesN"` to `["byte", N]` (this is *not* a basic type) +* `"null"`: `{}`, i.e. the empty container ### Default values -The default value of a type upon initialization is recursively defined using `0` for `"uintN"`, `False` for `"bool"`, and `[]` for lists. +The default value of a type upon initialization is recursively defined using `0` for `"uintN"`, `False` for `"bool"`, and `[]` for lists. Unions default to the first type in the union (with type index zero), which is `"null"` if present in the union. + +#### `is_empty` + +An SSZ object is called empty (and thus `is_empty(object)` returns true) if it is equal to the default value for that type. + +### Illegal types + +Empty vector types (i.e. `[subtype, 0]` for some `subtype`) are not legal. The `"null"` type is only legal as the first type in a union subtype (i.e., with type index zero). ## Serialization @@ -62,7 +82,6 @@ We recursively define the `serialize` function which consumes an object `value` > *Note*: In the function definitions below (`serialize`, `hash_tree_root`, `signing_root`, `is_variable_size`, etc.) objects implicitly carry their type. 
- ### `"uintN"` ```python @@ -77,10 +96,16 @@ assert value in (True, False) return b"\x01" if value is True else b"\x00" ``` -### Containers, vectors, lists +### `"null"` ```python -# Reccursively serialize +return b"" +``` + +### Vectors, containers, lists, unions + +```python +# Recursively serialize fixed_parts = [serialize(element) if not is_variable_size(element) else None for element in value] variable_parts = [serialize(element) if is_variable_size(element) else b"" for element in value] @@ -97,6 +122,16 @@ fixed_parts = [part if part != None else variable_offsets[i] for i, part in enum return b"".join(fixed_parts + variable_parts) ``` +If `value` is a union type: + +Define value as an object that has properties `value.value` with the contained value, and `value.type_index` which indexes the type. + +```python +serialized_bytes = serialize(value.value) +serialized_type_index = value.type_index.to_bytes(BYTES_PER_LENGTH_OFFSET, "little") +return serialized_type_index + serialized_bytes +``` + ## Deserialization Because serialization is an injective function (i.e. two distinct objects of the same type will serialize to different values) any bytestring has at most one object it could deserialize to. Efficient algorithms for computing this object can be found in [the implementations](#implementations). @@ -106,8 +141,9 @@ Because serialization is an injective function (i.e. two distinct objects of the We first define helper functions: * `pack`: Given ordered objects of the same basic type, serialize them, pack them into `BYTES_PER_CHUNK`-byte chunks, right-pad the last chunk with zero bytes, and return the chunks. -* `merkleize`: Given ordered `BYTES_PER_CHUNK`-byte chunks, if necessary append zero chunks so that the number of chunks is a power of two, Merkleize the chunks, and return the root. 
+* `merkleize`: Given ordered `BYTES_PER_CHUNK`-byte chunks, if necessary append zero chunks so that the number of chunks is a power of two, Merkleize the chunks, and return the root. Note that `merkleize` on a single chunk is simply that chunk, i.e. the identity when the number of chunks is one. * `mix_in_length`: Given a Merkle root `root` and a length `length` (`"uint256"` little-endian serialization) return `hash(root + length)`. +* `mix_in_type`: Given a Merkle root `root` and a type_index `type_index` (`"uint256"` little-endian serialization) return `hash(root + type_index)`. We now define Merkleization `hash_tree_root(value)` of an object `value` recursively: @@ -115,6 +151,7 @@ We now define Merkleization `hash_tree_root(value)` of an object `value` recursi * `mix_in_length(merkleize(pack(value)), len(value))` if `value` is a list of basic objects * `merkleize([hash_tree_root(element) for element in value])` if `value` is a vector of composite objects or a container * `mix_in_length(merkleize([hash_tree_root(element) for element in value]), len(value))` if `value` is a list of composite objects +* `mix_in_type(merkleize(value.value), value.type_index)` if `value` is of union type ## Self-signed containers @@ -130,7 +167,7 @@ Let `value` be a self-signed container object. 
The convention is that the signat | Rust | Shasper | ParityTech | [https://github.com/paritytech/shasper/tree/master/util/ssz](https://github.com/paritytech/shasper/tree/master/util/ssz) | | TypeScript | Lodestar | ChainSafe Systems | [https://github.com/ChainSafe/ssz-js](https://github.com/ChainSafe/ssz-js) | | Java | Cava | ConsenSys | [https://www.github.com/ConsenSys/cava/tree/master/ssz](https://www.github.com/ConsenSys/cava/tree/master/ssz) | -| Go | Prysm | Prysmatic Labs | [https://github.com/prysmaticlabs/prysm/tree/master/shared/ssz](https://github.com/prysmaticlabs/prysm/tree/master/shared/ssz) | +| Go | Prysm | Prysmatic Labs | [https://github.com/prysmaticlabs/go-ssz](https://github.com/prysmaticlabs/go-ssz) | | Swift | Yeeth | Dean Eigenmann | [https://github.com/yeeth/SimpleSerialize.swift](https://github.com/yeeth/SimpleSerialize.swift) | | C# | | Jordan Andrews | [https://github.com/codingupastorm/csharp-ssz](https://github.com/codingupastorm/csharp-ssz) | -| C++ | | | [https://github.com/NAKsir-melody/cpp_ssz](https://github.com/NAKsir-melody/cpp_ssz) | +| C++ | | Jiyun Kim | [https://github.com/NAKsir-melody/cpp_ssz](https://github.com/NAKsir-melody/cpp_ssz) | diff --git a/specs/test_formats/README.md b/specs/test_formats/README.md index d245fcfa4..277e986d5 100644 --- a/specs/test_formats/README.md +++ b/specs/test_formats/README.md @@ -1,17 +1,27 @@ # General test format -This document defines the YAML format and structure used for ETH 2.0 testing. +This document defines the YAML format and structure used for Eth 2.0 testing. 
-## ToC +## Table of contents + -* [About](#about) -* [Glossary](#glossary) -* [Test format philosophy](#test-format-philosophy) -* [Test Suite](#test-suite) -* [Config](#config) -* [Fork-timeline](#fork-timeline) -* [Config sourcing](#config-sourcing) -* [Test structure](#test-structure) +- [General test format](#general-test-format) + - [Table of contents](#table-of-contents) + - [About](#about) + - [Test-case formats](#test-case-formats) + - [Glossary](#glossary) + - [Test format philosophy](#test-format-philosophy) + - [Config design](#config-design) + - [Fork config design](#fork-config-design) + - [Test completeness](#test-completeness) + - [Test suite](#test-suite) + - [Config](#config) + - [Fork-timeline](#fork-timeline) + - [Config sourcing](#config-sourcing) + - [Test structure](#test-structure) + - [Note for implementers](#note-for-implementers) + + ## About @@ -25,7 +35,8 @@ Test formats: - [`bls`](./bls/README.md) - [`operations`](./operations/README.md) - [`shuffling`](./shuffling/README.md) -- [`ssz`](./ssz/README.md) +- [`ssz_generic`](./ssz_generic/README.md) +- [`ssz_static`](./ssz_static/README.md) - More formats are planned, see tracking issues for CI/testing ## Glossary @@ -52,28 +63,28 @@ Test formats: ### Config design After long discussion, the following types of configured constants were identified: -- Never changing: genesis data +- Never changing: genesis data. - Changing, but reliant on old value: e.g. an epoch time may change, but if you want to do the conversion - `(genesis data, timestamp) -> epoch number` you end up needing both constants. + `(genesis data, timestamp) -> epoch number`, you end up needing both constants. - Changing, but kept around during fork transition: finalization may take a while, e.g. an executable has to deal with new deposits and old deposits at the same time. Another example may be economic constants. 
-- Additional, back-wards compatible: new constants are introduced for later phases +- Additional, backwards compatible: new constants are introduced for later phases. - Changing: there is a very small chance some constant may really be *replaced*. In this off-chance, it is likely better to include it as an additional variable, - and some clients may simply stop supporting the old one, if they do not want to sync from genesis. + and some clients may simply stop supporting the old one if they do not want to sync from genesis. Based on these types of changes, we model the config as a list of key value pairs, - that only grows with every fork (they may change in development versions of forks however, git manages this). -With this approach, configurations are backwards compatible (older clients ignore unknown variables), and easy to maintain. + that only grows with every fork (they may change in development versions of forks, however; git manages this). +With this approach, configurations are backwards compatible (older clients ignore unknown variables) and easy to maintain. ### Fork config design There are two types of fork-data: -1) timeline: when does a fork take place? -2) coverage: what forks are covered by a test? +1) Timeline: When does a fork take place? +2) Coverage: What forks are covered by a test? The first is neat to have as a separate form: we prevent duplication, and can run with different presets - (e.g. fork timeline for a minimal local test, for a public testnet, or for mainnet) + (e.g. fork timeline for a minimal local test, for a public testnet, or for mainnet). The second does not affect the result of the tests, it just states what is covered by the tests, so that the right suites can be executed to see coverage for a certain fork. 
@@ -90,7 +101,7 @@ The aim is to provide clients with a well-defined scope of work to run a particu - Clients that are not complete in functionality can choose to ignore suites that use certain test-runners, or specific handlers of these test-runners. - Clients that are on older versions can test their work based on older releases of the generated tests, and catch up with newer releases when possible. -## Test Suite +## Test suite ``` title: -- Display name for the test suite @@ -113,9 +124,9 @@ Separation of configuration and tests aims to: - Prevent duplication of configuration - Make all tests easy to upgrade (e.g. when a new config constant is introduced) - Clearly define which constants to use -- Shareable between clients, for cross-client short or long lived testnets +- Shareable between clients, for cross-client short- or long-lived testnets - Minimize the amounts of different constants permutations to compile as a client. - Note: Some clients prefer compile-time constants and optimizations. + *Note*: Some clients prefer compile-time constants and optimizations. They should compile for each configuration once, and run the corresponding tests per build target. The format is described in [`configs/constant_presets`](../../configs/constant_presets/README.md#format). @@ -124,9 +135,9 @@ The format is described in [`configs/constant_presets`](../../configs/constant_p ## Fork-timeline A fork timeline is (preferably) loaded in as a configuration object into a client, as opposed to the constants configuration: - - we do not allocate or optimize any code based on epoch numbers - - when we transition from one fork to the other, it is preferred to stay online. - - we may decide on an epoch number for a fork based on external events (e.g. Eth1 log event), + - We do not allocate or optimize any code based on epoch numbers. + - When we transition from one fork to the other, it is preferred to stay online. 
+ - We may decide on an epoch number for a fork based on external events (e.g. Eth1 log event); a client should be able to activate a fork dynamically. The format is described in [`configs/fork_timelines`](../../configs/fork_timelines/README.md#format). diff --git a/specs/test_formats/bls/README.md b/specs/test_formats/bls/README.md index db63bba1d..4d95bdfd7 100644 --- a/specs/test_formats/bls/README.md +++ b/specs/test_formats/bls/README.md @@ -1,7 +1,7 @@ # BLS tests A test type for BLS. Primarily geared towards verifying the *integration* of any BLS library. -We do not recommend to roll your own crypto, or use an untested BLS library. +We do not recommend rolling your own crypto or using an untested BLS library. The BLS test suite runner has the following handlers: @@ -12,4 +12,4 @@ The BLS test suite runner has the following handlers: - [`priv_to_pub`](./priv_to_pub.md) - [`sign_msg`](./sign_msg.md) -Note: signature-verification and aggregate-verify test cases are not yet supported. +*Note*: Signature-verification and aggregate-verify test cases are not yet supported. diff --git a/specs/test_formats/shuffling/README.md b/specs/test_formats/shuffling/README.md index 57be96565..25074742d 100644 --- a/specs/test_formats/shuffling/README.md +++ b/specs/test_formats/shuffling/README.md @@ -1,16 +1,16 @@ # Test format: shuffling -The runner of the Shuffling test type has only one handler: `core` +The runner of the Shuffling test type has only one handler: `core`. -This does not mean however that testing is limited. +However, this does not mean that testing is limited. Clients may take different approaches to shuffling, for optimizing, and supporting advanced lookup behavior back in older history. For implementers, possible test runners implementing testing can include: -1) just test permute-index, run it for each index `i` in `range(count)`, and check against expected `output[i]` (default spec implementation) -2) test un-permute-index (the reverse lookup. 
Implemented by running the shuffling rounds in reverse: from `round_count-1` to `0`) -3) test the optimized complete shuffle, where all indices are shuffled at once, test output in one go. -4) test complete shuffle in reverse (reverse rounds, same as 2) +1) Just test permute-index, run it for each index `i` in `range(count)`, and check against expected `output[i]` (default spec implementation). +2) Test un-permute-index (the reverse lookup; implemented by running the shuffling rounds in reverse, from `round_count-1` to `0`). +3) Test the optimized complete shuffle, where all indices are shuffled at once; test output in one go. +4) Test complete shuffle in reverse (reverse rounds, same as #2). ## Test case format diff --git a/specs/test_formats/ssz_generic/README.md b/specs/test_formats/ssz_generic/README.md index 9fda0c368..da0898087 100644 --- a/specs/test_formats/ssz_generic/README.md +++ b/specs/test_formats/ssz_generic/README.md @@ -3,7 +3,7 @@ This set of test-suites provides general testing for SSZ: to instantiate any container/list/vector/other type from binary data. -Since SSZ is in a development-phase, not the full suite of features is covered yet. +Since SSZ is in a development-phase, the full suite of features is not covered yet. Note that these tests are based on the older SSZ package. The tests are still relevant, but limited in scope: more complex object encodings have changed since the original SSZ testing. @@ -11,10 +11,10 @@ The tests are still relevant, but limited in scope: A minimal but useful series of tests covering `uint` encoding and decoding is provided. This is a direct port of the older SSZ `uint` tests (minus outdated test cases). -[uint test format](./uint.md). +Test format documentation can be found here: [uint test format](./uint.md). -Note: the current phase-0 spec does not use larger uints, and uses byte vectors (fixed length) instead to represent roots etc. 
+*Note*: The current Phase 0 spec does not use larger uints, and uses byte vectors (fixed length) instead to represent roots etc. The exact uint lengths to support may be redefined in the future. -Extension of the SSZ tests collection is planned, with an update to the new spec-maintained `minimal_ssz.py`, +Extension of the SSZ tests collection is planned, with an update to the new spec-maintained `minimal_ssz.py`; see CI/testing issues for progress tracking. diff --git a/specs/test_formats/ssz_static/README.md b/specs/test_formats/ssz_static/README.md index 413b00c75..1df2cb5f6 100644 --- a/specs/test_formats/ssz_static/README.md +++ b/specs/test_formats/ssz_static/README.md @@ -1,7 +1,7 @@ # SSZ, static tests This set of test-suites provides static testing for SSZ: - to instantiate just the known ETH-2.0 SSZ types from binary data. + to instantiate just the known Eth 2.0 SSZ types from binary data. This series of tests is based on the spec-maintained `minimal_ssz.py`, i.e. fully consistent with the SSZ spec. diff --git a/specs/test_formats/ssz_static/core.md b/specs/test_formats/ssz_static/core.md index 0f26e0f9c..f24a225b0 100644 --- a/specs/test_formats/ssz_static/core.md +++ b/specs/test_formats/ssz_static/core.md @@ -1,7 +1,7 @@ # Test format: SSZ static types The goal of this type is to provide clients with a solid reference for how the known SSZ objects should be encoded. -Each object described in the Phase-0 spec is covered. +Each object described in the Phase 0 spec is covered. This is important, as many of the clients aiming to serialize/deserialize objects directly into structs/classes do not support (or have alternatives for) generic SSZ encoding/decoding. This test-format ensures these direct serializations are covered. 
@@ -27,6 +27,6 @@ A test-runner can implement the following assertions: ## References -**`serialized`**: [SSZ serialization](../../simple-serialize.md#serialization) -**`root`** - [hash_tree_root](../../simple-serialize.md#merkleization) function -**`signing_root`** - [signing_root](../../simple-serialize.md#self-signed-containers) function +**`serialized`**—[SSZ serialization](../../simple-serialize.md#serialization) +**`root`**—[hash_tree_root](../../simple-serialize.md#merkleization) function +**`signing_root`**—[signing_root](../../simple-serialize.md#self-signed-containers) function diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 82f740f7d..e7a3a1a9d 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -1,13 +1,13 @@ # Ethereum 2.0 Phase 0 -- Honest Validator -__NOTICE__: This document is a work-in-progress for researchers and implementers. This is an accompanying document to [Ethereum 2.0 Phase 0 -- The Beacon Chain](../core/0_beacon-chain.md) that describes the expected actions of a "validator" participating in the Ethereum 2.0 protocol. +**Notice**: This document is a work-in-progress for researchers and implementers. This is an accompanying document to [Ethereum 2.0 Phase 0 -- The Beacon Chain](../core/0_beacon-chain.md), which describes the expected actions of a "validator" participating in the Ethereum 2.0 protocol. 
-## Table of Contents +## Table of contents - [Ethereum 2.0 Phase 0 -- Honest Validator](#ethereum-20-phase-0----honest-validator) - - [Table of Contents](#table-of-contents) + - [Table of contents](#table-of-contents) - [Introduction](#introduction) - [Prerequisites](#prerequisites) - [Constants](#constants) @@ -20,6 +20,8 @@ __NOTICE__: This document is a work-in-progress for researchers and implementers - [Process deposit](#process-deposit) - [Validator index](#validator-index) - [Activation](#activation) + - [Validator assignments](#validator-assignments) + - [Lookahead](#lookahead) - [Beacon chain responsibilities](#beacon-chain-responsibilities) - [Block proposal](#block-proposal) - [Block header](#block-header) @@ -37,21 +39,14 @@ __NOTICE__: This document is a work-in-progress for researchers and implementers - [Voluntary exits](#voluntary-exits) - [Attestations](#attestations-1) - [Attestation data](#attestation-data) - - [Slot](#slot-1) - - [Beacon block root](#beacon-block-root) - - [Source epoch](#source-epoch) - - [Source root](#source-root) - - [Target root](#target-root) - - [Shard](#shard) - - [Previous crosslink root](#previous-crosslink-root) - - [Crosslink data root](#crosslink-data-root) + - [LMD GHOST vote](#lmd-ghost-vote) + - [FFG vote](#ffg-vote) + - [Crosslink vote](#crosslink-vote) - [Construct attestation](#construct-attestation) - [Data](#data) - [Aggregation bitfield](#aggregation-bitfield) - [Custody bitfield](#custody-bitfield) - [Aggregate signature](#aggregate-signature) - - [Validator assignments](#validator-assignments) - - [Lookahead](#lookahead) - [How to avoid slashing](#how-to-avoid-slashing) - [Proposer slashing](#proposer-slashing) - [Attester slashing](#attester-slashing) @@ -96,21 +91,21 @@ The validator constructs their `withdrawal_credentials` via the following: ### Submit deposit -In phase 0, all incoming validator deposits originate from the Ethereum 1.0 PoW chain. 
Deposits are made to the [deposit contract](../core/0_deposit-contract.md) located at `DEPOSIT_CONTRACT_ADDRESS`. +In Phase 0, all incoming validator deposits originate from the Ethereum 1.0 PoW chain. Deposits are made to the [deposit contract](../core/0_deposit-contract.md) located at `DEPOSIT_CONTRACT_ADDRESS`. To submit a deposit: * Pack the validator's [initialization parameters](#initialization) into `deposit_data`, a [`DepositData`](../core/0_beacon-chain.md#depositdata) SSZ object. -* Let `amount` be the amount in Gwei to be deposited by the validator where `MIN_DEPOSIT_AMOUNT <= amount <= MAX_DEPOSIT_AMOUNT`. +* Let `amount` be the amount in Gwei to be deposited by the validator where `MIN_DEPOSIT_AMOUNT <= amount <= MAX_EFFECTIVE_BALANCE`. * Set `deposit_data.amount = amount`. -* Let `signature` be the result of `bls_sign` of the `signing_root(deposit_data)` with `domain=DOMAIN_DEPOSIT`. +* Let `signature` be the result of `bls_sign` of the `signing_root(deposit_data)` with `domain=bls_domain(DOMAIN_DEPOSIT)`. (Deposits are valid regardless of fork version, `bls_domain` will default to zeroes there). * Send a transaction on the Ethereum 1.0 chain to `DEPOSIT_CONTRACT_ADDRESS` executing `def deposit(pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96])` along with a deposit of `amount` Gwei. -_Note_: Deposits made for the same `pubkey` are treated as for the same validator. A singular `Validator` will be added to `state.validator_registry` with each additional deposit amount added to the validator's balance. A validator can only be activated when total deposits for the validator pubkey meet or exceed `MAX_DEPOSIT_AMOUNT`. +*Note*: Deposits made for the same `pubkey` are treated as for the same validator. A singular `Validator` will be added to `state.validator_registry` with each additional deposit amount added to the validator's balance. 
A validator can only be activated when total deposits for the validator pubkey meet or exceed `MAX_EFFECTIVE_BALANCE`. ### Process deposit -Deposits cannot be processed into the beacon chain until the eth1.0 block in which they were deposited or any of its descendants is added to the beacon chain `state.eth1_data`. This takes _a minimum_ of `ETH1_FOLLOW_DISTANCE` eth1.0 blocks (~4 hours) plus `ETH1_DATA_VOTING_PERIOD` epochs (~1.7 hours). Once the requisite eth1.0 data is added, the deposit will normally be added to a beacon chain block and processed into the `state.validator_registry` within an epoch or two. The validator is then in a queue to be activated. +Deposits cannot be processed into the beacon chain until the Eth 1.0 block in which they were deposited or any of its descendants is added to the beacon chain `state.eth1_data`. This takes _a minimum_ of `ETH1_FOLLOW_DISTANCE` Eth 1.0 blocks (~4 hours) plus `ETH1_DATA_VOTING_PERIOD` epochs (~1.7 hours). Once the requisite Eth 1.0 data is added, the deposit will normally be added to a beacon chain block and processed into the `state.validator_registry` within an epoch or two. The validator is then in a queue to be activated. ### Validator index @@ -120,25 +115,73 @@ Once a validator has been processed and added to the beacon state's `validator_r In normal operation, the validator is quickly activated at which point the validator is added to the shuffling and begins validation after an additional `ACTIVATION_EXIT_DELAY` epochs (25.6 minutes). -The function [`is_active_validator`](../core/0_beacon-chain.md#is_active_validator) can be used to check if a validator is active during a given shuffling epoch. Note that the `BeaconState` contains a field `current_shuffling_epoch` which dictates from which epoch the current active validators are taken. 
Usage is as follows: +The function [`is_active_validator`](../core/0_beacon-chain.md#is_active_validator) can be used to check if a validator is active during a given epoch. Usage is as follows: ```python -shuffling_epoch = state.current_shuffling_epoch validator = state.validator_registry[validator_index] -is_active = is_active_validator(validator, shuffling_epoch) +is_active = is_active_validator(validator, get_current_epoch(state)) ``` Once a validator is activated, the validator is assigned [responsibilities](#beacon-chain-responsibilities) until exited. -_Note_: There is a maximum validator churn per finalized epoch so the delay until activation is variable depending upon finality, total active validator balance, and the number of validators in the queue to be activated. +*Note*: There is a maximum validator churn per finalized epoch so the delay until activation is variable depending upon finality, total active validator balance, and the number of validators in the queue to be activated. + +## Validator assignments + +A validator can get committee assignments for a given epoch using the following helper via `get_committee_assignment(state, epoch, validator_index)` where `epoch <= next_epoch`. + +```python +def get_committee_assignment( + state: BeaconState, + epoch: Epoch, + validator_index: ValidatorIndex) -> Tuple[List[ValidatorIndex], Shard, Slot]: + """ + Return the committee assignment in the ``epoch`` for ``validator_index``. 
+    ``assignment`` returned is a tuple of the following form:
+        * ``assignment[0]`` is the list of validators in the committee
+        * ``assignment[1]`` is the shard to which the committee is assigned
+        * ``assignment[2]`` is the slot at which the committee is assigned
+    """
+    next_epoch = get_current_epoch(state) + 1
+    assert epoch <= next_epoch
+
+    committees_per_slot = get_epoch_committee_count(state, epoch) // SLOTS_PER_EPOCH
+    epoch_start_slot = get_epoch_start_slot(epoch)
+    for slot in range(epoch_start_slot, epoch_start_slot + SLOTS_PER_EPOCH):
+        offset = committees_per_slot * (slot % SLOTS_PER_EPOCH)
+        slot_start_shard = (get_epoch_start_shard(state, epoch) + offset) % SHARD_COUNT
+        for i in range(committees_per_slot):
+            shard = (slot_start_shard + i) % SHARD_COUNT
+            committee = get_crosslink_committee(state, epoch, shard)
+            if validator_index in committee:
+                return committee, shard, slot
+```
+
+A validator can use the following function to see if they are supposed to propose during their assigned committee slot. This function can only be run with a `state` of the slot in question. Proposer selection is only stable within the context of the current epoch.
+
+```python
+def is_proposer(state: BeaconState,
+                validator_index: ValidatorIndex) -> bool:
+    return get_beacon_proposer_index(state) == validator_index
+```
+
+*Note*: To see if a validator is assigned to propose during the slot, the beacon state must be in the epoch in question. At the epoch boundaries, the validator must run an epoch transition into the epoch to successfully check the proposal assignment of the first slot.
+
+### Lookahead
+
+The beacon chain shufflings are designed to provide a minimum of 1 epoch lookahead on the validator's upcoming committee assignments for attesting dictated by the shuffling and slot. Note that this lookahead does not apply to proposing, which must be checked during the epoch in question.
+ +`get_committee_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). A validator should plan for future assignments by noting at which future slot they will have to attest and also which shard they should begin syncing (in Phase 1+). + +Specifically, a validator should call `get_committee_assignment(state, next_epoch, validator_index)` when checking for next epoch assignments. ## Beacon chain responsibilities -A validator has two primary responsibilities to the beacon chain -- [proposing blocks](block-proposal) and [creating attestations](attestations-1). Proposals happen infrequently, whereas attestations should be created once per epoch. +A validator has two primary responsibilities to the beacon chain: [proposing blocks](#block-proposal) and [creating attestations](#attestations-1). Proposals happen infrequently, whereas attestations should be created once per epoch. ### Block proposal -A validator is expected to propose a [`BeaconBlock`](../core/0_beacon-chain.md#beaconblock) at the beginning of any slot during which `get_beacon_proposer_index(state)` returns the validator's `validator_index`. To propose, the validator selects the `BeaconBlock`, `parent`, that in their view of the fork choice is the head of the chain during `slot - 1`. The validator is to create, sign, and broadcast a `block` that is a child of `parent` and that executes a valid [beacon chain state transition](../core/0_beacon-chain.md#beacon-chain-state-transition-function). +A validator is expected to propose a [`BeaconBlock`](../core/0_beacon-chain.md#beaconblock) at the beginning of any slot during which `is_proposer(state, validator_index)` returns `True`. To propose, the validator selects the `BeaconBlock`, `parent`, that in their view of the fork choice is the head of the chain during `slot - 1`. 
The validator creates, signs, and broadcasts a `block` that is a child of `parent` that satisfies a valid [beacon chain state transition](../core/0_beacon-chain.md#beacon-chain-state-transition-function). There is one proposer per slot, so if there are N active validators any individual validator will on average be assigned to propose once per N slots (e.g. at 312500 validators = 10 million ETH, that's once per ~3 weeks). @@ -148,17 +191,17 @@ There is one proposer per slot, so if there are N active validators any individu Set `block.slot = slot` where `slot` is the current slot at which the validator has been selected to propose. The `parent` selected must satisfy that `parent.slot < block.slot`. -_Note:_ there might be "skipped" slots between the `parent` and `block`. These skipped slots are processed in the state transition function without per-block processing. +*Note*: There might be "skipped" slots between the `parent` and `block`. These skipped slots are processed in the state transition function without per-block processing. ##### Parent root -Set `block.previous_block_root = signing_root(parent)`. +Set `block.parent_root = signing_root(parent)`. ##### State root Set `block.state_root = hash_tree_root(state)` of the resulting `state` of the `parent -> block` state transition. -_Note_: To calculate `state_root`, the validator should first run the state transition function on an unsigned `block` containing a stub for the `state_root`. It is useful to be able to run a state transition function that does _not_ validate signatures or state root for this purpose. +*Note*: To calculate `state_root`, the validator should first run the state transition function on an unsigned `block` containing a stub for the `state_root`. It is useful to be able to run a state transition function that does _not_ validate signatures or state root for this purpose. 
##### Randao reveal @@ -180,16 +223,16 @@ epoch_signature = bls_sign( `block.eth1_data` is a mechanism used by block proposers vote on a recent Ethereum 1.0 block hash and an associated deposit root found in the Ethereum 1.0 deposit contract. When consensus is formed, `state.latest_eth1_data` is updated, and validator deposits up to this root can be processed. The deposit root can be calculated by calling the `get_deposit_root()` function of the deposit contract using the post-state of the block hash. -* Let `D` be the set of `Eth1DataVote` objects `vote` in `state.eth1_data_votes` where: - * `vote.eth1_data.block_hash` is the hash of an eth1.0 block that is (i) part of the canonical chain, (ii) >= `ETH1_FOLLOW_DISTANCE` blocks behind the head, and (iii) newer than `state.latest_eth1_data.block_data`. - * `vote.eth1_data.deposit_count` is the deposit count of the eth1.0 deposit contract at the block defined by `vote.eth1_data.block_hash`. - * `vote.eth1_data.deposit_root` is the deposit root of the eth1.0 deposit contract at the block defined by `vote.eth1_data.block_hash`. +* Let `D` be the list of `Eth1DataVote` objects `vote` in `state.eth1_data_votes` where: + * `vote.eth1_data.block_hash` is the hash of an Eth 1.0 block that is (i) part of the canonical chain, (ii) >= `ETH1_FOLLOW_DISTANCE` blocks behind the head, and (iii) newer than `state.latest_eth1_data.block_hash`. + * `vote.eth1_data.deposit_count` is the deposit count of the Eth 1.0 deposit contract at the block defined by `vote.eth1_data.block_hash`. + * `vote.eth1_data.deposit_root` is the deposit root of the Eth 1.0 deposit contract at the block defined by `vote.eth1_data.block_hash`. * If `D` is empty: - * Let `block_hash` be the block hash of the `ETH1_FOLLOW_DISTANCE`'th ancestor of the head of the canonical eth1.0 chain. 
- * Let `deposit_root` and `deposit_count` be the deposit root and deposit count of the eth1.0 deposit contract in the post-state of the block referenced by `block_hash` + * Let `block_hash` be the block hash of the `ETH1_FOLLOW_DISTANCE`'th ancestor of the head of the canonical Eth 1.0 chain. + * Let `deposit_root` and `deposit_count` be the deposit root and deposit count of the Eth 1.0 deposit contract in the post-state of the block referenced by `block_hash` * Let `best_vote_data = Eth1Data(block_hash=block_hash, deposit_root=deposit_root, deposit_count=deposit_count)`. * If `D` is nonempty: - * Let `best_vote_data` be the `eth1_data` of the member of `D` that has the highest `vote.vote_count`, breaking ties by favoring block hashes with higher associated block height. + * Let `best_vote_data` be the `eth1_data` member of `D` that has the highest vote count (`D.count(eth1_data)`), breaking ties by favoring block hashes with higher associated block height. * Set `block.eth1_data = best_vote_data`. ##### Signature @@ -224,7 +267,7 @@ Up to `MAX_ATTESTATIONS` aggregate attestations can be included in the `block`. ##### Deposits -If there are any unprocessed deposits for the existing `state.latest_eth1_data` (i.e. `state.latest_eth1_data.deposit_count > state.deposit_index`), then pending deposits _must_ be added to the block. The expected number of deposits is exactly `min(MAX_DEPOSITS, latest_eth1_data.deposit_count - state.deposit_index)`. These [`deposits`](../core/0_beacon-chain.md#deposit) are constructed from the `Deposit` logs from the [Eth1.0 deposit contract](../core/0_deposit-contract) and must be processed in sequential order. The deposits included in the `block` must satisfy the verification conditions found in [deposits processing](../core/0_beacon-chain.md#deposits). +If there are any unprocessed deposits for the existing `state.latest_eth1_data` (i.e. 
`state.latest_eth1_data.deposit_count > state.deposit_index`), then pending deposits _must_ be added to the block. The expected number of deposits is exactly `min(MAX_DEPOSITS, latest_eth1_data.deposit_count - state.deposit_index)`. These [`deposits`](../core/0_beacon-chain.md#deposit) are constructed from the `Deposit` logs from the [Eth 1.0 deposit contract](../core/0_deposit-contract) and must be processed in sequential order. The deposits included in the `block` must satisfy the verification conditions found in [deposits processing](../core/0_beacon-chain.md#deposits). The `proof` for each deposit must be constructed against the deposit root contained in `state.latest_eth1_data` rather than the deposit root at the time the deposit was initially logged from the 1.0 chain. This entails storing a full deposit merkle tree locally and computing updated proofs against the `latest_eth1_data.deposit_root` as needed. See [`minimal_merkle.py`](https://github.com/ethereum/research/blob/master/spec_pythonizer/utils/merkle_minimal.py) for a sample implementation. @@ -234,54 +277,42 @@ Up to `MAX_VOLUNTARY_EXITS` [`VoluntaryExit`](../core/0_beacon-chain.md#voluntar ### Attestations -A validator is expected to create, sign, and broadcast an attestation during each epoch. The slot during which the validator performs this role is any slot at which `get_crosslink_committees_at_slot(state, slot)` contains a committee that contains `validator_index`. +A validator is expected to create, sign, and broadcast an attestation during each epoch. The `committee`, assigned `shard`, and assigned `slot` for which the validator performs this role during an epoch is defined by `get_committee_assignment(state, epoch, validator_index)`. -A validator should create and broadcast the attestation halfway through the `slot` during which the validator is assigned -- that is `SECONDS_PER_SLOT * 0.5` seconds after the start of `slot`. 
+A validator should create and broadcast the attestation halfway through the `slot` during which the validator is assigned ― that is, `SECONDS_PER_SLOT * 0.5` seconds after the start of `slot`. #### Attestation data First the validator should construct `attestation_data`, an [`AttestationData`](../core/0_beacon-chain.md#attestationdata) object based upon the state at the assigned slot. * Let `head_block` be the result of running the fork choice during the assigned slot. -* Let `head_state` be the state of `head_block` processed through any empty slots up to the assigned slot. +* Let `head_state` be the state of `head_block` processed through any empty slots up to the assigned slot using `process_slots(state, slot)`. -##### Slot - -Set `attestation_data.slot = head_state.slot`. - -##### Beacon block root +##### LMD GHOST vote Set `attestation_data.beacon_block_root = signing_root(head_block)`. -##### Source epoch +##### FFG vote -Set `attestation_data.source_epoch = head_state.justified_epoch`. +* Set `attestation_data.source_epoch = head_state.current_justified_epoch`. +* Set `attestation_data.source_root = head_state.current_justified_root`. +* Set `attestation_data.target_epoch = get_current_epoch(head_state)` +* Set `attestation_data.target_root = epoch_boundary_block_root` where `epoch_boundary_block_root` is the root of block at the most recent epoch boundary. -##### Source root - -Set `attestation_data.source_root = head_state.current_justified_root`. - -##### Target root - -Set `attestation_data.target_root = signing_root(epoch_boundary)` where `epoch_boundary` is the block at the most recent epoch boundary. - -_Note:_ This can be looked up in the state using: +*Note*: `epoch_boundary_block_root` can be looked up in the state using: * Let `epoch_start_slot = get_epoch_start_slot(get_current_epoch(head_state))`. -* Set `epoch_boundary = head if epoch_start_slot == head_state.slot else get_block_root(state, epoch_start_slot)`. 
+* Let `epoch_boundary_block_root = signing_root(head_block) if epoch_start_slot == head_state.slot else get_block_root(state, epoch_start_slot)`. -##### Shard +##### Crosslink vote -Set `attestation_data.shard = shard` where `shard` is the shard associated with the validator's committee defined by `get_crosslink_committees_at_slot`. +Construct `attestation_data.crosslink` via the following. -##### Previous crosslink root - -Set `attestation_data.previous_crosslink_root = hash_tree_root(head_state.current_crosslinks[shard])`. - -##### Crosslink data root - -Set `attestation_data.crosslink_data_root = ZERO_HASH`. - -_Note:_ This is a stub for phase 0. +* Set `attestation_data.crosslink.shard = shard` where `shard` is the shard associated with the validator's committee. +* Let `parent_crosslink = head_state.current_crosslinks[shard]`. +* Set `attestation_data.crosslink.start_epoch = parent_crosslink.end_epoch`. +* Set `attestation_data.crosslink.end_epoch = min(attestation_data.target_epoch, parent_crosslink.end_epoch + MAX_EPOCHS_PER_CROSSLINK)`. +* Set `attestation_data.crosslink.parent_root = hash_tree_root(head_state.current_crosslinks[shard])`. +* Set `attestation_data.crosslink.data_root = ZERO_HASH`. *Note*: This is a stub for Phase 0. #### Construct attestation @@ -298,14 +329,14 @@ Set `attestation.data = attestation_data` where `attestation_data` is the `Attes * Set `aggregation_bitfield[index_into_committee // 8] |= 2 ** (index_into_committee % 8)`. * Set `attestation.aggregation_bitfield = aggregation_bitfield`. -_Note_: Calling `get_attesting_indices(state, attestation.data, attestation.aggregation_bitfield)` should return a list of length equal to 1, containing `validator_index`. +*Note*: Calling `get_attesting_indices(state, attestation.data, attestation.aggregation_bitfield)` should return a list of length equal to 1, containing `validator_index`. 
##### Custody bitfield * Let `custody_bitfield` be a byte array filled with zeros of length `(len(committee) + 7) // 8`. * Set `attestation.custody_bitfield = custody_bitfield`. -_Note:_ This is a stub for phase 0. +*Note*: This is a stub for Phase 0. ##### Aggregate signature @@ -329,84 +360,23 @@ signed_attestation_data = bls_sign( ) ``` -## Validator assignments - -A validator can get the current, previous, and next epoch committee assignments using the following helper via `get_committee_assignment(state, epoch, validator_index)` where `previous_epoch <= epoch <= next_epoch`. - -```python -def get_committee_assignment( - state: BeaconState, - epoch: Epoch, - validator_index: ValidatorIndex) -> Tuple[List[ValidatorIndex], Shard, Slot]: - """ - Return the committee assignment in the ``epoch`` for ``validator_index``. - ``assignment`` returned is a tuple of the following form: - * ``assignment[0]`` is the list of validators in the committee - * ``assignment[1]`` is the shard to which the committee is assigned - * ``assignment[2]`` is the slot at which the committee is assigned - """ - previous_epoch = get_previous_epoch(state) - next_epoch = get_current_epoch(state) + 1 - assert previous_epoch <= epoch <= next_epoch - - epoch_start_slot = get_epoch_start_slot(epoch) - for slot in range(epoch_start_slot, epoch_start_slot + SLOTS_PER_EPOCH): - crosslink_committees = get_crosslink_committees_at_slot( - state, - slot, - ) - selected_committees = [ - committee # Tuple[List[ValidatorIndex], Shard] - for committee in crosslink_committees - if validator_index in committee[0] - ] - if len(selected_committees) > 0: - validators = selected_committees[0][0] - shard = selected_committees[0][1] - - assignment = (validators, shard, slot) - return assignment -``` - -A validator can use the following function to see if they are supposed to propose during their assigned committee slot. This function can only be run during the slot in question. 
Proposer selection is only stable within the context of the current epoch. - -```python -def is_proposer_at_slot(state: BeaconState, - slot: Slot, - validator_index: ValidatorIndex) -> bool: - assert state.slot == slot - - return get_beacon_proposer_index(state) == validator_index -``` - -_Note_: To see if a validator is assigned to proposer during the slot, the validator must run an empty slot transition from the previous state to the current slot. - - -### Lookahead - -The beacon chain shufflings are designed to provide a minimum of 1 epoch lookahead on the validator's upcoming committee assignments for attesting dictated by the shuffling and slot. Note that this lookahead does not apply to proposing which must checked during the slot in question. - -`get_committee_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). A validator should plan for future assignments which involves noting at which future slot one will have to attest and also which shard one should begin syncing (in phase 1+). - -Specifically, a validator should call `get_committee_assignment(state, next_epoch, validator_index)` when checking for next epoch assignments. - ## How to avoid slashing "Slashing" is the burning of some amount of validator funds and immediate ejection from the active validator set. In Phase 0, there are two ways in which funds can be slashed -- [proposer slashing](#proposer-slashing) and [attester slashing](#attester-slashing). Although being slashed has serious repercussions, it is simple enough to avoid being slashed all together by remaining _consistent_ with respect to the messages a validator has previously signed. -_Note_: Signed data must be within a sequential `Fork` context to conflict. Messages cannot be slashed across diverging forks. If the previous fork version is 1 and the chain splits into fork 2 and 102, messages from 1 can slashable against messages in forks 1, 2, and 102. 
Messages in 2 cannot be slashable against messages in 102 and vice versa. +*Note*: Signed data must be within a sequential `Fork` context to conflict. Messages cannot be slashed across diverging forks. If the previous fork version is 1 and the chain splits into fork 2 and 102, messages from 1 can be slashable against messages in forks 1, 2, and 102. Messages in 2 cannot be slashable against messages in 102 and vice versa. ### Proposer slashing To avoid "proposer slashings", a validator must not sign two conflicting [`BeaconBlock`](../core/0_beacon-chain.md#beaconblock) where conflicting is defined as two distinct blocks within the same epoch. -_In phase 0, as long as the validator does not sign two different beacon blocks for the same epoch, the validator is safe against proposer slashings._ +*In Phase 0, as long as the validator does not sign two different beacon blocks for the same epoch, the validator is safe against proposer slashings.* -Specifically, when signing an `BeaconBlock`, a validator should perform the following steps in the following order: -1. Save a record to hard disk that an beacon block has been signed for the `epoch=slot_to_epoch(block.slot)`. +Specifically, when signing a `BeaconBlock`, a validator should perform the following steps in the following order: +1. Save a record to hard disk that a beacon block has been signed for the `epoch=slot_to_epoch(block.slot)`. 2. Generate and broadcast the block. -If the software crashes at some point within this routine, then when the validator comes back online the hard disk has the record of the _potentially_ signed/broadcast block and can effectively avoid slashing. +If the software crashes at some point within this routine, then when the validator comes back online the hard disk has the record of the *potentially* signed/broadcast block and can effectively avoid slashing. ### Attester slashing @@ -416,4 +386,4 @@ Specifically, when signing an `Attestation`, a validator should perform the foll 1.
Save a record to hard disk that an attestation has been signed for source -- `attestation_data.source_epoch` -- and target -- `slot_to_epoch(attestation_data.slot)`. 2. Generate and broadcast attestation. -If the software crashes at some point within this routine, then when the validator comes back online the hard disk has the record of the _potentially_ signed/broadcast attestation and can effectively avoid slashing. +If the software crashes at some point within this routine, then when the validator comes back online the hard disk has the record of the *potentially* signed/broadcast attestation and can effectively avoid slashing. diff --git a/specs/validator/0_beacon-node-validator-api.md b/specs/validator/0_beacon-node-validator-api.md new file mode 100644 index 000000000..2a5fe7fcd --- /dev/null +++ b/specs/validator/0_beacon-node-validator-api.md @@ -0,0 +1,28 @@ +# Ethereum 2.0 Phase 0 -- Beacon Node API for Validator + +__NOTICE__: This document is a work-in-progress for researchers and implementers. This is an accompanying document to [Ethereum 2.0 Phase 0 -- Honest Validator](0_beacon-chain-validator.md) that describes an API exposed by the beacon node, which enables the validator client to participate in the Ethereum 2.0 protocol. + +## Outline + +This document outlines a minimal application programming interface (API) which is exposed by a beacon node for use by a validator client implementation which aims to facilitate [_phase 0_](../../README.md#phase-0) of Ethereum 2.0. + +The API is a REST interface, accessed via HTTP, designed for use as a local communications protocol between binaries. The only supported return data type is currently JSON. + +### Background +The beacon node maintains the state of the beacon chain by communicating with other beacon nodes in the Ethereum Serenity network. Conceptually, it does not maintain keypairs that participate with the beacon chain. 
+ +The validator client is a conceptually separate entity which utilizes private keys to perform validator related tasks on the beacon chain, which we call validator "duties". These duties include the production of beacon blocks and signing of attestations. + +Since it is recommended to separate these concerns in the client implementations, we must clearly define the communication between them. + +The goal of this specification is to promote interoperability between beacon nodes and validator clients derived from different projects and to encourage innovation in validator client implementations, independently from beacon node development. For example, the validator client from Lighthouse could communicate with a running instance of the beacon node from Prysm, or a staking pool might create a decentrally managed validator client which utilizes the same API. + +This specification is derived from a proposal and discussion on Issues [#1011](https://github.com/ethereum/eth2.0-specs/issues/1011) and [#1012](https://github.com/ethereum/eth2.0-specs/issues/1012). + + +## Specification + +The API specification has been written in [OpenAPI 3.0](https://swagger.io/docs/specification/about/) and is provided in the [beacon_node_oapi.yaml](beacon_node_oapi.yaml) file alongside this document.
+ +For convenience, this specification has been uploaded to [SwaggerHub](https://swagger.io/tools/swaggerhub/) at the following URL: +[https://app.swaggerhub.com/apis/spble/beacon_node_api_for_validator](https://app.swaggerhub.com/apis/spble/beacon_node_api_for_validator) diff --git a/specs/validator/beacon_node_oapi.yaml b/specs/validator/beacon_node_oapi.yaml new file mode 100644 index 000000000..74be21fac --- /dev/null +++ b/specs/validator/beacon_node_oapi.yaml @@ -0,0 +1,641 @@ +openapi: "3.0.2" +info: + title: "Minimal Beacon Node API for Validator" + description: "A minimal API specification for the beacon node, which enables a validator to connect and perform its obligations on the Ethereum 2.0 phase 0 beacon chain." + version: "0.2.0" + license: + name: "Apache 2.0" + url: "https://www.apache.org/licenses/LICENSE-2.0.html" +tags: + - name: MinimalSet + description: The minimal set of endpoints to enable a working validator implementation. + - name: OptionalSet + description: Extra endpoints which are nice-to-haves. +paths: + /node/version: + get: + tags: + - MinimalSet + summary: "Get version string of the running beacon node." + description: "Requests that the beacon node identify information about its implementation in a format similar to a [HTTP User-Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) field." + responses: + 200: + description: Request successful + content: + application/json: + schema: + $ref: '#/components/schemas/version' + 500: + $ref: '#/components/responses/InternalError' + /node/genesis_time: + get: + tags: + - MinimalSet + summary: "Get the genesis_time parameter from beacon node configuration." + description: "Requests the genesis_time parameter from the beacon node, which should be consistent across all beacon nodes that follow the same beacon chain." 
+ responses: + 200: + description: Request successful + content: + application/json: + schema: + $ref: '#/components/schemas/genesis_time' + 500: + $ref: '#/components/responses/InternalError' + + /node/syncing: + get: + tags: + - MinimalSet + summary: "Poll to see if the beacon node is syncing." + description: "Requests the beacon node to describe if it's currently syncing or not, and if it is, what block it is up to. This is modelled after the Eth1.0 JSON-RPC eth_syncing call." + responses: + 200: + description: Request successful + content: + application/json: + schema: + type: object + properties: + is_syncing: + type: boolean + description: "A boolean of whether the node is currently syncing or not." + sync_status: + $ref: '#/components/schemas/SyncingStatus' + 500: + $ref: '#/components/responses/InternalError' + /node/fork: + get: + tags: + - OptionalSet + summary: "Get fork information from running beacon node." + description: "Requests the beacon node to provide which fork version it is currently on." + responses: + 200: + description: Request successful + content: + application/json: + schema: + type: object + properties: + fork: + $ref: '#/components/schemas/Fork' + chain_id: + type: integer + format: uint64 + description: "Sometimes called the network id, this number discerns the active chain for the beacon node. Analogous to Eth1.0 JSON-RPC net_version." + 500: + $ref: '#/components/responses/InternalError' + + /validator/duties: + get: + tags: + - MinimalSet + summary: "Get validator duties for the requested validators." + description: "Requests the beacon node to provide a set of _duties_, which are actions that should be performed by validators, for a particular epoch. Duties should only need to be checked once per epoch, however a chain reorganization (of > MIN_SEED_LOOKAHEAD epochs) could occur, resulting in a change of duties.
For full safety, this API call should be polled at every slot to ensure that chain reorganizations are recognized, and to ensure that the beacon node is properly synchronized." + parameters: + - name: validator_pubkeys + in: query + required: true + description: "An array of hex-encoded BLS public keys" + schema: + type: array + items: + $ref: '#/components/schemas/pubkey' + minItems: 1 + - name: epoch + in: query + required: false + schema: + type: integer + responses: + 200: + description: Success response + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ValidatorDuty' + 400: + $ref: '#/components/responses/InvalidRequest' + 406: + description: "Duties cannot be provided for the requested epoch." + 500: + $ref: '#/components/responses/InternalError' + 503: + $ref: '#/components/responses/CurrentlySyncing' + + /validator/block: + get: + tags: + - MinimalSet + summary: "Produce a new block, without signature." + description: "Requests a beacon node to produce a valid block, which can then be signed by a validator." + parameters: + - name: slot + in: query + required: true + description: "The slot for which the block should be proposed." + schema: + type: integer + format: uint64 + - name: randao_reveal + in: query + required: true + description: "The validator's randao reveal value." + schema: + type: string + format: byte + responses: + 200: + description: Success response + content: + application/json: + schema: + $ref: '#/components/schemas/BeaconBlock' + 400: + $ref: '#/components/responses/InvalidRequest' + 500: + $ref: '#/components/responses/InternalError' + 503: + $ref: '#/components/responses/CurrentlySyncing' + post: + tags: + - MinimalSet + summary: "Publish a signed block." + description: "Instructs the beacon node to broadcast a newly signed beacon block to the beacon network, to be included in the beacon chain. 
The beacon node is not required to validate the signed `BeaconBlock`, and a successful response (20X) only indicates that the broadcast has been successful. The beacon node is expected to integrate the new block into its state, and therefore validate the block internally, however blocks which fail the validation are still broadcast but a different status code is returned (202)" + parameters: + - name: beacon_block + in: query + required: true + description: "The `BeaconBlock` object, as sent from the beacon node originally, but now with the signature field completed." + schema: + $ref: '#/components/schemas/BeaconBlock' + responses: + 200: + description: "The block was validated successfully and has been broadcast. It has also been integrated into the beacon node's database." + 202: + description: "The block failed validation, but was successfully broadcast anyway. It was not integrated into the beacon node's database." + 400: + $ref: '#/components/responses/InvalidRequest' + 500: + $ref: '#/components/responses/InternalError' + 503: + $ref: '#/components/responses/CurrentlySyncing' + + /validator/attestation: + get: + tags: + - MinimalSet + summary: "Produce an attestation, without signature." + description: "Requests that the beacon node produce an IndexedAttestation, with a blank signature field, which the validator will then sign." + parameters: + - name: validator_pubkey + in: query + required: true + description: "Uniquely identifying which validator this attestation is to be produced for." + schema: + $ref: '#/components/schemas/pubkey' + - name: poc_bit + in: query + required: true + description: "The proof-of-custody bit that is to be reported by the requesting validator. This bit will be inserted into the appropriate location in the returned `IndexedAttestation`." + schema: + type: integer + format: uint32 + minimum: 0 + maximum: 1 + - name: slot + in: query + required: true + description: "The slot for which the attestation should be proposed." 
+ schema: + type: integer + - name: shard + in: query + required: true + description: "The shard number for which the attestation is to be proposed." + schema: + type: integer + responses: + 200: + description: Success response + content: + application/json: + schema: + $ref: '#/components/schemas/IndexedAttestation' + 400: + $ref: '#/components/responses/InvalidRequest' + 500: + $ref: '#/components/responses/InternalError' + 503: + $ref: '#/components/responses/CurrentlySyncing' + post: + tags: + - MinimalSet + summary: "Publish a signed attestation." + description: "Instructs the beacon node to broadcast a newly signed IndexedAttestation object to the intended shard subnet. The beacon node is not required to validate the signed IndexedAttestation, and a successful response (20X) only indicates that the broadcast has been successful. The beacon node is expected to integrate the new attestation into its state, and therefore validate the attestation internally, however attestations which fail the validation are still broadcast but a different status code is returned (202)" + parameters: + - name: attestation + in: query + required: true + description: "An `IndexedAttestation` structure, as originally provided by the beacon node, but now with the signature field completed." + schema: + $ref: '#/components/schemas/IndexedAttestation' + responses: + 200: + description: "The attestation was validated successfully and has been broadcast. It has also been integrated into the beacon node's database." + 202: + description: "The attestation failed validation, but was successfully broadcast anyway. It was not integrated into the beacon node's database." 
+ 400: + $ref: '#/components/responses/InvalidRequest' + 500: + $ref: '#/components/responses/InternalError' + 503: + $ref: '#/components/responses/CurrentlySyncing' + +components: + schemas: + pubkey: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{96}$" + description: "The validator's BLS public key, uniquely identifying them. _48-bytes, hex encoded with 0x prefix, case insensitive._" + example: "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc" + version: + type: string + description: "A string which uniquely identifies the client implementation and its version; similar to [HTTP User-Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3)." + example: "Lighthouse / v0.1.5 (Linux x86_64)" + genesis_time: + type: integer + format: uint64 + description: "The genesis_time configured for the beacon node, which is the unix time at which the Eth2.0 chain began." + example: 1557716289 + ValidatorDuty: + type: object + properties: + validator_pubkey: + $ref: '#/components/schemas/pubkey' + attestation_slot: + type: integer + format: uint64 + description: "The slot at which the validator must attest." + attestation_shard: + type: integer + format: uint64 + description: "The shard in which the validator must attest." + block_proposal_slot: + type: integer + format: uint64 + nullable: true + description: "The slot in which a validator must propose a block, or `null` if block production is not required." + SyncingStatus: + type: object + nullable: true + properties: + starting_slot: + type: integer + format: uint64 + description: "The slot at which syncing started (will only be reset after the sync reached its head)" + current_slot: + type: integer + format: uint64 + description: "The most recent slot sync'd by the beacon node." + highest_slot: + type: integer + format: uint64 + description: "Globally, the estimated most recent slot number, or current target slot number." 
+ + BeaconBlock: + description: "The [`BeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#beaconblock) object from the Eth2.0 spec." + allOf: + - $ref: '#/components/schemas/BeaconBlockCommon' + - type: object + properties: + body: + $ref: '#/components/schemas/BeaconBlockBody' + BeaconBlockHeader: + description: "The [`BeaconBlockHeader`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#beaconblockheader) object from the Eth2.0 spec." + allOf: + - $ref: '#/components/schemas/BeaconBlockCommon' + - type: object + properties: + body_root: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + description: "The tree hash merkle root of the `BeaconBlockBody` for the `BeaconBlock`" + BeaconBlockCommon: + # An abstract object to collect the common fields between the BeaconBlockHeader and the BeaconBlock objects + type: object + properties: + slot: + type: integer + format: uint64 + description: "The slot to which this block corresponds." + parent_root: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + description: "The signing merkle root of the parent `BeaconBlock`." + state_root: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + description: "The tree hash merkle root of the `BeaconState` for the `BeaconBlock`." + signature: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{192}$" + example: "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" + description: "The BLS signature of the `BeaconBlock` made by the validator of the block." + BeaconBlockBody: + type: object + description: "The [`BeaconBlockBody`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#beaconblockbody) object from the Eth2.0 spec." 
+ properties: + randao_reveal: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{192}$" + description: "The RanDAO reveal value provided by the validator." + eth1_data: + title: Eth1Data + type: object + description: "The [`Eth1Data`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#eth1data) object from the Eth2.0 spec." + properties: + deposit_root: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "Root of the deposit tree." + deposit_count: + type: integer + format: uint64 + description: "Total number of deposits." + block_hash: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "Ethereum 1.x block hash." + graffiti: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + proposer_slashings: + type: array + items: + title: ProposerSlashings + type: object + description: "The [`ProposerSlashing`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#proposerslashing) object from the Eth2.0 spec." + properties: + proposer_index: + type: integer + format: uint64 + description: "The index of the proposer to be slashed." + header_1: + $ref: '#/components/schemas/BeaconBlockHeader' + header_2: + $ref: '#/components/schemas/BeaconBlockHeader' + attester_slashings: + type: array + items: + title: AttesterSlashings + type: object + description: "The [`AttesterSlashing`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attesterslashing) object from the Eth2.0 spec." + properties: + attestation_1: + $ref: '#/components/schemas/IndexedAttestation' + attestation_2: + $ref: '#/components/schemas/IndexedAttestation' + attestations: + type: array + items: + title: Attestation + type: object + description: "The [`Attestation`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestation) object from the Eth2.0 spec." 
+ properties: + aggregation_bitfield: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]+$" + description: "Attester aggregation bitfield." + custody_bitfield: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]+$" + description: "Custody bitfield." + signature: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{192}$" + description: "BLS aggregate signature." + data: + $ref: '#/components/schemas/AttestationData' + deposits: + type: array + items: + title: Deposit + type: object + description: "The [`Deposit`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#deposit) object from the Eth2.0 spec." + properties: + proof: + type: array + description: "Branch in the deposit tree." + items: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + minItems: 32 + maxItems: 32 + index: + type: integer + format: uint64 + description: "Index in the deposit tree." + data: + title: DepositData + type: object + description: "The [`DepositData`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#depositdata) object from the Eth2.0 spec." + properties: + pubkey: + $ref: '#/components/schemas/pubkey' + withdrawal_credentials: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "The withdrawal credentials." + amount: + type: integer + format: uint64 + description: "Amount in Gwei." + signature: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{192}$" + description: "Container self-signature." + voluntary_exits: + type: array + items: + title: VoluntaryExit + type: object + description: "The [`VoluntaryExit`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#voluntaryexit) object from the Eth2.0 spec." + properties: + epoch: + type: integer + format: uint64 + description: "Minimum epoch for processing exit." + validator_index: + type: integer + format: uint64 + description: "Index of the exiting validator." 
+ signature: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{192}$" + description: "Validator signature." + transfers: + type: array + items: + title: Transfer + type: object + description: "The [`Transfer`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#transfer) object from the Eth2.0 spec." + properties: + sender: + type: integer + format: uint64 + description: "Sender index." + recipient: + type: integer + format: uint64 + description: "Recipient index." + amount: + type: integer + format: uint64 + description: "Amount in Gwei." + fee: + type: integer + format: uint64 + description: "Fee in Gwei for block producer." + slot: + type: integer + format: uint64 + description: "Inclusion slot." + pubkey: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{96}$" + description: "Sender withdrawal public key." + signature: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{192}$" + description: "Sender signature." + + Fork: + type: object + description: "The [`Fork`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#Fork) object from the Eth2.0 spec." + properties: + previous_version: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{8}$" + description: "Previous fork version." + current_version: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{8}$" + description: "Current fork version." + epoch: + type: integer + format: uint64 + description: "Fork epoch number." + IndexedAttestation: + type: object + description: "The [`IndexedAttestation`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#indexedattestation) object from the Eth2.0 spec." + properties: + custody_bit_0_indices: + type: array + description: "Validator indices for 0 bits." + items: + type: integer + format: uint64 + custody_bit_1_indices: + type: array + description: "Validator indices for 1 bits." 
+ items: + type: integer + format: uint64 + signature: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{192}$" + example: "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" + description: "The BLS signature of the `IndexedAttestation`, created by the validator of the attestation." + data: + $ref: '#/components/schemas/AttestationData' + AttestationData: + type: object + description: "The [`AttestationData`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestationdata) object from the Eth2.0 spec." + properties: + beacon_block_root: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "LMD GHOST vote." + source_epoch: + type: integer + format: uint64 + description: "Source epoch from FFG vote." + source_root: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "Source root from FFG vote." + target_epoch: + type: integer + format: uint64 + description: "Target epoch from FFG vote." + target_root: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "Target root from FFG vote." + crosslink: + title: CrossLink + type: object + description: "The [`Crosslink`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#crosslink) object from the Eth2.0 spec, contains data from epochs [`start_epoch`, `end_epoch`)." + properties: + shard: + type: integer + format: uint64 + description: "The shard number." + start_epoch: + type: integer + format: uint64 + description: "The first epoch which the crosslinking data references." + end_epoch: + type: integer + format: uint64 + description: "The 'end' epoch referred to by the crosslinking data; no data in this Crosslink should refer to the `end_epoch` since it is not included in the crosslinking data interval." 
+ parent_root: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "Root of the previous crosslink." + data_root: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "Root of the crosslinked shard data since the previous crosslink." + + responses: + Success: + description: "Request successful." + InvalidRequest: + description: "Invalid request syntax." + InternalError: + description: "Beacon node internal error." + CurrentlySyncing: + description: "Beacon node is currently syncing, try again later." + NotFound: + description: "The requested API endpoint does not exist." diff --git a/test_generators/README.md b/test_generators/README.md index 309a64bd9..95d7e70a8 100644 --- a/test_generators/README.md +++ b/test_generators/README.md @@ -2,7 +2,7 @@ This directory contains all the generators for YAML tests, consumed by Eth 2.0 client implementations. -Any issues with the generators and/or generated tests should be filed in the repository that hosts the generator outputs, here: [ethereum/eth2.0-tests](https://github.com/ethereum/eth2.0-tests). +Any issues with the generators and/or generated tests should be filed in the repository that hosts the generator outputs, here: [ethereum/eth2.0-spec-tests](https://github.com/ethereum/eth2.0-spec-tests). Whenever a release is made, the new tests are automatically built, and [eth2TestGenBot](https://github.com/eth2TestGenBot) commits the changes to the test repository. @@ -12,7 +12,7 @@ Whenever a release is made, the new tests are automatically built, and Prerequisites: - Python 3 installed - PIP 3 -- GNU make +- GNU Make ### Cleaning @@ -66,7 +66,7 @@ eth-utils==1.6.0 The config helper and pyspec is optional, but preferred. We encourage generators to derive tests from the spec itself in order to prevent code duplication and outdated tests. Applying configurations to the spec is simple and enables you to create test suites with different contexts. 
-Note: make sure to run `make pyspec` from the root of the specs repository in order to build the pyspec requirement. +*Note*: Make sure to run `make pyspec` from the root of the specs repository in order to build the pyspec requirement. Install all the necessary requirements (re-run when you add more): ```bash @@ -134,7 +134,7 @@ if __name__ == "__main__": Recommendations: - You can have more than just one suite creator, e.g. ` gen_runner.run_generator("foo", [bar_test_suite, abc_test_suite, example_test_suite])`. - You can concatenate lists of test cases if you don't want to split it up in suites, however, make sure they can be run with one handler. -- You can split your suite creators into different python files/packages; this is good for code organization. +- You can split your suite creators into different Python files/packages; this is good for code organization. - Use config "minimal" for performance, but also implement a suite with the default config where necessary. - You may be able to write your test suite creator in a way where it does not make assumptions on constants. If so, you can generate test suites with different configurations for the same scenario (see example). @@ -157,8 +157,8 @@ To add a new test generator that builds `New Tests`: [circleci config file](https://github.com/ethereum/eth2.0-test-generators/blob/master/.circleci/config.yml) if desired to increase code quality. -Note: you do not have to change the makefile. -However, if necessary (e.g. not using python, or mixing in other languages), submit an issue, and it can be a special case. +*Note*: You do not have to change the makefile. +However, if necessary (e.g. not using Python, or mixing in other languages), submit an issue, and it can be a special case. Do note that generators should be easy to maintain, lean, and based on the spec. @@ -167,5 +167,5 @@ Do note that generators should be easy to maintain, lean, and based on the spec. 
If a test generator is not needed anymore, undo the steps described above and make a new release: 1. Remove the generator directory. -2. Remove the generated tests in the [`eth2.0-tests`](https://github.com/ethereum/eth2.0-tests) repository by opening a PR there. +2. Remove the generated tests in the [`eth2.0-spec-tests`](https://github.com/ethereum/eth2.0-spec-tests) repository by opening a pull request there. 3. Make a new release. diff --git a/test_generators/epoch_processing/main.py b/test_generators/epoch_processing/main.py index 8f067e4a3..2ce895fc5 100644 --- a/test_generators/epoch_processing/main.py +++ b/test_generators/epoch_processing/main.py @@ -1,6 +1,7 @@ from typing import Callable, Iterable -from eth2spec.phase0 import spec +from eth2spec.phase0 import spec as spec_phase0 +from eth2spec.phase1 import spec as spec_phase1 from eth2spec.test.epoch_processing import ( test_process_crosslinks, test_process_registry_updates @@ -14,7 +15,8 @@ def create_suite(transition_name: str, config_name: str, get_cases: Callable[[], -> Callable[[str], gen_typing.TestSuiteOutput]: def suite_definition(configs_path: str) -> gen_typing.TestSuiteOutput: presets = loader.load_presets(configs_path, config_name) - spec.apply_constants_preset(presets) + spec_phase0.apply_constants_preset(presets) + spec_phase1.apply_constants_preset(presets) return ("%s_%s" % (transition_name, config_name), transition_name, gen_suite.render_suite( title="%s epoch processing" % transition_name, diff --git a/test_generators/operations/main.py b/test_generators/operations/main.py index 96c639d12..82e05b307 100644 --- a/test_generators/operations/main.py +++ b/test_generators/operations/main.py @@ -13,14 +13,16 @@ from eth2spec.test.block_processing import ( from gen_base import gen_runner, gen_suite, gen_typing from gen_from_tests.gen import generate_from_tests from preset_loader import loader -from eth2spec.phase0 import spec +from eth2spec.phase0 import spec as spec_phase0 +from 
eth2spec.phase1 import spec as spec_phase1 def create_suite(operation_name: str, config_name: str, get_cases: Callable[[], Iterable[gen_typing.TestCase]]) \ -> Callable[[str], gen_typing.TestSuiteOutput]: def suite_definition(configs_path: str) -> gen_typing.TestSuiteOutput: presets = loader.load_presets(configs_path, config_name) - spec.apply_constants_preset(presets) + spec_phase0.apply_constants_preset(presets) + spec_phase1.apply_constants_preset(presets) return ("%s_%s" % (operation_name, config_name), operation_name, gen_suite.render_suite( title="%s operation" % operation_name, @@ -42,8 +44,8 @@ if __name__ == "__main__": create_suite('attester_slashing', 'mainnet', lambda: generate_from_tests(test_process_attester_slashing)), create_suite('block_header', 'minimal', lambda: generate_from_tests(test_process_block_header)), create_suite('block_header', 'mainnet', lambda: generate_from_tests(test_process_block_header)), - create_suite('deposit', 'minimal', lambda: generate_from_tests(test_process_deposit)), - create_suite('deposit', 'mainnet', lambda: generate_from_tests(test_process_deposit)), + create_suite('deposit', 'minimal', lambda: generate_from_tests(test_process_deposit)), + create_suite('deposit', 'mainnet', lambda: generate_from_tests(test_process_deposit)), create_suite('proposer_slashing', 'minimal', lambda: generate_from_tests(test_process_proposer_slashing)), create_suite('proposer_slashing', 'mainnet', lambda: generate_from_tests(test_process_proposer_slashing)), create_suite('transfer', 'minimal', lambda: generate_from_tests(test_process_transfer)), diff --git a/test_generators/sanity/main.py b/test_generators/sanity/main.py index bba6ed03d..a9c0fe160 100644 --- a/test_generators/sanity/main.py +++ b/test_generators/sanity/main.py @@ -5,14 +5,16 @@ from eth2spec.test.sanity import test_blocks, test_slots from gen_base import gen_runner, gen_suite, gen_typing from gen_from_tests.gen import generate_from_tests from preset_loader import loader 
-from eth2spec.phase0 import spec +from eth2spec.phase0 import spec as spec_phase0 +from eth2spec.phase1 import spec as spec_phase1 def create_suite(handler_name: str, config_name: str, get_cases: Callable[[], Iterable[gen_typing.TestCase]]) \ -> Callable[[str], gen_typing.TestSuiteOutput]: def suite_definition(configs_path: str) -> gen_typing.TestSuiteOutput: presets = loader.load_presets(configs_path, config_name) - spec.apply_constants_preset(presets) + spec_phase0.apply_constants_preset(presets) + spec_phase1.apply_constants_preset(presets) return ("%sanity_s_%s" % (handler_name, config_name), handler_name, gen_suite.render_suite( title="sanity testing", diff --git a/test_generators/shuffling/main.py b/test_generators/shuffling/main.py index bb14520e1..862c4d910 100644 --- a/test_generators/shuffling/main.py +++ b/test_generators/shuffling/main.py @@ -1,4 +1,5 @@ -from eth2spec.phase0 import spec +from eth2spec.phase0 import spec as spec_phase0 +from eth2spec.phase1 import spec as spec_phase1 from eth_utils import ( to_dict, to_tuple ) @@ -15,14 +16,15 @@ def shuffling_case(seed: spec.Bytes32, count: int): @to_tuple def shuffling_test_cases(): - for seed in [spec.hash(spec.int_to_bytes4(seed_init_value)) for seed_init_value in range(30)]: + for seed in [spec.hash(spec.int_to_bytes(seed_init_value, length=4)) for seed_init_value in range(30)]: for count in [0, 1, 2, 3, 5, 10, 33, 100, 1000]: yield shuffling_case(seed, count) def mini_shuffling_suite(configs_path: str) -> gen_typing.TestSuiteOutput: presets = loader.load_presets(configs_path, 'minimal') - spec.apply_constants_preset(presets) + spec_phase0.apply_constants_preset(presets) + spec_phase1.apply_constants_preset(presets) return ("shuffling_minimal", "core", gen_suite.render_suite( title="Swap-or-Not Shuffling tests with minimal config", @@ -37,7 +39,8 @@ def mini_shuffling_suite(configs_path: str) -> gen_typing.TestSuiteOutput: def full_shuffling_suite(configs_path: str) -> gen_typing.TestSuiteOutput: 
presets = loader.load_presets(configs_path, 'mainnet') - spec.apply_constants_preset(presets) + spec_phase0.apply_constants_preset(presets) + spec_phase1.apply_constants_preset(presets) return ("shuffling_full", "core", gen_suite.render_suite( title="Swap-or-Not Shuffling tests with mainnet config", diff --git a/test_generators/ssz_static/main.py b/test_generators/ssz_static/main.py index e8995b918..7de5237d1 100644 --- a/test_generators/ssz_static/main.py +++ b/test_generators/ssz_static/main.py @@ -2,7 +2,7 @@ from random import Random from eth2spec.debug import random_value, encode from eth2spec.phase0 import spec -from eth2spec.utils.minimal_ssz import ( +from eth2spec.utils.ssz.ssz_impl import ( hash_tree_root, signing_root, serialize, diff --git a/test_libs/pyspec/README.md b/test_libs/pyspec/README.md index 330972e77..2c2226ee7 100644 --- a/test_libs/pyspec/README.md +++ b/test_libs/pyspec/README.md @@ -1,11 +1,11 @@ -# ETH 2.0 PySpec +# Eth 2.0 Executable Python Spec (PySpec) -The Python executable spec is built from the ETH 2.0 specification, +The executable Python spec is built from the Eth 2.0 specification, complemented with the necessary helper functions for hashing, BLS, and more. With this executable spec, test-generators can easily create test-vectors for client implementations, - and the spec itself can be verified to be consistent and coherent, through sanity tests implemented with pytest. + and the spec itself can be verified to be consistent and coherent through sanity tests implemented with pytest. ## Building @@ -14,12 +14,12 @@ All the dynamic parts of the spec can be build at once with `make pyspec`. Alternatively, you can build a sub-set of the pyspec: `make phase0`. -Or, to build a single file, specify the path, e.g. `make test_libs/pyspec/eth2spec/phase0/spec.py` +Or, to build a single file, specify the path, e.g. `make test_libs/pyspec/eth2spec/phase0/spec.py`. 
## Py-tests -After building, you can install the dependencies for running the `pyspec` tests with `make install_test` +After building, you can install the dependencies for running the `pyspec` tests with `make install_test`. These tests are not intended for client-consumption. These tests are sanity tests, to verify if the spec itself is consistent. @@ -28,7 +28,7 @@ These tests are sanity tests, to verify if the spec itself is consistent. #### Automated -Run `make test` from the root of the spec repository. +Run `make test` from the root of the specs repository. #### Manual @@ -40,7 +40,7 @@ python3 -m venv venv . venv/bin/activate pip3 install -r requirements-testing.txt ``` -Note: make sure to run `make -B pyspec` from the root of the specs repository, +*Note*: Make sure to run `make -B pyspec` from the root of the specs repository, to build the parts of the pyspec module derived from the markdown specs. The `-B` flag may be helpful to force-overwrite the `pyspec` output after you made a change to the markdown source files. @@ -59,4 +59,4 @@ The pyspec is not a replacement. ## License -Same as the spec itself, see [LICENSE](../../LICENSE) file in spec repository root. +Same as the spec itself; see [LICENSE](../../LICENSE) file in the specs repository root. 
diff --git a/test_libs/pyspec/__init__.py b/test_libs/pyspec/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test_libs/pyspec/eth2spec/debug/decode.py b/test_libs/pyspec/eth2spec/debug/decode.py index aeac3924d..5ce116025 100644 --- a/test_libs/pyspec/eth2spec/debug/decode.py +++ b/test_libs/pyspec/eth2spec/debug/decode.py @@ -1,28 +1,39 @@ -from eth2spec.utils.minimal_ssz import hash_tree_root +from eth2spec.utils.ssz.ssz_impl import hash_tree_root +from eth2spec.utils.ssz.ssz_typing import ( + is_uint_type, is_bool_type, is_list_type, + is_vector_type, is_bytes_type, is_bytesn_type, is_container_type, + read_vector_elem_type, read_list_elem_type, + Vector, BytesN +) -def decode(json, typ): - if isinstance(typ, str) and typ[:4] == 'uint': - return json - elif typ == 'bool': - assert json in (True, False) - return json - elif isinstance(typ, list): - return [decode(element, typ[0]) for element in json] - elif isinstance(typ, str) and typ[:4] == 'byte': - return bytes.fromhex(json[2:]) - elif hasattr(typ, 'fields'): +def decode(data, typ): + if is_uint_type(typ): + return data + elif is_bool_type(typ): + assert data in (True, False) + return data + elif is_list_type(typ): + elem_typ = read_list_elem_type(typ) + return [decode(element, elem_typ) for element in data] + elif is_vector_type(typ): + elem_typ = read_vector_elem_type(typ) + return Vector(decode(element, elem_typ) for element in data) + elif is_bytes_type(typ): + return bytes.fromhex(data[2:]) + elif is_bytesn_type(typ): + return BytesN(bytes.fromhex(data[2:])) + elif is_container_type(typ): temp = {} - for field, subtype in typ.fields.items(): - temp[field] = decode(json[field], subtype) - if field + "_hash_tree_root" in json: - assert(json[field + "_hash_tree_root"][2:] == + for field, subtype in typ.get_fields(): + temp[field] = decode(data[field], subtype) + if field + "_hash_tree_root" in data: + assert(data[field + "_hash_tree_root"][2:] == hash_tree_root(temp[field], 
subtype).hex()) ret = typ(**temp) - if "hash_tree_root" in json: - assert(json["hash_tree_root"][2:] == + if "hash_tree_root" in data: + assert(data["hash_tree_root"][2:] == hash_tree_root(ret, typ).hex()) return ret else: - print(json, typ) - raise Exception("Type not recognized") + raise Exception(f"Type not recognized: data={data}, typ={typ}") diff --git a/test_libs/pyspec/eth2spec/debug/encode.py b/test_libs/pyspec/eth2spec/debug/encode.py index d3513e638..61dd87928 100644 --- a/test_libs/pyspec/eth2spec/debug/encode.py +++ b/test_libs/pyspec/eth2spec/debug/encode.py @@ -1,28 +1,36 @@ -from eth2spec.utils.minimal_ssz import hash_tree_root +from eth2spec.utils.ssz.ssz_impl import hash_tree_root +from eth2spec.utils.ssz.ssz_typing import ( + is_uint_type, is_bool_type, is_list_type, is_vector_type, is_container_type, + read_elem_type, + uint +) def encode(value, typ, include_hash_tree_roots=False): - if isinstance(typ, str) and typ[:4] == 'uint': - if typ[4:] == '128' or typ[4:] == '256': + if is_uint_type(typ): + if hasattr(typ, '__supertype__'): + typ = typ.__supertype__ + # Larger uints are boxed and the class declares their byte length + if issubclass(typ, uint) and typ.byte_len > 8: return str(value) return value - elif typ == 'bool': + elif is_bool_type(typ): assert value in (True, False) return value - elif isinstance(typ, list): - return [encode(element, typ[0], include_hash_tree_roots) for element in value] - elif isinstance(typ, str) and typ[:4] == 'byte': + elif is_list_type(typ) or is_vector_type(typ): + elem_typ = read_elem_type(typ) + return [encode(element, elem_typ, include_hash_tree_roots) for element in value] + elif isinstance(typ, type) and issubclass(typ, bytes): # both bytes and BytesN return '0x' + value.hex() - elif hasattr(typ, 'fields'): + elif is_container_type(typ): ret = {} - for field, subtype in typ.fields.items(): - ret[field] = encode(getattr(value, field), subtype, include_hash_tree_roots) + for field, subtype in 
typ.get_fields(): + field_value = getattr(value, field) + ret[field] = encode(field_value, subtype, include_hash_tree_roots) if include_hash_tree_roots: - ret[field + "_hash_tree_root"] = '0x' + hash_tree_root(getattr(value, field), subtype).hex() + ret[field + "_hash_tree_root"] = '0x' + hash_tree_root(field_value, subtype).hex() if include_hash_tree_roots: ret["hash_tree_root"] = '0x' + hash_tree_root(value, typ).hex() return ret else: - print(value, typ) - raise Exception("Type not recognized") - + raise Exception(f"Type not recognized: value={value}, typ={typ}") diff --git a/test_libs/pyspec/eth2spec/debug/random_value.py b/test_libs/pyspec/eth2spec/debug/random_value.py index a853d2328..3edcc8808 100644 --- a/test_libs/pyspec/eth2spec/debug/random_value.py +++ b/test_libs/pyspec/eth2spec/debug/random_value.py @@ -2,12 +2,19 @@ from random import Random from typing import Any from enum import Enum +from eth2spec.utils.ssz.ssz_impl import is_basic_type -UINT_SIZES = [8, 16, 32, 64, 128, 256] +from eth2spec.utils.ssz.ssz_typing import ( + is_uint_type, is_bool_type, is_list_type, + is_vector_type, is_bytes_type, is_bytesn_type, is_container_type, + read_vector_elem_type, read_list_elem_type, + uint_byte_size +) -basic_types = ["uint%d" % v for v in UINT_SIZES] + ['bool', 'byte'] +# in bytes +UINT_SIZES = (1, 2, 4, 8, 16, 32) -random_mode_names = ["random", "zero", "max", "nil", "one", "lengthy"] +random_mode_names = ("random", "zero", "max", "nil", "one", "lengthy") class RandomizationMode(Enum): @@ -31,7 +38,12 @@ class RandomizationMode(Enum): return self.value in [0, 4, 5] -def get_random_ssz_object(rng: Random, typ: Any, max_bytes_length: int, max_list_length: int, mode: RandomizationMode, chaos: bool) -> Any: +def get_random_ssz_object(rng: Random, + typ: Any, + max_bytes_length: int, + max_list_length: int, + mode: RandomizationMode, + chaos: bool) -> Any: """ Create an object for a given type, filled with random data. 
:param rng: The random number generator to use. @@ -44,94 +56,103 @@ def get_random_ssz_object(rng: Random, typ: Any, max_bytes_length: int, max_list """ if chaos: mode = rng.choice(list(RandomizationMode)) - if isinstance(typ, str): + if is_bytes_type(typ): # Bytes array - if typ == 'bytes': - if mode == RandomizationMode.mode_nil_count: - return b'' - if mode == RandomizationMode.mode_max_count: - return get_random_bytes_list(rng, max_bytes_length) - if mode == RandomizationMode.mode_one_count: - return get_random_bytes_list(rng, 1) - if mode == RandomizationMode.mode_zero: - return b'\x00' - if mode == RandomizationMode.mode_max: - return b'\xff' - return get_random_bytes_list(rng, rng.randint(0, max_bytes_length)) - elif typ[:5] == 'bytes' and len(typ) > 5: - length = int(typ[5:]) - # Sanity, don't generate absurdly big random values - # If a client is aiming to performance-test, they should create a benchmark suite. - assert length <= max_bytes_length - if mode == RandomizationMode.mode_zero: - return b'\x00' * length - if mode == RandomizationMode.mode_max: - return b'\xff' * length - return get_random_bytes_list(rng, length) - # Basic types + if mode == RandomizationMode.mode_nil_count: + return b'' + elif mode == RandomizationMode.mode_max_count: + return get_random_bytes_list(rng, max_bytes_length) + elif mode == RandomizationMode.mode_one_count: + return get_random_bytes_list(rng, 1) + elif mode == RandomizationMode.mode_zero: + return b'\x00' + elif mode == RandomizationMode.mode_max: + return b'\xff' + else: + return get_random_bytes_list(rng, rng.randint(0, max_bytes_length)) + elif is_bytesn_type(typ): + # BytesN + length = typ.length + # Sanity, don't generate absurdly big random values + # If a client is aiming to performance-test, they should create a benchmark suite. 
+ assert length <= max_bytes_length + if mode == RandomizationMode.mode_zero: + return b'\x00' * length + elif mode == RandomizationMode.mode_max: + return b'\xff' * length + else: + return get_random_bytes_list(rng, length) + elif is_basic_type(typ): + # Basic types + if mode == RandomizationMode.mode_zero: + return get_min_basic_value(typ) + elif mode == RandomizationMode.mode_max: + return get_max_basic_value(typ) else: - if mode == RandomizationMode.mode_zero: - return get_min_basic_value(typ) - if mode == RandomizationMode.mode_max: - return get_max_basic_value(typ) return get_random_basic_value(rng, typ) - # Vector: - elif isinstance(typ, list) and len(typ) == 2: - return [get_random_ssz_object(rng, typ[0], max_bytes_length, max_list_length, mode, chaos) for _ in range(typ[1])] - # List: - elif isinstance(typ, list) and len(typ) == 1: + elif is_vector_type(typ): + # Vector + elem_typ = read_vector_elem_type(typ) + return [ + get_random_ssz_object(rng, elem_typ, max_bytes_length, max_list_length, mode, chaos) + for _ in range(typ.length) + ] + elif is_list_type(typ): + # List + elem_typ = read_list_elem_type(typ) length = rng.randint(0, max_list_length) if mode == RandomizationMode.mode_one_count: length = 1 - if mode == RandomizationMode.mode_max_count: + elif mode == RandomizationMode.mode_max_count: length = max_list_length - return [get_random_ssz_object(rng, typ[0], max_bytes_length, max_list_length, mode, chaos) for _ in range(length)] - # Container: - elif hasattr(typ, 'fields'): - return typ(**{field: get_random_ssz_object(rng, subtype, max_bytes_length, max_list_length, mode, chaos) for field, subtype in typ.fields.items()}) + + return [ + get_random_ssz_object(rng, elem_typ, max_bytes_length, max_list_length, mode, chaos) + for _ in range(length) + ] + elif is_container_type(typ): + # Container + return typ(**{ + field: + get_random_ssz_object(rng, subtype, max_bytes_length, max_list_length, mode, chaos) + for field, subtype in typ.get_fields() + }) 
else: - print(typ) - raise Exception("Type not recognized") + raise Exception(f"Type not recognized: typ={typ}") def get_random_bytes_list(rng: Random, length: int) -> bytes: return bytes(rng.getrandbits(8) for _ in range(length)) -def get_random_basic_value(rng: Random, typ: str) -> Any: - if typ == 'bool': +def get_random_basic_value(rng: Random, typ) -> Any: + if is_bool_type(typ): return rng.choice((True, False)) - if typ[:4] == 'uint': - size = int(typ[4:]) + elif is_uint_type(typ): + size = uint_byte_size(typ) assert size in UINT_SIZES - return rng.randint(0, 2**size - 1) - if typ == 'byte': - return rng.randint(0, 8) + return rng.randint(0, 256**size - 1) else: - raise ValueError("Not a basic type") + raise ValueError(f"Not a basic type: typ={typ}") -def get_min_basic_value(typ: str) -> Any: - if typ == 'bool': +def get_min_basic_value(typ) -> Any: + if is_bool_type(typ): return False - if typ[:4] == 'uint': - size = int(typ[4:]) + elif is_uint_type(typ): + size = uint_byte_size(typ) assert size in UINT_SIZES return 0 - if typ == 'byte': - return 0x00 else: - raise ValueError("Not a basic type") + raise ValueError(f"Not a basic type: typ={typ}") -def get_max_basic_value(typ: str) -> Any: - if typ == 'bool': +def get_max_basic_value(typ) -> Any: + if is_bool_type(typ): return True - if typ[:4] == 'uint': - size = int(typ[4:]) + elif is_uint_type(typ): + size = uint_byte_size(typ) assert size in UINT_SIZES - return 2**size - 1 - if typ == 'byte': - return 0xff + return 256**size - 1 else: - raise ValueError("Not a basic type") + raise ValueError(f"Not a basic type: typ={typ}") diff --git a/test_libs/pyspec/eth2spec/phase0/state_transition.py b/test_libs/pyspec/eth2spec/phase0/state_transition.py deleted file mode 100644 index 1bef358d4..000000000 --- a/test_libs/pyspec/eth2spec/phase0/state_transition.py +++ /dev/null @@ -1,112 +0,0 @@ -from . 
import spec - - -from typing import ( - Any, - Callable, - List -) - -from .spec import ( - BeaconState, - BeaconBlock, - Slot, -) - - -def expected_deposit_count(state: BeaconState) -> int: - return min( - spec.MAX_DEPOSITS, - state.latest_eth1_data.deposit_count - state.deposit_index - ) - - -def process_operation_type(state: BeaconState, - operations: List[Any], - max_operations: int, - tx_fn: Callable[[BeaconState, Any], None]) -> None: - assert len(operations) <= max_operations - for operation in operations: - tx_fn(state, operation) - - -def process_operations(state: BeaconState, block: BeaconBlock) -> None: - process_operation_type( - state, - block.body.proposer_slashings, - spec.MAX_PROPOSER_SLASHINGS, - spec.process_proposer_slashing, - ) - - process_operation_type( - state, - block.body.attester_slashings, - spec.MAX_ATTESTER_SLASHINGS, - spec.process_attester_slashing, - ) - - process_operation_type( - state, - block.body.attestations, - spec.MAX_ATTESTATIONS, - spec.process_attestation, - ) - - assert len(block.body.deposits) == expected_deposit_count(state) - process_operation_type( - state, - block.body.deposits, - spec.MAX_DEPOSITS, - spec.process_deposit, - ) - - process_operation_type( - state, - block.body.voluntary_exits, - spec.MAX_VOLUNTARY_EXITS, - spec.process_voluntary_exit, - ) - - assert len(block.body.transfers) == len(set(block.body.transfers)) - process_operation_type( - state, - block.body.transfers, - spec.MAX_TRANSFERS, - spec.process_transfer, - ) - - -def process_block(state: BeaconState, - block: BeaconBlock, - verify_state_root: bool=False) -> None: - spec.process_block_header(state, block) - spec.process_randao(state, block) - spec.process_eth1_data(state, block) - - process_operations(state, block) - if verify_state_root: - spec.verify_block_state_root(state, block) - - -def process_epoch_transition(state: BeaconState) -> None: - spec.process_justification_and_finalization(state) - spec.process_crosslinks(state) - 
spec.process_rewards_and_penalties(state) - spec.process_registry_updates(state) - spec.process_slashings(state) - spec.process_final_updates(state) - - -def state_transition_to(state: BeaconState, up_to: Slot) -> BeaconState: - while state.slot < up_to: - spec.cache_state(state) - if (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0: - process_epoch_transition(state) - spec.advance_slot(state) - - -def state_transition(state: BeaconState, - block: BeaconBlock, - verify_state_root: bool=False) -> BeaconState: - state_transition_to(state, block.slot) - process_block(state, block, verify_state_root) diff --git a/test_libs/pyspec/eth2spec/phase1/__init__.py b/test_libs/pyspec/eth2spec/phase1/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test_libs/pyspec/eth2spec/test/block_processing/test_process_attestation.py b/test_libs/pyspec/eth2spec/test/block_processing/test_process_attestation.py deleted file mode 100644 index af6b39ef6..000000000 --- a/test_libs/pyspec/eth2spec/test/block_processing/test_process_attestation.py +++ /dev/null @@ -1,255 +0,0 @@ -from copy import deepcopy - -import eth2spec.phase0.spec as spec -from eth2spec.phase0.spec import ( - get_current_epoch, - process_attestation -) -from eth2spec.phase0.state_transition import ( - state_transition_to, -) -from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls -from eth2spec.test.helpers.attestations import ( - get_valid_attestation, - sign_attestation, -) -from eth2spec.test.helpers.state import ( - next_epoch, - next_slot, -) -from eth2spec.test.helpers.block import apply_empty_block - - -def run_attestation_processing(state, attestation, valid=True): - """ - Run ``process_attestation``, yielding: - - pre-state ('pre') - - attestation ('attestation') - - post-state ('post'). 
- If ``valid == False``, run expecting ``AssertionError`` - """ - # yield pre-state - yield 'pre', state - - yield 'attestation', attestation - - # If the attestation is invalid, processing is aborted, and there is no post-state. - if not valid: - expect_assertion_error(lambda: process_attestation(state, attestation)) - yield 'post', None - return - - current_epoch_count = len(state.current_epoch_attestations) - previous_epoch_count = len(state.previous_epoch_attestations) - - # process attestation - process_attestation(state, attestation) - - # Make sure the attestation has been processed - if attestation.data.target_epoch == get_current_epoch(state): - assert len(state.current_epoch_attestations) == current_epoch_count + 1 - else: - assert len(state.previous_epoch_attestations) == previous_epoch_count + 1 - - # yield post-state - yield 'post', state - - -@spec_state_test -def test_success(state): - attestation = get_valid_attestation(state, signed=True) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - - yield from run_attestation_processing(state, attestation) - - -@spec_state_test -def test_success_previous_epoch(state): - attestation = get_valid_attestation(state, signed=True) - next_epoch(state) - apply_empty_block(state) - - yield from run_attestation_processing(state, attestation) - - -@always_bls -@spec_state_test -def test_invalid_attestation_signature(state): - attestation = get_valid_attestation(state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - - yield from run_attestation_processing(state, attestation, False) - - -@spec_state_test -def test_before_inclusion_delay(state): - attestation = get_valid_attestation(state, signed=True) - # do not increment slot to allow for inclusion delay - - yield from run_attestation_processing(state, attestation, False) - - -@spec_state_test -def test_after_epoch_slots(state): - attestation = get_valid_attestation(state, signed=True) - # increment past latest inclusion slot - state_transition_to(state, 
state.slot + spec.SLOTS_PER_EPOCH + 1) - apply_empty_block(state) - - yield from run_attestation_processing(state, attestation, False) - - -@spec_state_test -def test_old_source_epoch(state): - state.slot = spec.SLOTS_PER_EPOCH * 5 - state.finalized_epoch = 2 - state.previous_justified_epoch = 3 - state.current_justified_epoch = 4 - attestation = get_valid_attestation(state, slot=(spec.SLOTS_PER_EPOCH * 3) + 1) - - # test logic sanity check: make sure the attestation is pointing to oldest known source epoch - assert attestation.data.source_epoch == state.previous_justified_epoch - - # Now go beyond that, it will be invalid - attestation.data.source_epoch -= 1 - - sign_attestation(state, attestation) - - yield from run_attestation_processing(state, attestation, False) - - -@spec_state_test -def test_wrong_shard(state): - attestation = get_valid_attestation(state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - - attestation.data.shard += 1 - - sign_attestation(state, attestation) - - yield from run_attestation_processing(state, attestation, False) - - -@spec_state_test -def test_new_source_epoch(state): - attestation = get_valid_attestation(state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - - attestation.data.source_epoch += 1 - - sign_attestation(state, attestation) - - yield from run_attestation_processing(state, attestation, False) - - -@spec_state_test -def test_source_root_is_target_root(state): - attestation = get_valid_attestation(state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - - attestation.data.source_root = attestation.data.target_root - - sign_attestation(state, attestation) - - yield from run_attestation_processing(state, attestation, False) - - -@spec_state_test -def test_invalid_current_source_root(state): - state.slot = spec.SLOTS_PER_EPOCH * 5 - state.finalized_epoch = 2 - - state.previous_justified_epoch = 3 - state.previous_justified_root = b'\x01' * 32 - - state.current_justified_epoch = 4 - 
state.current_justified_root = b'\xff' * 32 - - attestation = get_valid_attestation(state, slot=(spec.SLOTS_PER_EPOCH * 3) + 1) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - - # Test logic sanity checks: - assert state.current_justified_root != state.previous_justified_root - assert attestation.data.source_root == state.previous_justified_root - - # Make attestation source root invalid: should be previous justified, not current one - attestation.data.source_root = state.current_justified_root - - sign_attestation(state, attestation) - - yield from run_attestation_processing(state, attestation, False) - - -@spec_state_test -def test_bad_source_root(state): - attestation = get_valid_attestation(state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - - attestation.data.source_root = b'\x42' * 32 - - sign_attestation(state, attestation) - - yield from run_attestation_processing(state, attestation, False) - - -@spec_state_test -def test_non_zero_crosslink_data_root(state): - attestation = get_valid_attestation(state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - - attestation.data.crosslink_data_root = b'\x42' * 32 - - sign_attestation(state, attestation) - - yield from run_attestation_processing(state, attestation, False) - - -@spec_state_test -def test_bad_previous_crosslink(state): - next_epoch(state) - apply_empty_block(state) - - attestation = get_valid_attestation(state, signed=True) - for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY): - next_slot(state) - apply_empty_block(state) - - state.current_crosslinks[attestation.data.shard].epoch += 10 - - yield from run_attestation_processing(state, attestation, False) - - -@spec_state_test -def test_inconsistent_bitfields(state): - attestation = get_valid_attestation(state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - - attestation.custody_bitfield = deepcopy(attestation.aggregation_bitfield) + b'\x00' - - sign_attestation(state, attestation) - - yield from 
run_attestation_processing(state, attestation, False) - - -@spec_state_test -def test_non_empty_custody_bitfield(state): - attestation = get_valid_attestation(state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - - attestation.custody_bitfield = deepcopy(attestation.aggregation_bitfield) - - sign_attestation(state, attestation) - - yield from run_attestation_processing(state, attestation, False) - - -@spec_state_test -def test_empty_aggregation_bitfield(state): - attestation = get_valid_attestation(state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - - attestation.aggregation_bitfield = b'\x00' * len(attestation.aggregation_bitfield) - - sign_attestation(state, attestation) - - yield from run_attestation_processing(state, attestation, False) diff --git a/test_libs/pyspec/eth2spec/test/block_processing/test_process_attester_slashing.py b/test_libs/pyspec/eth2spec/test/block_processing/test_process_attester_slashing.py deleted file mode 100644 index 28e232277..000000000 --- a/test_libs/pyspec/eth2spec/test/block_processing/test_process_attester_slashing.py +++ /dev/null @@ -1,149 +0,0 @@ -import eth2spec.phase0.spec as spec -from eth2spec.phase0.spec import ( - get_beacon_proposer_index, - process_attester_slashing, -) -from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls -from eth2spec.test.helpers.attestations import sign_indexed_attestation -from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing -from eth2spec.test.helpers.block import apply_empty_block -from eth2spec.test.helpers.state import ( - get_balance, - next_epoch, -) - - -def run_attester_slashing_processing(state, attester_slashing, valid=True): - """ - Run ``process_attester_slashing``, yielding: - - pre-state ('pre') - - attester_slashing ('attester_slashing') - - post-state ('post'). 
- If ``valid == False``, run expecting ``AssertionError`` - """ - - yield 'pre', state - yield 'attester_slashing', attester_slashing - - if not valid: - expect_assertion_error(lambda: process_attester_slashing(state, attester_slashing)) - yield 'post', None - return - - slashed_index = attester_slashing.attestation_1.custody_bit_0_indices[0] - pre_slashed_balance = get_balance(state, slashed_index) - - proposer_index = get_beacon_proposer_index(state) - pre_proposer_balance = get_balance(state, proposer_index) - - # Process slashing - process_attester_slashing(state, attester_slashing) - - slashed_validator = state.validator_registry[slashed_index] - - # Check slashing - assert slashed_validator.slashed - assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH - assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH - - if slashed_index != proposer_index: - # lost whistleblower reward - assert get_balance(state, slashed_index) < pre_slashed_balance - # gained whistleblower reward - assert get_balance(state, proposer_index) > pre_proposer_balance - else: - # gained rewards for all slashings, which may include others. And only lost that of themselves. - # Netto at least 0, if more people where slashed, a balance increase. 
- assert get_balance(state, slashed_index) >= pre_slashed_balance - - yield 'post', state - - -@spec_state_test -def test_success_double(state): - attester_slashing = get_valid_attester_slashing(state, signed_1=True, signed_2=True) - - yield from run_attester_slashing_processing(state, attester_slashing) - - -@spec_state_test -def test_success_surround(state): - next_epoch(state) - apply_empty_block(state) - - state.current_justified_epoch += 1 - attester_slashing = get_valid_attester_slashing(state, signed_1=False, signed_2=True) - - # set attestion1 to surround attestation 2 - attester_slashing.attestation_1.data.source_epoch = attester_slashing.attestation_2.data.source_epoch - 1 - attester_slashing.attestation_1.data.target_epoch = attester_slashing.attestation_2.data.target_epoch + 1 - - sign_indexed_attestation(state, attester_slashing.attestation_1) - - yield from run_attester_slashing_processing(state, attester_slashing) - - -@always_bls -@spec_state_test -def test_invalid_sig_1(state): - attester_slashing = get_valid_attester_slashing(state, signed_1=False, signed_2=True) - yield from run_attester_slashing_processing(state, attester_slashing, False) - - -@always_bls -@spec_state_test -def test_invalid_sig_2(state): - attester_slashing = get_valid_attester_slashing(state, signed_1=True, signed_2=False) - yield from run_attester_slashing_processing(state, attester_slashing, False) - - -@always_bls -@spec_state_test -def test_invalid_sig_1_and_2(state): - attester_slashing = get_valid_attester_slashing(state, signed_1=False, signed_2=False) - yield from run_attester_slashing_processing(state, attester_slashing, False) - - -@spec_state_test -def test_same_data(state): - attester_slashing = get_valid_attester_slashing(state, signed_1=False, signed_2=True) - - attester_slashing.attestation_1.data = attester_slashing.attestation_2.data - sign_indexed_attestation(state, attester_slashing.attestation_1) - - yield from run_attester_slashing_processing(state, 
attester_slashing, False) - - -@spec_state_test -def test_no_double_or_surround(state): - attester_slashing = get_valid_attester_slashing(state, signed_1=False, signed_2=True) - - attester_slashing.attestation_1.data.target_epoch += 1 - sign_indexed_attestation(state, attester_slashing.attestation_1) - - yield from run_attester_slashing_processing(state, attester_slashing, False) - - -@spec_state_test -def test_participants_already_slashed(state): - attester_slashing = get_valid_attester_slashing(state, signed_1=True, signed_2=True) - - # set all indices to slashed - attestation_1 = attester_slashing.attestation_1 - validator_indices = attestation_1.custody_bit_0_indices + attestation_1.custody_bit_1_indices - for index in validator_indices: - state.validator_registry[index].slashed = True - - yield from run_attester_slashing_processing(state, attester_slashing, False) - - -@spec_state_test -def test_custody_bit_0_and_1(state): - attester_slashing = get_valid_attester_slashing(state, signed_1=False, signed_2=True) - - attester_slashing.attestation_1.custody_bit_1_indices = ( - attester_slashing.attestation_1.custody_bit_0_indices - ) - sign_indexed_attestation(state, attester_slashing.attestation_1) - - yield from run_attester_slashing_processing(state, attester_slashing, False) diff --git a/test_libs/pyspec/eth2spec/test/block_processing/test_process_block_header.py b/test_libs/pyspec/eth2spec/test/block_processing/test_process_block_header.py deleted file mode 100644 index 454f557c5..000000000 --- a/test_libs/pyspec/eth2spec/test/block_processing/test_process_block_header.py +++ /dev/null @@ -1,87 +0,0 @@ -from copy import deepcopy - -from eth2spec.phase0.spec import ( - get_beacon_proposer_index, - cache_state, - advance_slot, - process_block_header, -) -from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls -from eth2spec.test.helpers.block import ( - build_empty_block_for_next_slot, - sign_block -) -from 
eth2spec.test.helpers.state import next_slot - - -def prepare_state_for_header_processing(state): - cache_state(state) - advance_slot(state) - - -def run_block_header_processing(state, block, valid=True): - """ - Run ``process_block_header``, yielding: - - pre-state ('pre') - - block ('block') - - post-state ('post'). - If ``valid == False``, run expecting ``AssertionError`` - """ - prepare_state_for_header_processing(state) - - yield 'pre', state - yield 'block', block - - if not valid: - expect_assertion_error(lambda: process_block_header(state, block)) - yield 'post', None - return - - process_block_header(state, block) - yield 'post', state - - -@spec_state_test -def test_success_block_header(state): - block = build_empty_block_for_next_slot(state, signed=True) - yield from run_block_header_processing(state, block) - - -@always_bls -@spec_state_test -def test_invalid_sig_block_header(state): - block = build_empty_block_for_next_slot(state) - yield from run_block_header_processing(state, block, valid=False) - - -@spec_state_test -def test_invalid_slot_block_header(state): - block = build_empty_block_for_next_slot(state) - block.slot = state.slot + 2 # invalid slot - sign_block(state, block) - - yield from run_block_header_processing(state, block, valid=False) - - -@spec_state_test -def test_invalid_previous_block_root(state): - block = build_empty_block_for_next_slot(state) - block.previous_block_root = b'\12' * 32 # invalid prev root - sign_block(state, block) - - yield from run_block_header_processing(state, block, valid=False) - - -@spec_state_test -def test_proposer_slashed(state): - # use stub state to get proposer index of next slot - stub_state = deepcopy(state) - next_slot(stub_state) - proposer_index = get_beacon_proposer_index(stub_state) - - # set proposer to slashed - state.validator_registry[proposer_index].slashed = True - - block = build_empty_block_for_next_slot(state, signed=True) - - yield from run_block_header_processing(state, block, 
valid=False) diff --git a/test_libs/pyspec/eth2spec/test/block_processing/test_process_deposit.py b/test_libs/pyspec/eth2spec/test/block_processing/test_process_deposit.py deleted file mode 100644 index 95b13c779..000000000 --- a/test_libs/pyspec/eth2spec/test/block_processing/test_process_deposit.py +++ /dev/null @@ -1,140 +0,0 @@ -import eth2spec.phase0.spec as spec -from eth2spec.phase0.spec import process_deposit -from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls -from eth2spec.test.helpers.deposits import prepare_state_and_deposit, sign_deposit_data -from eth2spec.test.helpers.state import get_balance -from eth2spec.test.helpers.keys import privkeys - - -def run_deposit_processing(state, deposit, validator_index, valid=True, effective=True): - """ - Run ``process_deposit``, yielding: - - pre-state ('pre') - - deposit ('deposit') - - post-state ('post'). - If ``valid == False``, run expecting ``AssertionError`` - """ - pre_validator_count = len(state.validator_registry) - pre_balance = 0 - if validator_index < pre_validator_count: - pre_balance = get_balance(state, validator_index) - else: - # if it is a new validator, it should be right at the end of the current registry. 
- assert validator_index == pre_validator_count - - yield 'pre', state - yield 'deposit', deposit - - if not valid: - expect_assertion_error(lambda: process_deposit(state, deposit)) - yield 'post', None - return - - process_deposit(state, deposit) - - yield 'post', state - - if not effective: - assert len(state.validator_registry) == pre_validator_count - assert len(state.balances) == pre_validator_count - if validator_index < pre_validator_count: - assert get_balance(state, validator_index) == pre_balance - else: - if validator_index < pre_validator_count: - # top-up - assert len(state.validator_registry) == pre_validator_count - assert len(state.balances) == pre_validator_count - else: - # new validator - assert len(state.validator_registry) == pre_validator_count + 1 - assert len(state.balances) == pre_validator_count + 1 - assert get_balance(state, validator_index) == pre_balance + deposit.data.amount - - assert state.deposit_index == state.latest_eth1_data.deposit_count - - -@spec_state_test -def test_new_deposit(state): - # fresh deposit = next validator index = validator appended to registry - validator_index = len(state.validator_registry) - amount = spec.MAX_EFFECTIVE_BALANCE - deposit = prepare_state_and_deposit(state, validator_index, amount, signed=True) - - yield from run_deposit_processing(state, deposit, validator_index) - - -@always_bls -@spec_state_test -def test_invalid_sig_new_deposit(state): - # fresh deposit = next validator index = validator appended to registry - validator_index = len(state.validator_registry) - amount = spec.MAX_EFFECTIVE_BALANCE - deposit = prepare_state_and_deposit(state, validator_index, amount) - yield from run_deposit_processing(state, deposit, validator_index, valid=True, effective=False) - - -@spec_state_test -def test_success_top_up(state): - validator_index = 0 - amount = spec.MAX_EFFECTIVE_BALANCE // 4 - deposit = prepare_state_and_deposit(state, validator_index, amount, signed=True) - - yield from 
run_deposit_processing(state, deposit, validator_index) - - -@always_bls -@spec_state_test -def test_invalid_sig_top_up(state): - validator_index = 0 - amount = spec.MAX_EFFECTIVE_BALANCE // 4 - deposit = prepare_state_and_deposit(state, validator_index, amount) - - # invalid signatures, in top-ups, are allowed! - yield from run_deposit_processing(state, deposit, validator_index, valid=True, effective=True) - - -@spec_state_test -def test_invalid_withdrawal_credentials_top_up(state): - validator_index = 0 - amount = spec.MAX_EFFECTIVE_BALANCE // 4 - withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX_BYTE + spec.hash(b"junk")[1:] - deposit = prepare_state_and_deposit( - state, - validator_index, - amount, - withdrawal_credentials=withdrawal_credentials, - ) - - # inconsistent withdrawal credentials, in top-ups, are allowed! - yield from run_deposit_processing(state, deposit, validator_index, valid=True, effective=True) - - -@spec_state_test -def test_wrong_index(state): - validator_index = len(state.validator_registry) - amount = spec.MAX_EFFECTIVE_BALANCE - deposit = prepare_state_and_deposit(state, validator_index, amount) - - # mess up deposit_index - deposit.index = state.deposit_index + 1 - - sign_deposit_data(state, deposit.data, privkeys[validator_index]) - - yield from run_deposit_processing(state, deposit, validator_index, valid=False) - - -# TODO: test invalid signature - - -@spec_state_test -def test_bad_merkle_proof(state): - validator_index = len(state.validator_registry) - amount = spec.MAX_EFFECTIVE_BALANCE - deposit = prepare_state_and_deposit(state, validator_index, amount) - - # mess up merkle branch - deposit.proof[-1] = spec.ZERO_HASH - - sign_deposit_data(state, deposit.data, privkeys[validator_index]) - - yield from run_deposit_processing(state, deposit, validator_index, valid=False) diff --git a/test_libs/pyspec/eth2spec/test/block_processing/test_process_proposer_slashing.py 
b/test_libs/pyspec/eth2spec/test/block_processing/test_process_proposer_slashing.py deleted file mode 100644 index 07ccc25f1..000000000 --- a/test_libs/pyspec/eth2spec/test/block_processing/test_process_proposer_slashing.py +++ /dev/null @@ -1,137 +0,0 @@ -import eth2spec.phase0.spec as spec -from eth2spec.phase0.spec import ( - get_current_epoch, - process_proposer_slashing, -) -from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls -from eth2spec.test.helpers.block_header import sign_block_header -from eth2spec.test.helpers.keys import privkeys -from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing -from eth2spec.test.helpers.state import get_balance - - -def run_proposer_slashing_processing(state, proposer_slashing, valid=True): - """ - Run ``process_proposer_slashing``, yielding: - - pre-state ('pre') - - proposer_slashing ('proposer_slashing') - - post-state ('post'). - If ``valid == False``, run expecting ``AssertionError`` - """ - - yield 'pre', state - yield 'proposer_slashing', proposer_slashing - - if not valid: - expect_assertion_error(lambda: process_proposer_slashing(state, proposer_slashing)) - yield 'post', None - return - - pre_proposer_balance = get_balance(state, proposer_slashing.proposer_index) - - process_proposer_slashing(state, proposer_slashing) - yield 'post', state - - # check if slashed - slashed_validator = state.validator_registry[proposer_slashing.proposer_index] - assert slashed_validator.slashed - assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH - assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH - - # lost whistleblower reward - assert ( - get_balance(state, proposer_slashing.proposer_index) < - pre_proposer_balance - ) - - -@spec_state_test -def test_success(state): - proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=True) - - yield from run_proposer_slashing_processing(state, proposer_slashing) - - -@always_bls 
-@spec_state_test -def test_invalid_sig_1(state): - proposer_slashing = get_valid_proposer_slashing(state, signed_1=False, signed_2=True) - yield from run_proposer_slashing_processing(state, proposer_slashing, False) - - -@always_bls -@spec_state_test -def test_invalid_sig_2(state): - proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=False) - yield from run_proposer_slashing_processing(state, proposer_slashing, False) - - -@always_bls -@spec_state_test -def test_invalid_sig_1_and_2(state): - proposer_slashing = get_valid_proposer_slashing(state, signed_1=False, signed_2=False) - yield from run_proposer_slashing_processing(state, proposer_slashing, False) - - -@spec_state_test -def test_invalid_proposer_index(state): - proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=True) - # Index just too high (by 1) - proposer_slashing.proposer_index = len(state.validator_registry) - - yield from run_proposer_slashing_processing(state, proposer_slashing, False) - - -@spec_state_test -def test_epochs_are_different(state): - proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=False) - - # set slots to be in different epochs - proposer_slashing.header_2.slot += spec.SLOTS_PER_EPOCH - sign_block_header(state, proposer_slashing.header_2, privkeys[proposer_slashing.proposer_index]) - - yield from run_proposer_slashing_processing(state, proposer_slashing, False) - - -@spec_state_test -def test_headers_are_same(state): - proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=False) - - # set headers to be the same - proposer_slashing.header_2 = proposer_slashing.header_1 - - yield from run_proposer_slashing_processing(state, proposer_slashing, False) - - -@spec_state_test -def test_proposer_is_not_activated(state): - proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=True) - - # set proposer to be not active yet - 
state.validator_registry[proposer_slashing.proposer_index].activation_epoch = get_current_epoch(state) + 1 - - yield from run_proposer_slashing_processing(state, proposer_slashing, False) - - -@spec_state_test -def test_proposer_is_slashed(state): - proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=True) - - # set proposer to slashed - state.validator_registry[proposer_slashing.proposer_index].slashed = True - - yield from run_proposer_slashing_processing(state, proposer_slashing, False) - - -@spec_state_test -def test_proposer_is_withdrawn(state): - proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=True) - - # move 1 epoch into future, to allow for past withdrawable epoch - state.slot += spec.SLOTS_PER_EPOCH - # set proposer withdrawable_epoch in past - current_epoch = get_current_epoch(state) - proposer_index = proposer_slashing.proposer_index - state.validator_registry[proposer_index].withdrawable_epoch = current_epoch - 1 - - yield from run_proposer_slashing_processing(state, proposer_slashing, False) diff --git a/test_libs/pyspec/eth2spec/test/block_processing/test_process_transfer.py b/test_libs/pyspec/eth2spec/test/block_processing/test_process_transfer.py deleted file mode 100644 index 83af75574..000000000 --- a/test_libs/pyspec/eth2spec/test/block_processing/test_process_transfer.py +++ /dev/null @@ -1,172 +0,0 @@ -import eth2spec.phase0.spec as spec -from eth2spec.phase0.spec import ( - get_active_validator_indices, - get_beacon_proposer_index, - get_current_epoch, - process_transfer, -) -from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls -from eth2spec.test.helpers.state import next_epoch -from eth2spec.test.helpers.block import apply_empty_block -from eth2spec.test.helpers.transfers import get_valid_transfer - - -def run_transfer_processing(state, transfer, valid=True): - """ - Run ``process_transfer``, yielding: - - pre-state ('pre') - - transfer ('transfer') - 
- post-state ('post'). - If ``valid == False``, run expecting ``AssertionError`` - """ - - proposer_index = get_beacon_proposer_index(state) - pre_transfer_sender_balance = state.balances[transfer.sender] - pre_transfer_recipient_balance = state.balances[transfer.recipient] - pre_transfer_proposer_balance = state.balances[proposer_index] - - yield 'pre', state - yield 'transfer', transfer - - if not valid: - expect_assertion_error(lambda: process_transfer(state, transfer)) - yield 'post', None - return - - process_transfer(state, transfer) - yield 'post', state - - sender_balance = state.balances[transfer.sender] - recipient_balance = state.balances[transfer.recipient] - assert sender_balance == pre_transfer_sender_balance - transfer.amount - transfer.fee - assert recipient_balance == pre_transfer_recipient_balance + transfer.amount - assert state.balances[proposer_index] == pre_transfer_proposer_balance + transfer.fee - - -@spec_state_test -def test_success_non_activated(state): - transfer = get_valid_transfer(state, signed=True) - # un-activate so validator can transfer - state.validator_registry[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(state, transfer) - - -@spec_state_test -def test_success_withdrawable(state): - next_epoch(state) - apply_empty_block(state) - - transfer = get_valid_transfer(state, signed=True) - - # withdrawable_epoch in past so can transfer - state.validator_registry[transfer.sender].withdrawable_epoch = get_current_epoch(state) - 1 - - yield from run_transfer_processing(state, transfer) - - -@spec_state_test -def test_success_active_above_max_effective(state): - sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] - state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + 1 - transfer = get_valid_transfer(state, sender_index=sender_index, amount=1, fee=0, signed=True) - - yield from run_transfer_processing(state, transfer) - - -@spec_state_test 
-def test_success_active_above_max_effective_fee(state): - sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] - state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + 1 - transfer = get_valid_transfer(state, sender_index=sender_index, amount=0, fee=1, signed=True) - - yield from run_transfer_processing(state, transfer) - - -@always_bls -@spec_state_test -def test_invalid_signature(state): - transfer = get_valid_transfer(state) - # un-activate so validator can transfer - state.validator_registry[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(state, transfer, False) - - -@spec_state_test -def test_active_but_transfer_past_effective_balance(state): - sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] - amount = spec.MAX_EFFECTIVE_BALANCE // 32 - state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE - transfer = get_valid_transfer(state, sender_index=sender_index, amount=amount, fee=0, signed=True) - - yield from run_transfer_processing(state, transfer, False) - - -@spec_state_test -def test_incorrect_slot(state): - transfer = get_valid_transfer(state, slot=state.slot + 1, signed=True) - # un-activate so validator can transfer - state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(state, transfer, False) - - -@spec_state_test -def test_insufficient_balance_for_fee(state): - sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] - state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE - transfer = get_valid_transfer(state, sender_index=sender_index, amount=0, fee=1, signed=True) - - # un-activate so validator can transfer - state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(state, transfer, False) - - -@spec_state_test -def test_insufficient_balance(state): - sender_index = 
get_active_validator_indices(state, get_current_epoch(state))[-1] - state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE - transfer = get_valid_transfer(state, sender_index=sender_index, amount=1, fee=0, signed=True) - - # un-activate so validator can transfer - state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(state, transfer, False) - - -@spec_state_test -def test_no_dust_sender(state): - sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] - balance = state.balances[sender_index] - transfer = get_valid_transfer(state, sender_index=sender_index, amount=balance - spec.MIN_DEPOSIT_AMOUNT + 1, fee=0, signed=True) - - # un-activate so validator can transfer - state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(state, transfer, False) - - -@spec_state_test -def test_no_dust_recipient(state): - sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] - state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + 1 - transfer = get_valid_transfer(state, sender_index=sender_index, amount=1, fee=0, signed=True) - state.balances[transfer.recipient] = 0 - - # un-activate so validator can transfer - state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(state, transfer, False) - - -@spec_state_test -def test_invalid_pubkey(state): - transfer = get_valid_transfer(state, signed=True) - state.validator_registry[transfer.sender].withdrawal_credentials = spec.ZERO_HASH - - # un-activate so validator can transfer - state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(state, transfer, False) diff --git a/test_libs/pyspec/eth2spec/test/conftest.py b/test_libs/pyspec/eth2spec/test/conftest.py index dadb0d5d0..5713c3470 100644 --- 
a/test_libs/pyspec/eth2spec/test/conftest.py +++ b/test_libs/pyspec/eth2spec/test/conftest.py @@ -1,8 +1,10 @@ -from eth2spec.phase0 import spec +from eth2spec.phase0 import spec as spec_phase0 +from eth2spec.phase1 import spec as spec_phase1 # We import pytest only when it's present, i.e. when we are running tests. # The test-cases themselves can be generated without installing pytest. + def module_exists(module_name): try: __import__(module_name) @@ -33,4 +35,5 @@ def config(request): config_name = request.config.getoption("--config") from preset_loader import loader presets = loader.load_presets('../../configs/', config_name) - spec.apply_constants_preset(presets) + spec_phase0.apply_constants_preset(presets) + spec_phase1.apply_constants_preset(presets) diff --git a/test_libs/pyspec/eth2spec/test/context.py b/test_libs/pyspec/eth2spec/test/context.py index 2be9322de..cbc594cd8 100644 --- a/test_libs/pyspec/eth2spec/test/context.py +++ b/test_libs/pyspec/eth2spec/test/context.py @@ -1,12 +1,20 @@ -from eth2spec.phase0 import spec +from eth2spec.phase0 import spec as spec_phase0 +from eth2spec.phase1 import spec as spec_phase1 from eth2spec.utils import bls from .helpers.genesis import create_genesis_state -from .utils import spectest, with_args, with_tags +from .utils import spectest, with_tags -# Provides a genesis state as first argument to the function decorated with this -with_state = with_args(lambda: [create_genesis_state(spec.SLOTS_PER_EPOCH * 8)]) + +def with_state(fn): + def entry(*args, **kw): + try: + kw['state'] = create_genesis_state(spec=kw['spec'], num_validators=spec_phase0.SLOTS_PER_EPOCH * 8) + except KeyError: + raise TypeError('Spec decorator must come before state decorator to inject spec into state.') + return fn(*args, **kw) + return entry # BLS is turned off by default *for performance purposes during TESTING*. 
@@ -80,3 +88,40 @@ def bls_switch(fn): bls.bls_active = old_state return out return entry + + +all_phases = ['phase0', 'phase1'] + + +def with_all_phases(fn): + """ + A decorator for running a test wil every phase + """ + return with_phases(all_phases)(fn) + + +def with_all_phases_except(exclusion_phases): + """ + A decorator factory for running a tests with every phase except the ones listed + """ + def decorator(fn): + return with_phases([phase for phase in all_phases if phase not in exclusion_phases])(fn) + return decorator + + +def with_phases(phases): + """ + Decorator factory that returns a decorator that runs a test for the appropriate phases + """ + def decorator(fn): + def run_with_spec_version(spec, *args, **kw): + kw['spec'] = spec + fn(*args, **kw) + + def wrapper(*args, **kw): + if 'phase0' in phases: + run_with_spec_version(spec_phase0, *args, **kw) + if 'phase1' in phases: + run_with_spec_version(spec_phase1, *args, **kw) + return wrapper + return decorator diff --git a/test_libs/pyspec/eth2spec/test/epoch_processing/test_process_crosslinks.py b/test_libs/pyspec/eth2spec/test/epoch_processing/test_process_crosslinks.py deleted file mode 100644 index cfbcd1883..000000000 --- a/test_libs/pyspec/eth2spec/test/epoch_processing/test_process_crosslinks.py +++ /dev/null @@ -1,150 +0,0 @@ -from copy import deepcopy - -import eth2spec.phase0.spec as spec -from eth2spec.phase0.spec import ( - cache_state, - get_crosslink_deltas, - process_crosslinks, -) -from eth2spec.phase0.state_transition import ( - state_transition, -) -from eth2spec.test.context import spec_state_test -from eth2spec.test.helpers.state import ( - next_epoch, - next_slot -) -from eth2spec.test.helpers.block import apply_empty_block, sign_block -from eth2spec.test.helpers.attestations import ( - add_attestation_to_state, - build_empty_block_for_next_slot, - fill_aggregate_attestation, - get_crosslink_committee, - get_valid_attestation, - sign_attestation, -) - - -def 
run_process_crosslinks(state, valid=True): - """ - Run ``process_crosslinks``, yielding: - - pre-state ('pre') - - post-state ('post'). - If ``valid == False``, run expecting ``AssertionError`` - """ - # transition state to slot before state transition - slot = state.slot + (spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH) - 1 - block = build_empty_block_for_next_slot(state) - block.slot = slot - sign_block(state, block) - state_transition(state, block) - - # cache state before epoch transition - cache_state(state) - - yield 'pre', state - process_crosslinks(state) - yield 'post', state - - -@spec_state_test -def test_no_attestations(state): - yield from run_process_crosslinks(state) - - for shard in range(spec.SHARD_COUNT): - assert state.previous_crosslinks[shard] == state.current_crosslinks[shard] - - -@spec_state_test -def test_single_crosslink_update_from_current_epoch(state): - next_epoch(state) - - attestation = get_valid_attestation(state, signed=True) - - fill_aggregate_attestation(state, attestation) - add_attestation_to_state(state, attestation, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) - - assert len(state.current_epoch_attestations) == 1 - - shard = attestation.data.shard - pre_crosslink = deepcopy(state.current_crosslinks[shard]) - - yield from run_process_crosslinks(state) - - assert state.previous_crosslinks[shard] != state.current_crosslinks[shard] - assert pre_crosslink != state.current_crosslinks[shard] - - -@spec_state_test -def test_single_crosslink_update_from_previous_epoch(state): - next_epoch(state) - - attestation = get_valid_attestation(state, signed=True) - - fill_aggregate_attestation(state, attestation) - add_attestation_to_state(state, attestation, state.slot + spec.SLOTS_PER_EPOCH) - - assert len(state.previous_epoch_attestations) == 1 - - shard = attestation.data.shard - pre_crosslink = deepcopy(state.current_crosslinks[shard]) - - crosslink_deltas = get_crosslink_deltas(state) - - yield from 
run_process_crosslinks(state) - - assert state.previous_crosslinks[shard] != state.current_crosslinks[shard] - assert pre_crosslink != state.current_crosslinks[shard] - - # ensure rewarded - for index in get_crosslink_committee(state, attestation.data.target_epoch, attestation.data.shard): - assert crosslink_deltas[0][index] > 0 - assert crosslink_deltas[1][index] == 0 - - -@spec_state_test -def test_double_late_crosslink(state): - if spec.get_epoch_committee_count(state, spec.get_current_epoch(state)) < spec.SHARD_COUNT: - print("warning: ignoring test, test-assumptions are incompatible with configuration") - return - - next_epoch(state) - state.slot += 4 - - attestation_1 = get_valid_attestation(state, signed=True) - fill_aggregate_attestation(state, attestation_1) - - # add attestation_1 to next epoch - next_epoch(state) - add_attestation_to_state(state, attestation_1, state.slot + 1) - - for slot in range(spec.SLOTS_PER_EPOCH): - attestation_2 = get_valid_attestation(state) - if attestation_2.data.shard == attestation_1.data.shard: - sign_attestation(state, attestation_2) - break - next_slot(state) - apply_empty_block(state) - - fill_aggregate_attestation(state, attestation_2) - - # add attestation_2 in the next epoch after attestation_1 has - # already updated the relevant crosslink - next_epoch(state) - add_attestation_to_state(state, attestation_2, state.slot + 1) - - assert len(state.previous_epoch_attestations) == 1 - assert len(state.current_epoch_attestations) == 0 - - crosslink_deltas = get_crosslink_deltas(state) - - yield from run_process_crosslinks(state) - - shard = attestation_2.data.shard - - # ensure that the current crosslinks were not updated by the second attestation - assert state.previous_crosslinks[shard] == state.current_crosslinks[shard] - # ensure no reward, only penalties for the failed crosslink - for index in get_crosslink_committee(state, attestation_2.data.target_epoch, attestation_2.data.shard): - assert crosslink_deltas[0][index] 
== 0 - assert crosslink_deltas[1][index] > 0 diff --git a/test_libs/pyspec/eth2spec/test/helpers/attestations.py b/test_libs/pyspec/eth2spec/test/helpers/attestations.py index b541e610f..4c8b5c7eb 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/attestations.py +++ b/test_libs/pyspec/eth2spec/test/helpers/attestations.py @@ -1,39 +1,27 @@ from typing import List -# Access constants from spec pkg reference. -import eth2spec.phase0.spec as spec -from eth2spec.phase0.spec import ( - Attestation, - AttestationData, - AttestationDataAndCustodyBit, - get_epoch_start_slot, get_block_root, get_current_epoch, get_previous_epoch, slot_to_epoch, - get_crosslink_committee, get_domain, IndexedAttestation, get_attesting_indices, BeaconState, get_block_root_at_slot, - get_epoch_start_shard, get_epoch_committee_count) -from eth2spec.phase0.state_transition import ( - state_transition, state_transition_to -) from eth2spec.test.helpers.bitfields import set_bitfield_bit from eth2spec.test.helpers.block import build_empty_block_for_next_slot, sign_block from eth2spec.test.helpers.keys import privkeys from eth2spec.utils.bls import bls_sign, bls_aggregate_signatures -from eth2spec.utils.minimal_ssz import hash_tree_root +from eth2spec.utils.ssz.ssz_impl import hash_tree_root -def build_attestation_data(state, slot, shard): +def build_attestation_data(spec, state, slot, shard): assert state.slot >= slot if slot == state.slot: - block_root = build_empty_block_for_next_slot(state).previous_block_root + block_root = build_empty_block_for_next_slot(spec, state).parent_root else: - block_root = get_block_root_at_slot(state, slot) + block_root = spec.get_block_root_at_slot(state, slot) - current_epoch_start_slot = get_epoch_start_slot(get_current_epoch(state)) + current_epoch_start_slot = spec.get_epoch_start_slot(spec.get_current_epoch(state)) if slot < current_epoch_start_slot: - epoch_boundary_root = get_block_root(state, get_previous_epoch(state)) + epoch_boundary_root = 
spec.get_block_root(state, spec.get_previous_epoch(state)) elif slot == current_epoch_start_slot: epoch_boundary_root = block_root else: - epoch_boundary_root = get_block_root(state, get_current_epoch(state)) + epoch_boundary_root = spec.get_block_root(state, spec.get_current_epoch(state)) if slot < current_epoch_start_slot: justified_epoch = state.previous_justified_epoch @@ -42,56 +30,68 @@ def build_attestation_data(state, slot, shard): justified_epoch = state.current_justified_epoch justified_block_root = state.current_justified_root - crosslinks = state.current_crosslinks if slot_to_epoch(slot) == get_current_epoch( - state) else state.previous_crosslinks - return AttestationData( - shard=shard, + if spec.slot_to_epoch(slot) == spec.get_current_epoch(state): + parent_crosslink = state.current_crosslinks[shard] + else: + parent_crosslink = state.previous_crosslinks[shard] + + return spec.AttestationData( beacon_block_root=block_root, source_epoch=justified_epoch, source_root=justified_block_root, - target_epoch=slot_to_epoch(slot), + target_epoch=spec.slot_to_epoch(slot), target_root=epoch_boundary_root, - crosslink_data_root=spec.ZERO_HASH, - previous_crosslink_root=hash_tree_root(crosslinks[shard]), + crosslink=spec.Crosslink( + shard=shard, + start_epoch=parent_crosslink.end_epoch, + end_epoch=min(spec.slot_to_epoch(slot), parent_crosslink.end_epoch + spec.MAX_EPOCHS_PER_CROSSLINK), + data_root=spec.ZERO_HASH, + parent_root=hash_tree_root(parent_crosslink), + ), ) -def get_valid_attestation(state, slot=None, signed=False): +def get_valid_attestation(spec, state, slot=None, signed=False): if slot is None: slot = state.slot - epoch = slot_to_epoch(slot) - epoch_start_shard = get_epoch_start_shard(state, epoch) - committees_per_slot = get_epoch_committee_count(state, epoch) // spec.SLOTS_PER_EPOCH + epoch = spec.slot_to_epoch(slot) + epoch_start_shard = spec.get_epoch_start_shard(state, epoch) + committees_per_slot = spec.get_epoch_committee_count(state, epoch) 
// spec.SLOTS_PER_EPOCH shard = (epoch_start_shard + committees_per_slot * (slot % spec.SLOTS_PER_EPOCH)) % spec.SHARD_COUNT - attestation_data = build_attestation_data(state, slot, shard) + attestation_data = build_attestation_data(spec, state, slot, shard) - crosslink_committee = get_crosslink_committee(state, attestation_data.target_epoch, attestation_data.shard) + crosslink_committee = spec.get_crosslink_committee( + state, + attestation_data.target_epoch, + attestation_data.crosslink.shard + ) committee_size = len(crosslink_committee) bitfield_length = (committee_size + 7) // 8 aggregation_bitfield = b'\x00' * bitfield_length custody_bitfield = b'\x00' * bitfield_length - attestation = Attestation( + attestation = spec.Attestation( aggregation_bitfield=aggregation_bitfield, data=attestation_data, custody_bitfield=custody_bitfield, ) - fill_aggregate_attestation(state, attestation) + fill_aggregate_attestation(spec, state, attestation) if signed: - sign_attestation(state, attestation) + sign_attestation(spec, state, attestation) return attestation -def sign_aggregate_attestation(state: BeaconState, data: AttestationData, participants: List[int]): +def sign_aggregate_attestation(spec, state, attestation_data, participants: List[int]): signatures = [] for validator_index in participants: privkey = privkeys[validator_index] signatures.append( get_attestation_signature( + spec, state, - data, + attestation_data, privkey ) ) @@ -99,23 +99,23 @@ def sign_aggregate_attestation(state: BeaconState, data: AttestationData, partic return bls_aggregate_signatures(signatures) -def sign_indexed_attestation(state, indexed_attestation: IndexedAttestation): +def sign_indexed_attestation(spec, state, indexed_attestation): participants = indexed_attestation.custody_bit_0_indices + indexed_attestation.custody_bit_1_indices - indexed_attestation.signature = sign_aggregate_attestation(state, indexed_attestation.data, participants) + indexed_attestation.signature = 
sign_aggregate_attestation(spec, state, indexed_attestation.data, participants) -def sign_attestation(state, attestation: Attestation): - participants = get_attesting_indices( +def sign_attestation(spec, state, attestation): + participants = spec.get_attesting_indices( state, attestation.data, attestation.aggregation_bitfield, ) - attestation.signature = sign_aggregate_attestation(state, attestation.data, participants) + attestation.signature = sign_aggregate_attestation(spec, state, attestation.data, participants) -def get_attestation_signature(state, attestation_data, privkey, custody_bit=0b0): - message_hash = AttestationDataAndCustodyBit( +def get_attestation_signature(spec, state, attestation_data, privkey, custody_bit=0b0): + message_hash = spec.AttestationDataAndCustodyBit( data=attestation_data, custody_bit=custody_bit, ).hash_tree_root() @@ -123,7 +123,7 @@ def get_attestation_signature(state, attestation_data, privkey, custody_bit=0b0) return bls_sign( message_hash=message_hash, privkey=privkey, - domain=get_domain( + domain=spec.get_domain( state=state, domain_type=spec.DOMAIN_ATTESTATION, message_epoch=attestation_data.target_epoch, @@ -131,16 +131,20 @@ def get_attestation_signature(state, attestation_data, privkey, custody_bit=0b0) ) -def fill_aggregate_attestation(state, attestation): - crosslink_committee = get_crosslink_committee(state, attestation.data.target_epoch, attestation.data.shard) +def fill_aggregate_attestation(spec, state, attestation): + crosslink_committee = spec.get_crosslink_committee( + state, + attestation.data.target_epoch, + attestation.data.crosslink.shard, + ) for i in range(len(crosslink_committee)): attestation.aggregation_bitfield = set_bitfield_bit(attestation.aggregation_bitfield, i) -def add_attestation_to_state(state, attestation, slot): - block = build_empty_block_for_next_slot(state) +def add_attestation_to_state(spec, state, attestation, slot): + block = build_empty_block_for_next_slot(spec, state) block.slot = slot 
block.body.attestations.append(attestation) - state_transition_to(state, block.slot) - sign_block(state, block) - state_transition(state, block) + spec.process_slots(state, block.slot) + sign_block(spec, state, block) + spec.state_transition(state, block) diff --git a/test_libs/pyspec/eth2spec/test/helpers/attester_slashings.py b/test_libs/pyspec/eth2spec/test/helpers/attester_slashings.py index d19b41dfe..9fd34520c 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/attester_slashings.py +++ b/test_libs/pyspec/eth2spec/test/helpers/attester_slashings.py @@ -1,19 +1,18 @@ from copy import deepcopy -from eth2spec.phase0.spec import AttesterSlashing, convert_to_indexed from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation -def get_valid_attester_slashing(state, signed_1=False, signed_2=False): - attestation_1 = get_valid_attestation(state, signed=signed_1) +def get_valid_attester_slashing(spec, state, signed_1=False, signed_2=False): + attestation_1 = get_valid_attestation(spec, state, signed=signed_1) attestation_2 = deepcopy(attestation_1) attestation_2.data.target_root = b'\x01' * 32 if signed_2: - sign_attestation(state, attestation_2) + sign_attestation(spec, state, attestation_2) - return AttesterSlashing( - attestation_1=convert_to_indexed(state, attestation_1), - attestation_2=convert_to_indexed(state, attestation_2), + return spec.AttesterSlashing( + attestation_1=spec.convert_to_indexed(state, attestation_1), + attestation_2=spec.convert_to_indexed(state, attestation_2), ) diff --git a/test_libs/pyspec/eth2spec/test/helpers/bitfields.py b/test_libs/pyspec/eth2spec/test/helpers/bitfields.py index 7c25d073a..50e5b6cba 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/bitfields.py +++ b/test_libs/pyspec/eth2spec/test/helpers/bitfields.py @@ -5,7 +5,7 @@ def set_bitfield_bit(bitfield, i): byte_index = i // 8 bit_index = i % 8 return ( - bitfield[:byte_index] + - bytes([bitfield[byte_index] | (1 << bit_index)]) + - 
bitfield[byte_index + 1:] + bitfield[:byte_index] + + bytes([bitfield[byte_index] | (1 << bit_index)]) + + bitfield[byte_index + 1:] ) diff --git a/test_libs/pyspec/eth2spec/test/helpers/block.py b/test_libs/pyspec/eth2spec/test/helpers/block.py index 81c5e9ef5..5c7cb02a0 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/block.py +++ b/test_libs/pyspec/eth2spec/test/helpers/block.py @@ -1,77 +1,73 @@ from copy import deepcopy -from eth2spec.phase0 import spec -from eth2spec.phase0.spec import get_beacon_proposer_index, slot_to_epoch, get_domain, BeaconBlock -from eth2spec.phase0.state_transition import state_transition, state_transition_to from eth2spec.test.helpers.keys import privkeys from eth2spec.utils.bls import bls_sign, only_with_bls -from eth2spec.utils.minimal_ssz import signing_root, hash_tree_root +from eth2spec.utils.ssz.ssz_impl import signing_root, hash_tree_root # Fully ignore the function if BLS is off, beacon-proposer index calculation is slow. @only_with_bls() -def sign_block(state, block, proposer_index=None): +def sign_block(spec, state, block, proposer_index=None): assert state.slot <= block.slot if proposer_index is None: if block.slot == state.slot: - proposer_index = get_beacon_proposer_index(state) + proposer_index = spec.get_beacon_proposer_index(state) else: - if slot_to_epoch(state.slot) + 1 > slot_to_epoch(block.slot): + if spec.slot_to_epoch(state.slot) + 1 > spec.slot_to_epoch(block.slot): print("warning: block slot far away, and no proposer index manually given." 
" Signing block is slow due to transition for proposer index calculation.") # use stub state to get proposer index of future slot stub_state = deepcopy(state) - state_transition_to(stub_state, block.slot) - proposer_index = get_beacon_proposer_index(stub_state) + spec.process_slots(stub_state, block.slot) + proposer_index = spec.get_beacon_proposer_index(stub_state) privkey = privkeys[proposer_index] block.body.randao_reveal = bls_sign( privkey=privkey, - message_hash=hash_tree_root(slot_to_epoch(block.slot)), - domain=get_domain( + message_hash=hash_tree_root(spec.slot_to_epoch(block.slot)), + domain=spec.get_domain( state, - message_epoch=slot_to_epoch(block.slot), + message_epoch=spec.slot_to_epoch(block.slot), domain_type=spec.DOMAIN_RANDAO, ) ) block.signature = bls_sign( message_hash=signing_root(block), privkey=privkey, - domain=get_domain( + domain=spec.get_domain( state, spec.DOMAIN_BEACON_PROPOSER, - slot_to_epoch(block.slot))) + spec.slot_to_epoch(block.slot))) -def apply_empty_block(state): +def apply_empty_block(spec, state): """ Transition via an empty block (on current slot, assuming no block has been applied yet). :return: the empty block that triggered the transition. 
""" - block = build_empty_block(state, signed=True) - state_transition(state, block) + block = build_empty_block(spec, state, signed=True) + spec.state_transition(state, block) return block -def build_empty_block(state, slot=None, signed=False): +def build_empty_block(spec, state, slot=None, signed=False): if slot is None: slot = state.slot - empty_block = BeaconBlock() + empty_block = spec.BeaconBlock() empty_block.slot = slot empty_block.body.eth1_data.deposit_count = state.deposit_index previous_block_header = deepcopy(state.latest_block_header) if previous_block_header.state_root == spec.ZERO_HASH: previous_block_header.state_root = state.hash_tree_root() - empty_block.previous_block_root = signing_root(previous_block_header) + empty_block.parent_root = signing_root(previous_block_header) if signed: - sign_block(state, empty_block) + sign_block(spec, state, empty_block) return empty_block -def build_empty_block_for_next_slot(state, signed=False): - return build_empty_block(state, state.slot + 1, signed=signed) - +def build_empty_block_for_next_slot(spec, state, signed=False): + return build_empty_block(spec, state, state.slot + 1, signed=signed) diff --git a/test_libs/pyspec/eth2spec/test/helpers/block_header.py b/test_libs/pyspec/eth2spec/test/helpers/block_header.py index 9aba62d37..456414112 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/block_header.py +++ b/test_libs/pyspec/eth2spec/test/helpers/block_header.py @@ -1,13 +1,9 @@ -# Access constants from spec pkg reference. 
-import eth2spec.phase0.spec as spec - -from eth2spec.phase0.spec import get_domain from eth2spec.utils.bls import bls_sign -from eth2spec.utils.minimal_ssz import signing_root +from eth2spec.utils.ssz.ssz_impl import signing_root -def sign_block_header(state, header, privkey): - domain = get_domain( +def sign_block_header(spec, state, header, privkey): + domain = spec.get_domain( state=state, domain_type=spec.DOMAIN_BEACON_PROPOSER, ) diff --git a/test_libs/pyspec/eth2spec/test/helpers/custody.py b/test_libs/pyspec/eth2spec/test/helpers/custody.py new file mode 100644 index 000000000..67df12fcd --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/helpers/custody.py @@ -0,0 +1,38 @@ +from eth2spec.test.helpers.keys import privkeys +from eth2spec.utils.bls import bls_sign + + +def get_valid_early_derived_secret_reveal(spec, state, epoch=None): + current_epoch = spec.get_current_epoch(state) + revealed_index = spec.get_active_validator_indices(state, current_epoch)[-1] + masker_index = spec.get_active_validator_indices(state, current_epoch)[0] + + if epoch is None: + epoch = current_epoch + spec.CUSTODY_PERIOD_TO_RANDAO_PADDING + + reveal = bls_sign( + message_hash=spec.hash_tree_root(epoch), + privkey=privkeys[revealed_index], + domain=spec.get_domain( + state=state, + domain_type=spec.DOMAIN_RANDAO, + message_epoch=epoch, + ), + ) + mask = bls_sign( + message_hash=spec.hash_tree_root(epoch), + privkey=privkeys[masker_index], + domain=spec.get_domain( + state=state, + domain_type=spec.DOMAIN_RANDAO, + message_epoch=epoch, + ), + ) + + return spec.EarlyDerivedSecretReveal( + revealed_index=revealed_index, + epoch=epoch, + reveal=reveal, + masker_index=masker_index, + mask=mask, + ) diff --git a/test_libs/pyspec/eth2spec/test/helpers/deposits.py b/test_libs/pyspec/eth2spec/test/helpers/deposits.py index d28d0bcb8..8f437ec89 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/deposits.py +++ b/test_libs/pyspec/eth2spec/test/helpers/deposits.py @@ -1,29 +1,25 @@ -# Access 
constants from spec pkg reference. -import eth2spec.phase0.spec as spec - -from eth2spec.phase0.spec import get_domain, DepositData, verify_merkle_branch, Deposit, ZERO_HASH from eth2spec.test.helpers.keys import pubkeys, privkeys from eth2spec.utils.bls import bls_sign from eth2spec.utils.merkle_minimal import calc_merkle_tree_from_leaves, get_merkle_root, get_merkle_proof -from eth2spec.utils.minimal_ssz import signing_root +from eth2spec.utils.ssz.ssz_impl import signing_root -def build_deposit_data(state, pubkey, privkey, amount, withdrawal_credentials, signed=False): - deposit_data = DepositData( +def build_deposit_data(spec, state, pubkey, privkey, amount, withdrawal_credentials, signed=False): + deposit_data = spec.DepositData( pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount, ) if signed: - sign_deposit_data(state, deposit_data, privkey) + sign_deposit_data(spec, state, deposit_data, privkey) return deposit_data -def sign_deposit_data(state, deposit_data, privkey): +def sign_deposit_data(spec, state, deposit_data, privkey): signature = bls_sign( message_hash=signing_root(deposit_data), privkey=privkey, - domain=get_domain( + domain=spec.get_domain( state, spec.DOMAIN_DEPOSIT, ) @@ -31,14 +27,15 @@ def sign_deposit_data(state, deposit_data, privkey): deposit_data.signature = signature -def build_deposit(state, +def build_deposit(spec, + state, deposit_data_leaves, pubkey, privkey, amount, withdrawal_credentials, signed): - deposit_data = build_deposit_data(state, pubkey, privkey, amount, withdrawal_credentials, signed) + deposit_data = build_deposit_data(spec, state, pubkey, privkey, amount, withdrawal_credentials, signed) item = deposit_data.hash_tree_root() index = len(deposit_data_leaves) @@ -46,9 +43,9 @@ def build_deposit(state, tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves)) root = get_merkle_root((tuple(deposit_data_leaves))) proof = list(get_merkle_proof(tree, item_index=index)) - assert 
verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, index, root) + assert spec.verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, index, root) - deposit = Deposit( + deposit = spec.Deposit( proof=list(proof), index=index, data=deposit_data, @@ -57,13 +54,13 @@ def build_deposit(state, return deposit, root, deposit_data_leaves -def prepare_state_and_deposit(state, validator_index, amount, withdrawal_credentials=None, signed=False): +def prepare_state_and_deposit(spec, state, validator_index, amount, withdrawal_credentials=None, signed=False): """ Prepare the state for the deposit, and create a deposit for the given validator, depositing the given amount. """ pre_validator_count = len(state.validator_registry) # fill previous deposits with zero-hash - deposit_data_leaves = [ZERO_HASH] * pre_validator_count + deposit_data_leaves = [spec.ZERO_HASH] * pre_validator_count pubkey = pubkeys[validator_index] privkey = privkeys[validator_index] @@ -73,6 +70,7 @@ def prepare_state_and_deposit(state, validator_index, amount, withdrawal_credent withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX_BYTE + spec.hash(pubkey)[1:] deposit, root, deposit_data_leaves = build_deposit( + spec, state, deposit_data_leaves, pubkey, diff --git a/test_libs/pyspec/eth2spec/test/helpers/genesis.py b/test_libs/pyspec/eth2spec/test/helpers/genesis.py index 01011cacd..83af56621 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/genesis.py +++ b/test_libs/pyspec/eth2spec/test/helpers/genesis.py @@ -1,12 +1,8 @@ -# Access constants from spec pkg reference. 
-import eth2spec.phase0.spec as spec - -from eth2spec.phase0.spec import Eth1Data, ZERO_HASH, get_active_validator_indices from eth2spec.test.helpers.keys import pubkeys -from eth2spec.utils.minimal_ssz import hash_tree_root +from eth2spec.utils.ssz.ssz_impl import hash_tree_root -def build_mock_validator(i: int, balance: int): +def build_mock_validator(spec, i: int, balance: int): pubkey = pubkeys[i] # insecurely use pubkey as withdrawal key as well withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX_BYTE + spec.hash(pubkey)[1:] @@ -21,22 +17,22 @@ def build_mock_validator(i: int, balance: int): ) -def create_genesis_state(num_validators): +def create_genesis_state(spec, num_validators): deposit_root = b'\x42' * 32 state = spec.BeaconState( genesis_time=0, deposit_index=num_validators, - latest_eth1_data=Eth1Data( + latest_eth1_data=spec.Eth1Data( deposit_root=deposit_root, deposit_count=num_validators, - block_hash=ZERO_HASH, + block_hash=spec.ZERO_HASH, )) # We "hack" in the initial validators, # as it is much faster than creating and processing genesis deposits for every single test case. 
state.balances = [spec.MAX_EFFECTIVE_BALANCE] * num_validators - state.validator_registry = [build_mock_validator(i, state.balances[i]) for i in range(num_validators)] + state.validator_registry = [build_mock_validator(spec, i, state.balances[i]) for i in range(num_validators)] # Process genesis activations for validator in state.validator_registry: @@ -44,7 +40,7 @@ def create_genesis_state(num_validators): validator.activation_eligibility_epoch = spec.GENESIS_EPOCH validator.activation_epoch = spec.GENESIS_EPOCH - genesis_active_index_root = hash_tree_root(get_active_validator_indices(state, spec.GENESIS_EPOCH)) + genesis_active_index_root = hash_tree_root(spec.get_active_validator_indices(state, spec.GENESIS_EPOCH)) for index in range(spec.LATEST_ACTIVE_INDEX_ROOTS_LENGTH): state.latest_active_index_roots[index] = genesis_active_index_root diff --git a/test_libs/pyspec/eth2spec/test/helpers/proposer_slashings.py b/test_libs/pyspec/eth2spec/test/helpers/proposer_slashings.py index dfb8895dc..86c6acf47 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/proposer_slashings.py +++ b/test_libs/pyspec/eth2spec/test/helpers/proposer_slashings.py @@ -1,34 +1,31 @@ from copy import deepcopy -from eth2spec.phase0.spec import ( - get_current_epoch, get_active_validator_indices, BeaconBlockHeader, ProposerSlashing -) from eth2spec.test.helpers.block_header import sign_block_header from eth2spec.test.helpers.keys import pubkey_to_privkey -def get_valid_proposer_slashing(state, signed_1=False, signed_2=False): - current_epoch = get_current_epoch(state) - validator_index = get_active_validator_indices(state, current_epoch)[-1] +def get_valid_proposer_slashing(spec, state, signed_1=False, signed_2=False): + current_epoch = spec.get_current_epoch(state) + validator_index = spec.get_active_validator_indices(state, current_epoch)[-1] privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] slot = state.slot - header_1 = BeaconBlockHeader( + header_1 = 
spec.BeaconBlockHeader( slot=slot, - previous_block_root=b'\x33' * 32, + parent_root=b'\x33' * 32, state_root=b'\x44' * 32, block_body_root=b'\x55' * 32, ) header_2 = deepcopy(header_1) - header_2.previous_block_root = b'\x99' * 32 + header_2.parent_root = b'\x99' * 32 header_2.slot = slot + 1 if signed_1: - sign_block_header(state, header_1, privkey) + sign_block_header(spec, state, header_1, privkey) if signed_2: - sign_block_header(state, header_2, privkey) + sign_block_header(spec, state, header_2, privkey) - return ProposerSlashing( + return spec.ProposerSlashing( proposer_index=validator_index, header_1=header_1, header_2=header_2, diff --git a/test_libs/pyspec/eth2spec/test/helpers/state.py b/test_libs/pyspec/eth2spec/test/helpers/state.py index e720a9709..63aa27d70 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/state.py +++ b/test_libs/pyspec/eth2spec/test/helpers/state.py @@ -1,29 +1,23 @@ -# Access constants from spec pkg reference. -import eth2spec.phase0.spec as spec - -from eth2spec.phase0.state_transition import state_transition_to - - def get_balance(state, index): return state.balances[index] -def next_slot(state): +def next_slot(spec, state): """ Transition to the next slot. """ - state_transition_to(state, state.slot + 1) + spec.process_slots(state, state.slot + 1) -def next_epoch(state): +def next_epoch(spec, state): """ Transition to the start slot of the next epoch """ slot = state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) - state_transition_to(state, slot) + spec.process_slots(state, slot) -def get_state_root(state, slot) -> bytes: +def get_state_root(spec, state, slot) -> bytes: """ Return the state root at a recent ``slot``. 
""" diff --git a/test_libs/pyspec/eth2spec/test/helpers/transfers.py b/test_libs/pyspec/eth2spec/test/helpers/transfers.py index 2045f48ad..e619c5569 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/transfers.py +++ b/test_libs/pyspec/eth2spec/test/helpers/transfers.py @@ -1,20 +1,16 @@ -# Access constants from spec pkg reference. -import eth2spec.phase0.spec as spec - -from eth2spec.phase0.spec import get_current_epoch, get_active_validator_indices, Transfer, get_domain from eth2spec.test.helpers.keys import pubkeys, privkeys from eth2spec.test.helpers.state import get_balance from eth2spec.utils.bls import bls_sign -from eth2spec.utils.minimal_ssz import signing_root +from eth2spec.utils.ssz.ssz_impl import signing_root -def get_valid_transfer(state, slot=None, sender_index=None, amount=None, fee=None, signed=False): +def get_valid_transfer(spec, state, slot=None, sender_index=None, amount=None, fee=None, signed=False): if slot is None: slot = state.slot - current_epoch = get_current_epoch(state) + current_epoch = spec.get_current_epoch(state) if sender_index is None: - sender_index = get_active_validator_indices(state, current_epoch)[-1] - recipient_index = get_active_validator_indices(state, current_epoch)[0] + sender_index = spec.get_active_validator_indices(state, current_epoch)[-1] + recipient_index = spec.get_active_validator_indices(state, current_epoch)[0] transfer_pubkey = pubkeys[-1] transfer_privkey = privkeys[-1] @@ -23,7 +19,7 @@ def get_valid_transfer(state, slot=None, sender_index=None, amount=None, fee=Non if amount is None: amount = get_balance(state, sender_index) - fee - transfer = Transfer( + transfer = spec.Transfer( sender=sender_index, recipient=recipient_index, amount=amount, @@ -32,24 +28,24 @@ def get_valid_transfer(state, slot=None, sender_index=None, amount=None, fee=Non pubkey=transfer_pubkey, ) if signed: - sign_transfer(state, transfer, transfer_privkey) + sign_transfer(spec, state, transfer, transfer_privkey) # ensure 
withdrawal_credentials reproducible state.validator_registry[transfer.sender].withdrawal_credentials = ( - spec.BLS_WITHDRAWAL_PREFIX_BYTE + spec.hash(transfer.pubkey)[1:] + spec.BLS_WITHDRAWAL_PREFIX_BYTE + spec.hash(transfer.pubkey)[1:] ) return transfer -def sign_transfer(state, transfer, privkey): +def sign_transfer(spec, state, transfer, privkey): transfer.signature = bls_sign( message_hash=signing_root(transfer), privkey=privkey, - domain=get_domain( + domain=spec.get_domain( state=state, domain_type=spec.DOMAIN_TRANSFER, - message_epoch=get_current_epoch(state), + message_epoch=spec.get_current_epoch(state), ) ) return transfer diff --git a/test_libs/pyspec/eth2spec/test/helpers/voluntary_exits.py b/test_libs/pyspec/eth2spec/test/helpers/voluntary_exits.py index 54376d694..120a9f600 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/voluntary_exits.py +++ b/test_libs/pyspec/eth2spec/test/helpers/voluntary_exits.py @@ -1,26 +1,22 @@ -# Access constants from spec pkg reference. -import eth2spec.phase0.spec as spec - -from eth2spec.phase0.spec import VoluntaryExit, get_domain from eth2spec.utils.bls import bls_sign -from eth2spec.utils.minimal_ssz import signing_root +from eth2spec.utils.ssz.ssz_impl import signing_root -def build_voluntary_exit(state, epoch, validator_index, privkey, signed=False): - voluntary_exit = VoluntaryExit( +def build_voluntary_exit(spec, state, epoch, validator_index, privkey, signed=False): + voluntary_exit = spec.VoluntaryExit( epoch=epoch, validator_index=validator_index, ) if signed: - sign_voluntary_exit(state, voluntary_exit, privkey) + sign_voluntary_exit(spec, state, voluntary_exit, privkey) return voluntary_exit -def sign_voluntary_exit(state, voluntary_exit, privkey): +def sign_voluntary_exit(spec, state, voluntary_exit, privkey): voluntary_exit.signature = bls_sign( message_hash=signing_root(voluntary_exit), privkey=privkey, - domain=get_domain( + domain=spec.get_domain( state=state, domain_type=spec.DOMAIN_VOLUNTARY_EXIT, 
message_epoch=voluntary_exit.epoch, diff --git a/test_libs/pyspec/eth2spec/test/phase_0/__init__.py b/test_libs/pyspec/eth2spec/test/phase_0/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/__init__.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py new file mode 100644 index 000000000..2b34ab405 --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py @@ -0,0 +1,314 @@ +from copy import deepcopy + +from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases, with_phases +from eth2spec.test.helpers.attestations import ( + get_valid_attestation, + sign_attestation, +) +from eth2spec.test.helpers.state import ( + next_epoch, + next_slot, +) +from eth2spec.test.helpers.block import apply_empty_block + + +def run_attestation_processing(spec, state, attestation, valid=True): + """ + Run ``process_attestation``, yielding: + - pre-state ('pre') + - attestation ('attestation') + - post-state ('post'). + If ``valid == False``, run expecting ``AssertionError`` + """ + # yield pre-state + yield 'pre', state + + yield 'attestation', attestation + + # If the attestation is invalid, processing is aborted, and there is no post-state. 
+ if not valid: + expect_assertion_error(lambda: spec.process_attestation(state, attestation)) + yield 'post', None + return + + current_epoch_count = len(state.current_epoch_attestations) + previous_epoch_count = len(state.previous_epoch_attestations) + + # process attestation + spec.process_attestation(state, attestation) + + # Make sure the attestation has been processed + if attestation.data.target_epoch == spec.get_current_epoch(state): + assert len(state.current_epoch_attestations) == current_epoch_count + 1 + else: + assert len(state.previous_epoch_attestations) == previous_epoch_count + 1 + + # yield post-state + yield 'post', state + + +@with_all_phases +@spec_state_test +def test_success(spec, state): + attestation = get_valid_attestation(spec, state, signed=True) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + yield from run_attestation_processing(spec, state, attestation) + + +@with_all_phases +@spec_state_test +def test_success_previous_epoch(spec, state): + attestation = get_valid_attestation(spec, state, signed=True) + next_epoch(spec, state) + apply_empty_block(spec, state) + + yield from run_attestation_processing(spec, state, attestation) + + +@with_all_phases +@spec_state_test +def test_success_since_max_epochs_per_crosslink(spec, state): + for _ in range(spec.MAX_EPOCHS_PER_CROSSLINK + 2): + next_epoch(spec, state) + apply_empty_block(spec, state) + + attestation = get_valid_attestation(spec, state, signed=True) + data = attestation.data + # test logic sanity check: make sure the attestation only includes MAX_EPOCHS_PER_CROSSLINK epochs + assert data.crosslink.end_epoch - data.crosslink.start_epoch == spec.MAX_EPOCHS_PER_CROSSLINK + + for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY): + next_slot(spec, state) + apply_empty_block(spec, state) + + yield from run_attestation_processing(spec, state, attestation) + + +@with_all_phases +@always_bls +@spec_state_test +def test_invalid_attestation_signature(spec, state): + attestation = 
get_valid_attestation(spec, state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases +@spec_state_test +def test_before_inclusion_delay(spec, state): + attestation = get_valid_attestation(spec, state, signed=True) + # do not increment slot to allow for inclusion delay + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases +@spec_state_test +def test_after_epoch_slots(spec, state): + attestation = get_valid_attestation(spec, state, signed=True) + # increment past latest inclusion slot + spec.process_slots(state, state.slot + spec.SLOTS_PER_EPOCH + 1) + apply_empty_block(spec, state) + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases +@spec_state_test +def test_old_source_epoch(spec, state): + state.slot = spec.SLOTS_PER_EPOCH * 5 + state.finalized_epoch = 2 + state.previous_justified_epoch = 3 + state.current_justified_epoch = 4 + attestation = get_valid_attestation(spec, state, slot=(spec.SLOTS_PER_EPOCH * 3) + 1) + + # test logic sanity check: make sure the attestation is pointing to oldest known source epoch + assert attestation.data.source_epoch == state.previous_justified_epoch + + # Now go beyond that, it will be invalid + attestation.data.source_epoch -= 1 + + sign_attestation(spec, state, attestation) + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases +@spec_state_test +def test_wrong_shard(spec, state): + attestation = get_valid_attestation(spec, state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.data.crosslink.shard += 1 + + sign_attestation(spec, state, attestation) + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases +@spec_state_test +def test_new_source_epoch(spec, state): + attestation = get_valid_attestation(spec, state) + state.slot += 
spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.data.source_epoch += 1 + + sign_attestation(spec, state, attestation) + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases +@spec_state_test +def test_source_root_is_target_root(spec, state): + attestation = get_valid_attestation(spec, state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.data.source_root = attestation.data.target_root + + sign_attestation(spec, state, attestation) + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases +@spec_state_test +def test_invalid_current_source_root(spec, state): + state.slot = spec.SLOTS_PER_EPOCH * 5 + state.finalized_epoch = 2 + + state.previous_justified_epoch = 3 + state.previous_justified_root = b'\x01' * 32 + + state.current_justified_epoch = 4 + state.current_justified_root = b'\xff' * 32 + + attestation = get_valid_attestation(spec, state, slot=(spec.SLOTS_PER_EPOCH * 3) + 1) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + # Test logic sanity checks: + assert state.current_justified_root != state.previous_justified_root + assert attestation.data.source_root == state.previous_justified_root + + # Make attestation source root invalid: should be previous justified, not current one + attestation.data.source_root = state.current_justified_root + + sign_attestation(spec, state, attestation) + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases +@spec_state_test +def test_bad_source_root(spec, state): + attestation = get_valid_attestation(spec, state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.data.source_root = b'\x42' * 32 + + sign_attestation(spec, state, attestation) + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_phases(['phase0']) +@spec_state_test +def test_non_zero_crosslink_data_root(spec, state): + attestation = get_valid_attestation(spec, 
state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.data.crosslink.data_root = b'\x42' * 32 + + sign_attestation(spec, state, attestation) + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases +@spec_state_test +def test_bad_parent_crosslink(spec, state): + next_epoch(spec, state) + apply_empty_block(spec, state) + + attestation = get_valid_attestation(spec, state, signed=True) + for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY): + next_slot(spec, state) + apply_empty_block(spec, state) + + attestation.data.crosslink.parent_root = b'\x27' * 32 + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases +@spec_state_test +def test_bad_crosslink_start_epoch(spec, state): + next_epoch(spec, state) + apply_empty_block(spec, state) + + attestation = get_valid_attestation(spec, state, signed=True) + for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY): + next_slot(spec, state) + apply_empty_block(spec, state) + + attestation.data.crosslink.start_epoch += 1 + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases +@spec_state_test +def test_bad_crosslink_end_epoch(spec, state): + next_epoch(spec, state) + apply_empty_block(spec, state) + + attestation = get_valid_attestation(spec, state, signed=True) + for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY): + next_slot(spec, state) + apply_empty_block(spec, state) + + attestation.data.crosslink.end_epoch += 1 + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases +@spec_state_test +def test_inconsistent_bitfields(spec, state): + attestation = get_valid_attestation(spec, state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.custody_bitfield = deepcopy(attestation.aggregation_bitfield) + b'\x00' + + sign_attestation(spec, state, attestation) + + yield from run_attestation_processing(spec, state, attestation, False) + + 
+@with_phases(['phase0']) +@spec_state_test +def test_non_empty_custody_bitfield(spec, state): + attestation = get_valid_attestation(spec, state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.custody_bitfield = deepcopy(attestation.aggregation_bitfield) + + sign_attestation(spec, state, attestation) + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases +@spec_state_test +def test_empty_aggregation_bitfield(spec, state): + attestation = get_valid_attestation(spec, state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.aggregation_bitfield = b'\x00' * len(attestation.aggregation_bitfield) + + sign_attestation(spec, state, attestation) + + yield from run_attestation_processing(spec, state, attestation) diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attester_slashing.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attester_slashing.py new file mode 100644 index 000000000..6c7637d59 --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attester_slashing.py @@ -0,0 +1,153 @@ +from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases +from eth2spec.test.helpers.attestations import sign_indexed_attestation +from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing +from eth2spec.test.helpers.block import apply_empty_block +from eth2spec.test.helpers.state import ( + get_balance, + next_epoch, +) + + +def run_attester_slashing_processing(spec, state, attester_slashing, valid=True): + """ + Run ``process_attester_slashing``, yielding: + - pre-state ('pre') + - attester_slashing ('attester_slashing') + - post-state ('post'). 
+ If ``valid == False``, run expecting ``AssertionError`` + """ + + yield 'pre', state + yield 'attester_slashing', attester_slashing + + if not valid: + expect_assertion_error(lambda: spec.process_attester_slashing(state, attester_slashing)) + yield 'post', None + return + + slashed_index = attester_slashing.attestation_1.custody_bit_0_indices[0] + pre_slashed_balance = get_balance(state, slashed_index) + + proposer_index = spec.get_beacon_proposer_index(state) + pre_proposer_balance = get_balance(state, proposer_index) + + # Process slashing + spec.process_attester_slashing(state, attester_slashing) + + slashed_validator = state.validator_registry[slashed_index] + + # Check slashing + assert slashed_validator.slashed + assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH + assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH + + if slashed_index != proposer_index: + # lost whistleblower reward + assert get_balance(state, slashed_index) < pre_slashed_balance + # gained whistleblower reward + assert get_balance(state, proposer_index) > pre_proposer_balance + else: + # gained rewards for all slashings, which may include others. And only lost that of themselves. + # Netto at least 0, if more people where slashed, a balance increase. 
+ assert get_balance(state, slashed_index) >= pre_slashed_balance + + yield 'post', state + + +@with_all_phases +@spec_state_test +def test_success_double(spec, state): + attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True) + + yield from run_attester_slashing_processing(spec, state, attester_slashing) + + +@with_all_phases +@spec_state_test +def test_success_surround(spec, state): + next_epoch(spec, state) + apply_empty_block(spec, state) + + state.current_justified_epoch += 1 + attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True) + + # set attestion1 to surround attestation 2 + attester_slashing.attestation_1.data.source_epoch = attester_slashing.attestation_2.data.source_epoch - 1 + attester_slashing.attestation_1.data.target_epoch = attester_slashing.attestation_2.data.target_epoch + 1 + + sign_indexed_attestation(spec, state, attester_slashing.attestation_1) + + yield from run_attester_slashing_processing(spec, state, attester_slashing) + + +@with_all_phases +@always_bls +@spec_state_test +def test_invalid_sig_1(spec, state): + attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True) + yield from run_attester_slashing_processing(spec, state, attester_slashing, False) + + +@with_all_phases +@always_bls +@spec_state_test +def test_invalid_sig_2(spec, state): + attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False) + yield from run_attester_slashing_processing(spec, state, attester_slashing, False) + + +@with_all_phases +@always_bls +@spec_state_test +def test_invalid_sig_1_and_2(spec, state): + attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=False) + yield from run_attester_slashing_processing(spec, state, attester_slashing, False) + + +@with_all_phases +@spec_state_test +def test_same_data(spec, state): + attester_slashing = get_valid_attester_slashing(spec, state, 
signed_1=False, signed_2=True) + + attester_slashing.attestation_1.data = attester_slashing.attestation_2.data + sign_indexed_attestation(spec, state, attester_slashing.attestation_1) + + yield from run_attester_slashing_processing(spec, state, attester_slashing, False) + + +@with_all_phases +@spec_state_test +def test_no_double_or_surround(spec, state): + attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True) + + attester_slashing.attestation_1.data.target_epoch += 1 + sign_indexed_attestation(spec, state, attester_slashing.attestation_1) + + yield from run_attester_slashing_processing(spec, state, attester_slashing, False) + + +@with_all_phases +@spec_state_test +def test_participants_already_slashed(spec, state): + attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True) + + # set all indices to slashed + attestation_1 = attester_slashing.attestation_1 + validator_indices = attestation_1.custody_bit_0_indices + attestation_1.custody_bit_1_indices + for index in validator_indices: + state.validator_registry[index].slashed = True + + yield from run_attester_slashing_processing(spec, state, attester_slashing, False) + + +@with_all_phases +@spec_state_test +def test_custody_bit_0_and_1(spec, state): + attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True) + + attester_slashing.attestation_1.custody_bit_1_indices = ( + attester_slashing.attestation_1.custody_bit_0_indices + ) + sign_indexed_attestation(spec, state, attester_slashing.attestation_1) + + yield from run_attester_slashing_processing(spec, state, attester_slashing, False) diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_block_header.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_block_header.py new file mode 100644 index 000000000..f3c017982 --- /dev/null +++ 
b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_block_header.py @@ -0,0 +1,85 @@ +from copy import deepcopy + +from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases +from eth2spec.test.helpers.block import ( + build_empty_block_for_next_slot, + sign_block +) +from eth2spec.test.helpers.state import next_slot + + +def prepare_state_for_header_processing(spec, state): + spec.process_slots(state, state.slot + 1) + + +def run_block_header_processing(spec, state, block, valid=True): + """ + Run ``process_block_header``, yielding: + - pre-state ('pre') + - block ('block') + - post-state ('post'). + If ``valid == False``, run expecting ``AssertionError`` + """ + prepare_state_for_header_processing(spec, state) + + yield 'pre', state + yield 'block', block + + if not valid: + expect_assertion_error(lambda: spec.process_block_header(state, block)) + yield 'post', None + return + + spec.process_block_header(state, block) + yield 'post', state + + +@with_all_phases +@spec_state_test +def test_success_block_header(spec, state): + block = build_empty_block_for_next_slot(spec, state, signed=True) + yield from run_block_header_processing(spec, state, block) + + +@with_all_phases +@always_bls +@spec_state_test +def test_invalid_sig_block_header(spec, state): + block = build_empty_block_for_next_slot(spec, state) + yield from run_block_header_processing(spec, state, block, valid=False) + + +@with_all_phases +@spec_state_test +def test_invalid_slot_block_header(spec, state): + block = build_empty_block_for_next_slot(spec, state) + block.slot = state.slot + 2 # invalid slot + sign_block(spec, state, block) + + yield from run_block_header_processing(spec, state, block, valid=False) + + +@with_all_phases +@spec_state_test +def test_invalid_parent_root(spec, state): + block = build_empty_block_for_next_slot(spec, state) + block.parent_root = b'\12' * 32 # invalid prev root + sign_block(spec, state, block) + + yield 
from run_block_header_processing(spec, state, block, valid=False) + + +@with_all_phases +@spec_state_test +def test_proposer_slashed(spec, state): + # use stub state to get proposer index of next slot + stub_state = deepcopy(state) + next_slot(spec, stub_state) + proposer_index = spec.get_beacon_proposer_index(stub_state) + + # set proposer to slashed + state.validator_registry[proposer_index].slashed = True + + block = build_empty_block_for_next_slot(spec, state, signed=True) + + yield from run_block_header_processing(spec, state, block, valid=False) diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_deposit.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_deposit.py new file mode 100644 index 000000000..c50b11f2e --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_deposit.py @@ -0,0 +1,190 @@ +from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases +from eth2spec.test.helpers.deposits import ( + build_deposit, + prepare_state_and_deposit, + sign_deposit_data, +) +from eth2spec.test.helpers.state import get_balance +from eth2spec.test.helpers.keys import privkeys, pubkeys + + +def run_deposit_processing(spec, state, deposit, validator_index, valid=True, effective=True): + """ + Run ``process_deposit``, yielding: + - pre-state ('pre') + - deposit ('deposit') + - post-state ('post'). 
+ If ``valid == False``, run expecting ``AssertionError`` + """ + pre_validator_count = len(state.validator_registry) + pre_balance = 0 + if validator_index < pre_validator_count: + pre_balance = get_balance(state, validator_index) + + yield 'pre', state + yield 'deposit', deposit + + if not valid: + expect_assertion_error(lambda: spec.process_deposit(state, deposit)) + yield 'post', None + return + + spec.process_deposit(state, deposit) + + yield 'post', state + + if not effective: + assert len(state.validator_registry) == pre_validator_count + assert len(state.balances) == pre_validator_count + if validator_index < pre_validator_count: + assert get_balance(state, validator_index) == pre_balance + else: + if validator_index < pre_validator_count: + # top-up + assert len(state.validator_registry) == pre_validator_count + assert len(state.balances) == pre_validator_count + else: + # new validator + assert len(state.validator_registry) == pre_validator_count + 1 + assert len(state.balances) == pre_validator_count + 1 + assert get_balance(state, validator_index) == pre_balance + deposit.data.amount + + assert state.deposit_index == state.latest_eth1_data.deposit_count + + +@with_all_phases +@spec_state_test +def test_new_deposit(spec, state): + # fresh deposit = next validator index = validator appended to registry + validator_index = len(state.validator_registry) + amount = spec.MAX_EFFECTIVE_BALANCE + deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True) + + yield from run_deposit_processing(spec, state, deposit, validator_index) + + +@with_all_phases +@always_bls +@spec_state_test +def test_invalid_sig_new_deposit(spec, state): + # fresh deposit = next validator index = validator appended to registry + validator_index = len(state.validator_registry) + amount = spec.MAX_EFFECTIVE_BALANCE + deposit = prepare_state_and_deposit(spec, state, validator_index, amount) + yield from run_deposit_processing(spec, state, deposit, 
validator_index, valid=True, effective=False) + + +@with_all_phases +@spec_state_test +def test_success_top_up(spec, state): + validator_index = 0 + amount = spec.MAX_EFFECTIVE_BALANCE // 4 + deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True) + + yield from run_deposit_processing(spec, state, deposit, validator_index) + + +@with_all_phases +@always_bls +@spec_state_test +def test_invalid_sig_top_up(spec, state): + validator_index = 0 + amount = spec.MAX_EFFECTIVE_BALANCE // 4 + deposit = prepare_state_and_deposit(spec, state, validator_index, amount) + + # invalid signatures, in top-ups, are allowed! + yield from run_deposit_processing(spec, state, deposit, validator_index, valid=True, effective=True) + + +@with_all_phases +@spec_state_test +def test_invalid_withdrawal_credentials_top_up(spec, state): + validator_index = 0 + amount = spec.MAX_EFFECTIVE_BALANCE // 4 + withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX_BYTE + spec.hash(b"junk")[1:] + deposit = prepare_state_and_deposit( + spec, + state, + validator_index, + amount, + withdrawal_credentials=withdrawal_credentials + ) + + # inconsistent withdrawal credentials, in top-ups, are allowed! 
+ yield from run_deposit_processing(spec, state, deposit, validator_index, valid=True, effective=True) + + +@with_all_phases +@spec_state_test +def test_wrong_index(spec, state): + validator_index = len(state.validator_registry) + amount = spec.MAX_EFFECTIVE_BALANCE + deposit = prepare_state_and_deposit(spec, state, validator_index, amount) + + # mess up deposit_index + deposit.index = state.deposit_index + 1 + + sign_deposit_data(spec, state, deposit.data, privkeys[validator_index]) + + yield from run_deposit_processing(spec, state, deposit, validator_index, valid=False) + + +@with_all_phases +@spec_state_test +def test_wrong_deposit_for_deposit_count(spec, state): + deposit_data_leaves = [spec.ZERO_HASH] * len(state.validator_registry) + + # build root for deposit_1 + index_1 = len(deposit_data_leaves) + pubkey_1 = pubkeys[index_1] + privkey_1 = privkeys[index_1] + _, _, deposit_data_leaves = build_deposit( + spec, + state, + deposit_data_leaves, + pubkey_1, + privkey_1, + spec.MAX_EFFECTIVE_BALANCE, + withdrawal_credentials=b'\x00' * 32, + signed=True, + ) + deposit_count_1 = len(deposit_data_leaves) + + # build root for deposit_2 + index_2 = len(deposit_data_leaves) + pubkey_2 = pubkeys[index_2] + privkey_2 = privkeys[index_2] + deposit_2, root_2, deposit_data_leaves = build_deposit( + spec, + state, + deposit_data_leaves, + pubkey_2, + privkey_2, + spec.MAX_EFFECTIVE_BALANCE, + withdrawal_credentials=b'\x00' * 32, + signed=True, + ) + + # state has root for deposit_2 but is at deposit_count for deposit_1 + state.latest_eth1_data.deposit_root = root_2 + state.latest_eth1_data.deposit_count = deposit_count_1 + + yield from run_deposit_processing(spec, state, deposit_2, index_2, valid=False) + + +# TODO: test invalid signature + + +@with_all_phases +@spec_state_test +def test_bad_merkle_proof(spec, state): + validator_index = len(state.validator_registry) + amount = spec.MAX_EFFECTIVE_BALANCE + deposit = prepare_state_and_deposit(spec, state, validator_index, 
amount) + + # mess up merkle branch + deposit.proof[-1] = spec.ZERO_HASH + + sign_deposit_data(spec, state, deposit.data, privkeys[validator_index]) + + yield from run_deposit_processing(spec, state, deposit, validator_index, valid=False) diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_proposer_slashing.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_proposer_slashing.py new file mode 100644 index 000000000..b35241859 --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_proposer_slashing.py @@ -0,0 +1,142 @@ +from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases +from eth2spec.test.helpers.block_header import sign_block_header +from eth2spec.test.helpers.keys import privkeys +from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing +from eth2spec.test.helpers.state import get_balance + + +def run_proposer_slashing_processing(spec, state, proposer_slashing, valid=True): + """ + Run ``process_proposer_slashing``, yielding: + - pre-state ('pre') + - proposer_slashing ('proposer_slashing') + - post-state ('post'). 
+ If ``valid == False``, run expecting ``AssertionError`` + """ + + yield 'pre', state + yield 'proposer_slashing', proposer_slashing + + if not valid: + expect_assertion_error(lambda: spec.process_proposer_slashing(state, proposer_slashing)) + yield 'post', None + return + + pre_proposer_balance = get_balance(state, proposer_slashing.proposer_index) + + spec.process_proposer_slashing(state, proposer_slashing) + yield 'post', state + + # check if slashed + slashed_validator = state.validator_registry[proposer_slashing.proposer_index] + assert slashed_validator.slashed + assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH + assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH + + # lost whistleblower reward + assert ( + get_balance(state, proposer_slashing.proposer_index) < + pre_proposer_balance + ) + + +@with_all_phases +@spec_state_test +def test_success(spec, state): + proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True) + + yield from run_proposer_slashing_processing(spec, state, proposer_slashing) + + +@with_all_phases +@always_bls +@spec_state_test +def test_invalid_sig_1(spec, state): + proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=False, signed_2=True) + yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False) + + +@with_all_phases +@always_bls +@spec_state_test +def test_invalid_sig_2(spec, state): + proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=False) + yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False) + + +@with_all_phases +@always_bls +@spec_state_test +def test_invalid_sig_1_and_2(spec, state): + proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=False, signed_2=False) + yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False) + + +@with_all_phases +@spec_state_test +def test_invalid_proposer_index(spec, state): + 
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True) + # Index just too high (by 1) + proposer_slashing.proposer_index = len(state.validator_registry) + + yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False) + + +@with_all_phases +@spec_state_test +def test_epochs_are_different(spec, state): + proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=False) + + # set slots to be in different epochs + proposer_slashing.header_2.slot += spec.SLOTS_PER_EPOCH + sign_block_header(spec, state, proposer_slashing.header_2, privkeys[proposer_slashing.proposer_index]) + + yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False) + + +@with_all_phases +@spec_state_test +def test_headers_are_same(spec, state): + proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=False) + + # set headers to be the same + proposer_slashing.header_2 = proposer_slashing.header_1 + + yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False) + + +@with_all_phases +@spec_state_test +def test_proposer_is_not_activated(spec, state): + proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True) + + # set proposer to be not active yet + state.validator_registry[proposer_slashing.proposer_index].activation_epoch = spec.get_current_epoch(state) + 1 + + yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False) + + +@with_all_phases +@spec_state_test +def test_proposer_is_slashed(spec, state): + proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True) + + # set proposer to slashed + state.validator_registry[proposer_slashing.proposer_index].slashed = True + + yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False) + + +@with_all_phases +@spec_state_test +def test_proposer_is_withdrawn(spec, state): + proposer_slashing 
= get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True) + + # move 1 epoch into future, to allow for past withdrawable epoch + state.slot += spec.SLOTS_PER_EPOCH + # set proposer withdrawable_epoch in past + current_epoch = spec.get_current_epoch(state) + proposer_index = proposer_slashing.proposer_index + state.validator_registry[proposer_index].withdrawable_epoch = current_epoch - 1 + + yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False) diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_transfer.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_transfer.py new file mode 100644 index 000000000..1294ca84a --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_transfer.py @@ -0,0 +1,184 @@ +from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases +from eth2spec.test.helpers.state import next_epoch +from eth2spec.test.helpers.block import apply_empty_block +from eth2spec.test.helpers.transfers import get_valid_transfer + + +def run_transfer_processing(spec, state, transfer, valid=True): + """ + Run ``process_transfer``, yielding: + - pre-state ('pre') + - transfer ('transfer') + - post-state ('post'). 
+ If ``valid == False``, run expecting ``AssertionError`` + """ + + proposer_index = spec.get_beacon_proposer_index(state) + pre_transfer_sender_balance = state.balances[transfer.sender] + pre_transfer_recipient_balance = state.balances[transfer.recipient] + pre_transfer_proposer_balance = state.balances[proposer_index] + + yield 'pre', state + yield 'transfer', transfer + + if not valid: + expect_assertion_error(lambda: spec.process_transfer(state, transfer)) + yield 'post', None + return + + spec.process_transfer(state, transfer) + yield 'post', state + + sender_balance = state.balances[transfer.sender] + recipient_balance = state.balances[transfer.recipient] + assert sender_balance == pre_transfer_sender_balance - transfer.amount - transfer.fee + assert recipient_balance == pre_transfer_recipient_balance + transfer.amount + assert state.balances[proposer_index] == pre_transfer_proposer_balance + transfer.fee + + +@with_all_phases +@spec_state_test +def test_success_non_activated(spec, state): + transfer = get_valid_transfer(spec, state, signed=True) + # un-activate so validator can transfer + state.validator_registry[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH + + yield from run_transfer_processing(spec, state, transfer) + + +@with_all_phases +@spec_state_test +def test_success_withdrawable(spec, state): + next_epoch(spec, state) + apply_empty_block(spec, state) + + transfer = get_valid_transfer(spec, state, signed=True) + + # withdrawable_epoch in past so can transfer + state.validator_registry[transfer.sender].withdrawable_epoch = spec.get_current_epoch(state) - 1 + + yield from run_transfer_processing(spec, state, transfer) + + +@with_all_phases +@spec_state_test +def test_success_active_above_max_effective(spec, state): + sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] + state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + 1 + transfer = get_valid_transfer(spec, state, 
sender_index=sender_index, amount=1, fee=0, signed=True) + + yield from run_transfer_processing(spec, state, transfer) + + +@with_all_phases +@spec_state_test +def test_success_active_above_max_effective_fee(spec, state): + sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] + state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + 1 + transfer = get_valid_transfer(spec, state, sender_index=sender_index, amount=0, fee=1, signed=True) + + yield from run_transfer_processing(spec, state, transfer) + + +@with_all_phases +@always_bls +@spec_state_test +def test_invalid_signature(spec, state): + transfer = get_valid_transfer(spec, state) + # un-activate so validator can transfer + state.validator_registry[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH + + yield from run_transfer_processing(spec, state, transfer, False) + + +@with_all_phases +@spec_state_test +def test_active_but_transfer_past_effective_balance(spec, state): + sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] + amount = spec.MAX_EFFECTIVE_BALANCE // 32 + state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + transfer = get_valid_transfer(spec, state, sender_index=sender_index, amount=amount, fee=0, signed=True) + + yield from run_transfer_processing(spec, state, transfer, False) + + +@with_all_phases +@spec_state_test +def test_incorrect_slot(spec, state): + transfer = get_valid_transfer(spec, state, slot=state.slot + 1, signed=True) + # un-activate so validator can transfer + state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH + + yield from run_transfer_processing(spec, state, transfer, False) + + +@with_all_phases +@spec_state_test +def test_insufficient_balance_for_fee(spec, state): + sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] + state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + transfer = 
get_valid_transfer(spec, state, sender_index=sender_index, amount=0, fee=1, signed=True) + + # un-activate so validator can transfer + state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH + + yield from run_transfer_processing(spec, state, transfer, False) + + +@with_all_phases +@spec_state_test +def test_insufficient_balance(spec, state): + sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] + state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + transfer = get_valid_transfer(spec, state, sender_index=sender_index, amount=1, fee=0, signed=True) + + # un-activate so validator can transfer + state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH + + yield from run_transfer_processing(spec, state, transfer, False) + + +@with_all_phases +@spec_state_test +def test_no_dust_sender(spec, state): + sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] + balance = state.balances[sender_index] + transfer = get_valid_transfer( + spec, + state, + sender_index=sender_index, + amount=balance - spec.MIN_DEPOSIT_AMOUNT + 1, + fee=0, + signed=True, + ) + + # un-activate so validator can transfer + state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH + + yield from run_transfer_processing(spec, state, transfer, False) + + +@with_all_phases +@spec_state_test +def test_no_dust_recipient(spec, state): + sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] + state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + 1 + transfer = get_valid_transfer(spec, state, sender_index=sender_index, amount=1, fee=0, signed=True) + state.balances[transfer.recipient] = 0 + + # un-activate so validator can transfer + state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH + + yield from run_transfer_processing(spec, state, transfer, False) + + +@with_all_phases 
+@spec_state_test +def test_invalid_pubkey(spec, state): + transfer = get_valid_transfer(spec, state, signed=True) + state.validator_registry[transfer.sender].withdrawal_credentials = spec.ZERO_HASH + + # un-activate so validator can transfer + state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH + + yield from run_transfer_processing(spec, state, transfer, False) diff --git a/test_libs/pyspec/eth2spec/test/block_processing/test_process_voluntary_exit.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_voluntary_exit.py similarity index 61% rename from test_libs/pyspec/eth2spec/test/block_processing/test_process_voluntary_exit.py rename to test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_voluntary_exit.py index 53fb4e3f7..3359c5e78 100644 --- a/test_libs/pyspec/eth2spec/test/block_processing/test_process_voluntary_exit.py +++ b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_voluntary_exit.py @@ -1,16 +1,9 @@ -import eth2spec.phase0.spec as spec -from eth2spec.phase0.spec import ( - get_active_validator_indices, - get_churn_limit, - get_current_epoch, - process_voluntary_exit, -) -from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls +from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases from eth2spec.test.helpers.keys import pubkey_to_privkey from eth2spec.test.helpers.voluntary_exits import build_voluntary_exit, sign_voluntary_exit -def run_voluntary_exit_processing(state, voluntary_exit, valid=True): +def run_voluntary_exit_processing(spec, state, voluntary_exit, valid=True): """ Run ``process_voluntary_exit``, yielding: - pre-state ('pre') @@ -24,13 +17,13 @@ def run_voluntary_exit_processing(state, voluntary_exit, valid=True): yield 'voluntary_exit', voluntary_exit if not valid: - expect_assertion_error(lambda: process_voluntary_exit(state, voluntary_exit)) + expect_assertion_error(lambda: 
spec.process_voluntary_exit(state, voluntary_exit)) yield 'post', None return pre_exit_epoch = state.validator_registry[validator_index].exit_epoch - process_voluntary_exit(state, voluntary_exit) + spec.process_voluntary_exit(state, voluntary_exit) yield 'post', state @@ -38,50 +31,54 @@ def run_voluntary_exit_processing(state, voluntary_exit, valid=True): assert state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH +@with_all_phases @spec_state_test -def test_success(state): +def test_success(spec, state): # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH - current_epoch = get_current_epoch(state) - validator_index = get_active_validator_indices(state, current_epoch)[0] + current_epoch = spec.get_current_epoch(state) + validator_index = spec.get_active_validator_indices(state, current_epoch)[0] privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] - voluntary_exit = build_voluntary_exit(state, current_epoch, validator_index, privkey, signed=True) + voluntary_exit = build_voluntary_exit(spec, state, current_epoch, validator_index, privkey, signed=True) - yield from run_voluntary_exit_processing(state, voluntary_exit) + yield from run_voluntary_exit_processing(spec, state, voluntary_exit) +@with_all_phases @always_bls @spec_state_test -def test_invalid_signature(state): +def test_invalid_signature(spec, state): # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH - current_epoch = get_current_epoch(state) - validator_index = get_active_validator_indices(state, current_epoch)[0] + current_epoch = spec.get_current_epoch(state) + validator_index = spec.get_active_validator_indices(state, current_epoch)[0] privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] - voluntary_exit = build_voluntary_exit(state, 
current_epoch, validator_index, privkey) + voluntary_exit = build_voluntary_exit(spec, state, current_epoch, validator_index, privkey) - yield from run_voluntary_exit_processing(state, voluntary_exit, False) + yield from run_voluntary_exit_processing(spec, state, voluntary_exit, False) +@with_all_phases @spec_state_test -def test_success_exit_queue(state): +def test_success_exit_queue(spec, state): # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH - current_epoch = get_current_epoch(state) + current_epoch = spec.get_current_epoch(state) # exit `MAX_EXITS_PER_EPOCH` - initial_indices = get_active_validator_indices(state, current_epoch)[:get_churn_limit(state)] + initial_indices = spec.get_active_validator_indices(state, current_epoch)[:spec.get_churn_limit(state)] # Prepare a bunch of exits, based on the current state exit_queue = [] for index in initial_indices: privkey = pubkey_to_privkey[state.validator_registry[index].pubkey] exit_queue.append(build_voluntary_exit( + spec, state, current_epoch, index, @@ -92,13 +89,14 @@ def test_success_exit_queue(state): # Now run all the exits for voluntary_exit in exit_queue: # the function yields data, but we are just interested in running it here, ignore yields. 
- for _ in run_voluntary_exit_processing(state, voluntary_exit): + for _ in run_voluntary_exit_processing(spec, state, voluntary_exit): continue # exit an additional validator - validator_index = get_active_validator_indices(state, current_epoch)[-1] + validator_index = spec.get_active_validator_indices(state, current_epoch)[-1] privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] voluntary_exit = build_voluntary_exit( + spec, state, current_epoch, validator_index, @@ -108,7 +106,7 @@ def test_success_exit_queue(state): # This is the interesting part of the test: on a pre-state with a full exit queue, # when processing an additional exit, it results in an exit in a later epoch - yield from run_voluntary_exit_processing(state, voluntary_exit) + yield from run_voluntary_exit_processing(spec, state, voluntary_exit) assert ( state.validator_registry[validator_index].exit_epoch == @@ -116,16 +114,18 @@ def test_success_exit_queue(state): ) +@with_all_phases @spec_state_test -def test_validator_exit_in_future(state): +def test_validator_exit_in_future(spec, state): # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH - current_epoch = get_current_epoch(state) - validator_index = get_active_validator_indices(state, current_epoch)[0] + current_epoch = spec.get_current_epoch(state) + validator_index = spec.get_active_validator_indices(state, current_epoch)[0] privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] voluntary_exit = build_voluntary_exit( + spec, state, current_epoch, validator_index, @@ -133,21 +133,23 @@ def test_validator_exit_in_future(state): signed=False, ) voluntary_exit.epoch += 1 - sign_voluntary_exit(state, voluntary_exit, privkey) + sign_voluntary_exit(spec, state, voluntary_exit, privkey) - yield from run_voluntary_exit_processing(state, voluntary_exit, False) + yield from run_voluntary_exit_processing(spec, 
state, voluntary_exit, False) +@with_all_phases @spec_state_test -def test_validator_invalid_validator_index(state): +def test_validator_invalid_validator_index(spec, state): # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH - current_epoch = get_current_epoch(state) - validator_index = get_active_validator_indices(state, current_epoch)[0] + current_epoch = spec.get_current_epoch(state) + validator_index = spec.get_active_validator_indices(state, current_epoch)[0] privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] voluntary_exit = build_voluntary_exit( + spec, state, current_epoch, validator_index, @@ -155,21 +157,23 @@ def test_validator_invalid_validator_index(state): signed=False, ) voluntary_exit.validator_index = len(state.validator_registry) - sign_voluntary_exit(state, voluntary_exit, privkey) + sign_voluntary_exit(spec, state, voluntary_exit, privkey) - yield from run_voluntary_exit_processing(state, voluntary_exit, False) + yield from run_voluntary_exit_processing(spec, state, voluntary_exit, False) +@with_all_phases @spec_state_test -def test_validator_not_active(state): - current_epoch = get_current_epoch(state) - validator_index = get_active_validator_indices(state, current_epoch)[0] +def test_validator_not_active(spec, state): + current_epoch = spec.get_current_epoch(state) + validator_index = spec.get_active_validator_indices(state, current_epoch)[0] privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] state.validator_registry[validator_index].activation_epoch = spec.FAR_FUTURE_EPOCH # build and test voluntary exit voluntary_exit = build_voluntary_exit( + spec, state, current_epoch, validator_index, @@ -177,22 +181,24 @@ def test_validator_not_active(state): signed=True, ) - yield from run_voluntary_exit_processing(state, voluntary_exit, False) + yield from run_voluntary_exit_processing(spec, state, 
voluntary_exit, False) +@with_all_phases @spec_state_test -def test_validator_already_exited(state): +def test_validator_already_exited(spec, state): # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow validator able to exit state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH - current_epoch = get_current_epoch(state) - validator_index = get_active_validator_indices(state, current_epoch)[0] + current_epoch = spec.get_current_epoch(state) + validator_index = spec.get_active_validator_indices(state, current_epoch)[0] privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] # but validator already has exited state.validator_registry[validator_index].exit_epoch = current_epoch + 2 voluntary_exit = build_voluntary_exit( + spec, state, current_epoch, validator_index, @@ -200,16 +206,18 @@ def test_validator_already_exited(state): signed=True, ) - yield from run_voluntary_exit_processing(state, voluntary_exit, False) + yield from run_voluntary_exit_processing(spec, state, voluntary_exit, False) +@with_all_phases @spec_state_test -def test_validator_not_active_long_enough(state): - current_epoch = get_current_epoch(state) - validator_index = get_active_validator_indices(state, current_epoch)[0] +def test_validator_not_active_long_enough(spec, state): + current_epoch = spec.get_current_epoch(state) + validator_index = spec.get_active_validator_indices(state, current_epoch)[0] privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] voluntary_exit = build_voluntary_exit( + spec, state, current_epoch, validator_index, @@ -222,4 +230,4 @@ def test_validator_not_active_long_enough(state): spec.PERSISTENT_COMMITTEE_PERIOD ) - yield from run_voluntary_exit_processing(state, voluntary_exit, False) + yield from run_voluntary_exit_processing(spec, state, voluntary_exit, False) diff --git a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/__init__.py 
b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_crosslinks.py b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_crosslinks.py new file mode 100644 index 000000000..65d958678 --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_crosslinks.py @@ -0,0 +1,150 @@ +from copy import deepcopy + +from eth2spec.test.context import spec_state_test, with_all_phases +from eth2spec.test.helpers.state import ( + next_epoch, + next_slot +) +from eth2spec.test.helpers.block import apply_empty_block, sign_block +from eth2spec.test.helpers.attestations import ( + add_attestation_to_state, + build_empty_block_for_next_slot, + fill_aggregate_attestation, + get_valid_attestation, + sign_attestation, +) + + +def run_process_crosslinks(spec, state, valid=True): + """ + Run ``process_crosslinks``, yielding: + - pre-state ('pre') + - post-state ('post'). 
+ If ``valid == False``, run expecting ``AssertionError`` + """ + # transition state to slot before state transition + slot = state.slot + (spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH) - 1 + block = build_empty_block_for_next_slot(spec, state) + block.slot = slot + sign_block(spec, state, block) + spec.state_transition(state, block) + + # cache state before epoch transition + spec.process_slot(state) + + yield 'pre', state + spec.process_crosslinks(state) + yield 'post', state + + +@with_all_phases +@spec_state_test +def test_no_attestations(spec, state): + yield from run_process_crosslinks(spec, state) + + for shard in range(spec.SHARD_COUNT): + assert state.previous_crosslinks[shard] == state.current_crosslinks[shard] + + +@with_all_phases +@spec_state_test +def test_single_crosslink_update_from_current_epoch(spec, state): + next_epoch(spec, state) + + attestation = get_valid_attestation(spec, state, signed=True) + + fill_aggregate_attestation(spec, state, attestation) + add_attestation_to_state(spec, state, attestation, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) + + assert len(state.current_epoch_attestations) == 1 + + shard = attestation.data.crosslink.shard + pre_crosslink = deepcopy(state.current_crosslinks[shard]) + + yield from run_process_crosslinks(spec, state) + + assert state.previous_crosslinks[shard] != state.current_crosslinks[shard] + assert pre_crosslink != state.current_crosslinks[shard] + + +@with_all_phases +@spec_state_test +def test_single_crosslink_update_from_previous_epoch(spec, state): + next_epoch(spec, state) + + attestation = get_valid_attestation(spec, state, signed=True) + + fill_aggregate_attestation(spec, state, attestation) + add_attestation_to_state(spec, state, attestation, state.slot + spec.SLOTS_PER_EPOCH) + + assert len(state.previous_epoch_attestations) == 1 + + shard = attestation.data.crosslink.shard + pre_crosslink = deepcopy(state.current_crosslinks[shard]) + + crosslink_deltas = 
spec.get_crosslink_deltas(state) + + yield from run_process_crosslinks(spec, state) + + assert state.previous_crosslinks[shard] != state.current_crosslinks[shard] + assert pre_crosslink != state.current_crosslinks[shard] + + # ensure rewarded + for index in spec.get_crosslink_committee( + state, + attestation.data.target_epoch, + attestation.data.crosslink.shard): + assert crosslink_deltas[0][index] > 0 + assert crosslink_deltas[1][index] == 0 + + +@with_all_phases +@spec_state_test +def test_double_late_crosslink(spec, state): + if spec.get_epoch_committee_count(state, spec.get_current_epoch(state)) < spec.SHARD_COUNT: + print("warning: ignoring test, test-assumptions are incompatible with configuration") + return + + next_epoch(spec, state) + state.slot += 4 + + attestation_1 = get_valid_attestation(spec, state, signed=True) + fill_aggregate_attestation(spec, state, attestation_1) + + # add attestation_1 to next epoch + next_epoch(spec, state) + add_attestation_to_state(spec, state, attestation_1, state.slot + 1) + + for _ in range(spec.SLOTS_PER_EPOCH): + attestation_2 = get_valid_attestation(spec, state) + if attestation_2.data.crosslink.shard == attestation_1.data.crosslink.shard: + sign_attestation(spec, state, attestation_2) + break + next_slot(spec, state) + apply_empty_block(spec, state) + + fill_aggregate_attestation(spec, state, attestation_2) + + # add attestation_2 in the next epoch after attestation_1 has + # already updated the relevant crosslink + next_epoch(spec, state) + add_attestation_to_state(spec, state, attestation_2, state.slot + 1) + + assert len(state.previous_epoch_attestations) == 1 + assert len(state.current_epoch_attestations) == 0 + + crosslink_deltas = spec.get_crosslink_deltas(state) + + yield from run_process_crosslinks(spec, state) + + shard = attestation_2.data.crosslink.shard + + # ensure that the current crosslinks were not updated by the second attestation + assert state.previous_crosslinks[shard] == 
state.current_crosslinks[shard] + # ensure no reward, only penalties for the failed crosslink + for index in spec.get_crosslink_committee( + state, + attestation_2.data.target_epoch, + attestation_2.data.crosslink.shard): + assert crosslink_deltas[0][index] == 0 + assert crosslink_deltas[1][index] > 0 diff --git a/test_libs/pyspec/eth2spec/test/epoch_processing/test_process_registry_updates.py b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_registry_updates.py similarity index 62% rename from test_libs/pyspec/eth2spec/test/epoch_processing/test_process_registry_updates.py rename to test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_registry_updates.py index 71bf89c70..e6679f844 100644 --- a/test_libs/pyspec/eth2spec/test/epoch_processing/test_process_registry_updates.py +++ b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_registry_updates.py @@ -1,17 +1,10 @@ -import eth2spec.phase0.spec as spec - -from eth2spec.phase0.spec import ( - get_current_epoch, - is_active_validator, - process_registry_updates -) -from eth2spec.phase0.state_transition import state_transition +from eth2spec.phase0.spec import state_transition from eth2spec.test.helpers.block import build_empty_block_for_next_slot, sign_block from eth2spec.test.helpers.state import next_epoch -from eth2spec.test.context import spec_state_test +from eth2spec.test.context import spec_state_test, with_all_phases -def run_process_registry_updates(state, valid=True): +def run_process_registry_updates(spec, state, valid=True): """ Run ``process_crosslinks``, yielding: - pre-state ('pre') @@ -20,13 +13,13 @@ def run_process_registry_updates(state, valid=True): """ # transition state to slot before state transition slot = state.slot + (spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH) - 1 - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(spec, state) block.slot = slot - sign_block(state, block) + 
sign_block(spec, state, block) state_transition(state, block) # cache state before epoch transition - spec.cache_state(state) + spec.process_slot(state) # process components of epoch transition before registry update spec.process_justification_and_finalization(state) @@ -34,50 +27,52 @@ def run_process_registry_updates(state, valid=True): spec.process_rewards_and_penalties(state) yield 'pre', state - process_registry_updates(state) + spec.process_registry_updates(state) yield 'post', state +@with_all_phases @spec_state_test -def test_activation(state): +def test_activation(spec, state): index = 0 - assert is_active_validator(state.validator_registry[index], get_current_epoch(state)) + assert spec.is_active_validator(state.validator_registry[index], spec.get_current_epoch(state)) # Mock a new deposit state.validator_registry[index].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH state.validator_registry[index].activation_epoch = spec.FAR_FUTURE_EPOCH state.validator_registry[index].effective_balance = spec.MAX_EFFECTIVE_BALANCE - assert not is_active_validator(state.validator_registry[index], get_current_epoch(state)) + assert not spec.is_active_validator(state.validator_registry[index], spec.get_current_epoch(state)) for _ in range(spec.ACTIVATION_EXIT_DELAY + 1): - next_epoch(state) + next_epoch(spec, state) - yield from run_process_registry_updates(state) + yield from run_process_registry_updates(spec, state) assert state.validator_registry[index].activation_eligibility_epoch != spec.FAR_FUTURE_EPOCH assert state.validator_registry[index].activation_epoch != spec.FAR_FUTURE_EPOCH - assert is_active_validator( + assert spec.is_active_validator( state.validator_registry[index], - get_current_epoch(state), + spec.get_current_epoch(state), ) +@with_all_phases @spec_state_test -def test_ejection(state): +def test_ejection(spec, state): index = 0 - assert is_active_validator(state.validator_registry[index], get_current_epoch(state)) + assert 
spec.is_active_validator(state.validator_registry[index], spec.get_current_epoch(state)) assert state.validator_registry[index].exit_epoch == spec.FAR_FUTURE_EPOCH # Mock an ejection state.validator_registry[index].effective_balance = spec.EJECTION_BALANCE for _ in range(spec.ACTIVATION_EXIT_DELAY + 1): - next_epoch(state) + next_epoch(spec, state) - yield from run_process_registry_updates(state) + yield from run_process_registry_updates(spec, state) assert state.validator_registry[index].exit_epoch != spec.FAR_FUTURE_EPOCH - assert not is_active_validator( + assert not spec.is_active_validator( state.validator_registry[index], - get_current_epoch(state), + spec.get_current_epoch(state), ) diff --git a/test_libs/pyspec/eth2spec/test/phase_1/__init__.py b/test_libs/pyspec/eth2spec/test/phase_1/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test_libs/pyspec/eth2spec/test/phase_1/block_processing/__init__.py b/test_libs/pyspec/eth2spec/test/phase_1/block_processing/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test_libs/pyspec/eth2spec/test/phase_1/block_processing/test_process_early_derived_secret_reveal.py b/test_libs/pyspec/eth2spec/test/phase_1/block_processing/test_process_early_derived_secret_reveal.py new file mode 100644 index 000000000..110231d77 --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/phase_1/block_processing/test_process_early_derived_secret_reveal.py @@ -0,0 +1,128 @@ +from eth2spec.test.helpers.custody import get_valid_early_derived_secret_reveal +from eth2spec.test.helpers.block import apply_empty_block +from eth2spec.test.helpers.state import next_epoch, get_balance +from eth2spec.test.context import with_all_phases_except, spec_state_test, expect_assertion_error + + +def run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, valid=True): + """ + Run ``process_randao_key_reveal``, yielding: + - pre-state ('pre') + - randao_key_reveal ('randao_key_reveal') + - post-state 
('post'). + If ``valid == False``, run expecting ``AssertionError`` + """ + yield 'pre', state + yield 'randao_key_reveal', randao_key_reveal + + if not valid: + expect_assertion_error(lambda: spec.process_early_derived_secret_reveal(state, randao_key_reveal)) + yield 'post', None + return + + pre_slashed_balance = get_balance(state, randao_key_reveal.revealed_index) + + spec.process_early_derived_secret_reveal(state, randao_key_reveal) + + slashed_validator = state.validator_registry[randao_key_reveal.revealed_index] + + if randao_key_reveal.epoch >= spec.get_current_epoch(state) + spec.CUSTODY_PERIOD_TO_RANDAO_PADDING: + assert slashed_validator.slashed + assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH + assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH + + assert get_balance(state, randao_key_reveal.revealed_index) < pre_slashed_balance + yield 'post', state + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_success(spec, state): + randao_key_reveal = get_valid_early_derived_secret_reveal(spec, state) + + yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal) + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_reveal_from_current_epoch(spec, state): + randao_key_reveal = get_valid_early_derived_secret_reveal(spec, state, spec.get_current_epoch(state)) + + yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False) + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_reveal_from_past_epoch(spec, state): + next_epoch(spec, state) + apply_empty_block(spec, state) + randao_key_reveal = get_valid_early_derived_secret_reveal(spec, state, spec.get_current_epoch(state) - 1) + + yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False) + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_reveal_with_custody_padding(spec, state): + randao_key_reveal = 
get_valid_early_derived_secret_reveal( + spec, + state, + spec.get_current_epoch(state) + spec.CUSTODY_PERIOD_TO_RANDAO_PADDING, + ) + yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, True) + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_reveal_with_custody_padding_minus_one(spec, state): + randao_key_reveal = get_valid_early_derived_secret_reveal( + spec, + state, + spec.get_current_epoch(state) + spec.CUSTODY_PERIOD_TO_RANDAO_PADDING - 1, + ) + yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, True) + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_double_reveal(spec, state): + randao_key_reveal1 = get_valid_early_derived_secret_reveal( + spec, + state, + spec.get_current_epoch(state) + spec.RANDAO_PENALTY_EPOCHS + 1, + ) + res = dict(run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal1)) + pre_state = res['pre'] + yield 'pre', pre_state + intermediate_state = res['post'] + + randao_key_reveal2 = get_valid_early_derived_secret_reveal( + spec, + intermediate_state, + spec.get_current_epoch(pre_state) + spec.RANDAO_PENALTY_EPOCHS + 1, + ) + res = dict(run_early_derived_secret_reveal_processing(spec, intermediate_state, randao_key_reveal2, False)) + post_state = res['post'] + yield 'randao_key_reveal', [randao_key_reveal1, randao_key_reveal2] + yield 'post', post_state + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_revealer_is_slashed(spec, state): + randao_key_reveal = get_valid_early_derived_secret_reveal(spec, state, spec.get_current_epoch(state)) + state.validator_registry[randao_key_reveal.revealed_index].slashed = True + + yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False) + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_far_future_epoch(spec, state): + randao_key_reveal = get_valid_early_derived_secret_reveal( + spec, + state, + 
spec.get_current_epoch(state) + spec.EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS, + ) + + yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False) diff --git a/test_libs/pyspec/eth2spec/test/sanity/test_blocks.py b/test_libs/pyspec/eth2spec/test/sanity/test_blocks.py index c9aadbf2a..587c37742 100644 --- a/test_libs/pyspec/eth2spec/test/sanity/test_blocks.py +++ b/test_libs/pyspec/eth2spec/test/sanity/test_blocks.py @@ -1,22 +1,9 @@ from copy import deepcopy +from typing import List -import eth2spec.phase0.spec as spec +from eth2spec.utils.ssz.ssz_impl import signing_root from eth2spec.utils.bls import bls_sign -from eth2spec.utils.minimal_ssz import signing_root -from eth2spec.phase0.spec import ( - # SSZ - VoluntaryExit, - # functions - get_active_validator_indices, - get_beacon_proposer_index, - get_block_root_at_slot, - get_current_epoch, - get_domain, -) -from eth2spec.phase0.state_transition import ( - state_transition, -) from eth2spec.test.helpers.state import get_balance from eth2spec.test.helpers.transfers import get_valid_transfer from eth2spec.test.helpers.block import build_empty_block_for_next_slot, sign_block @@ -26,89 +13,94 @@ from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing from eth2spec.test.helpers.attestations import get_valid_attestation from eth2spec.test.helpers.deposits import prepare_state_and_deposit -from eth2spec.test.context import spec_state_test, never_bls +from eth2spec.test.context import spec_state_test, never_bls, with_all_phases +@with_all_phases @never_bls @spec_state_test -def test_empty_block_transition(state): +def test_empty_block_transition(spec, state): pre_slot = state.slot pre_eth1_votes = len(state.eth1_data_votes) yield 'pre', state - block = build_empty_block_for_next_slot(state, signed=True) - yield 'blocks', [block], [spec.BeaconBlock] + block = build_empty_block_for_next_slot(spec, state, signed=True) + yield 'blocks', [block], 
List[spec.BeaconBlock] - state_transition(state, block) + spec.state_transition(state, block) yield 'post', state assert len(state.eth1_data_votes) == pre_eth1_votes + 1 - assert get_block_root_at_slot(state, pre_slot) == block.previous_block_root + assert spec.get_block_root_at_slot(state, pre_slot) == block.parent_root +@with_all_phases @never_bls @spec_state_test -def test_skipped_slots(state): +def test_skipped_slots(spec, state): pre_slot = state.slot yield 'pre', state - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(spec, state) block.slot += 3 - sign_block(state, block) - yield 'blocks', [block], [spec.BeaconBlock] + sign_block(spec, state, block) + yield 'blocks', [block], List[spec.BeaconBlock] - state_transition(state, block) + spec.state_transition(state, block) yield 'post', state assert state.slot == block.slot for slot in range(pre_slot, state.slot): - assert get_block_root_at_slot(state, slot) == block.previous_block_root + assert spec.get_block_root_at_slot(state, slot) == block.parent_root +@with_all_phases @spec_state_test -def test_empty_epoch_transition(state): +def test_empty_epoch_transition(spec, state): pre_slot = state.slot yield 'pre', state - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(spec, state) block.slot += spec.SLOTS_PER_EPOCH - sign_block(state, block) - yield 'blocks', [block], [spec.BeaconBlock] + sign_block(spec, state, block) + yield 'blocks', [block], List[spec.BeaconBlock] - state_transition(state, block) + spec.state_transition(state, block) yield 'post', state assert state.slot == block.slot for slot in range(pre_slot, state.slot): - assert get_block_root_at_slot(state, slot) == block.previous_block_root + assert spec.get_block_root_at_slot(state, slot) == block.parent_root +# @with_all_phases # @spec_state_test -# def test_empty_epoch_transition_not_finalizing(state): +# def test_empty_epoch_transition_not_finalizing(spec, state): # # 
copy for later balance lookups. # pre_state = deepcopy(state) # yield 'pre', state -# -# block = build_empty_block_for_next_slot(state) + +# block = build_empty_block_for_next_slot(spec, state) # block.slot += spec.SLOTS_PER_EPOCH * 5 -# sign_block(state, block, proposer_index=0) -# yield 'blocks', [block], [spec.BeaconBlock] -# -# state_transition(state, block) +# sign_block(spec, state, block, proposer_index=0) +# yield 'blocks', [block], List[spec.BeaconBlock] + +# spec.state_transition(state, block) # yield 'post', state -# + # assert state.slot == block.slot -# assert state.finalized_epoch < get_current_epoch(state) - 4 +# assert state.finalized_epoch < spec.get_current_epoch(state) - 4 # for index in range(len(state.validator_registry)): # assert get_balance(state, index) < get_balance(pre_state, index) +@with_all_phases @spec_state_test -def test_proposer_slashing(state): +def test_proposer_slashing(spec, state): # copy for later balance lookups. pre_state = deepcopy(state) - proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=True) + proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True) validator_index = proposer_slashing.proposer_index assert not state.validator_registry[validator_index].slashed @@ -118,12 +110,12 @@ def test_proposer_slashing(state): # # Add to state via block transition # - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(spec, state) block.body.proposer_slashings.append(proposer_slashing) - sign_block(state, block) - yield 'blocks', [block], [spec.BeaconBlock] + sign_block(spec, state, block) + yield 'blocks', [block], List[spec.BeaconBlock] - state_transition(state, block) + spec.state_transition(state, block) yield 'post', state # check if slashed @@ -135,12 +127,13 @@ def test_proposer_slashing(state): assert get_balance(state, validator_index) < get_balance(pre_state, validator_index) +@with_all_phases @spec_state_test -def 
test_attester_slashing(state): +def test_attester_slashing(spec, state): # copy for later balance lookups. pre_state = deepcopy(state) - attester_slashing = get_valid_attester_slashing(state, signed_1=True, signed_2=True) + attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True) validator_index = (attester_slashing.attestation_1.custody_bit_0_indices + attester_slashing.attestation_1.custody_bit_1_indices)[0] @@ -151,12 +144,12 @@ def test_attester_slashing(state): # # Add to state via block transition # - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(spec, state) block.body.attester_slashings.append(attester_slashing) - sign_block(state, block) - yield 'blocks', [block], [spec.BeaconBlock] + sign_block(spec, state, block) + yield 'blocks', [block], List[spec.BeaconBlock] - state_transition(state, block) + spec.state_transition(state, block) yield 'post', state slashed_validator = state.validator_registry[validator_index] @@ -166,7 +159,7 @@ def test_attester_slashing(state): # lost whistleblower reward assert get_balance(state, validator_index) < get_balance(pre_state, validator_index) - proposer_index = get_beacon_proposer_index(state) + proposer_index = spec.get_beacon_proposer_index(state) # gained whistleblower reward assert ( get_balance(state, proposer_index) > @@ -176,24 +169,25 @@ def test_attester_slashing(state): # TODO update functions below to be like above, i.e. 
with @spec_state_test and yielding data to put into the test vector +@with_all_phases @spec_state_test -def test_deposit_in_block(state): +def test_deposit_in_block(spec, state): initial_registry_len = len(state.validator_registry) initial_balances_len = len(state.balances) validator_index = len(state.validator_registry) amount = spec.MAX_EFFECTIVE_BALANCE - deposit = prepare_state_and_deposit(state, validator_index, amount, signed=True) + deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True) yield 'pre', state - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(spec, state) block.body.deposits.append(deposit) - sign_block(state, block) + sign_block(spec, state, block) - yield 'blocks', [block], [spec.BeaconBlock] + yield 'blocks', [block], List[spec.BeaconBlock] - state_transition(state, block) + spec.state_transition(state, block) yield 'post', state assert len(state.validator_registry) == initial_registry_len + 1 @@ -202,11 +196,12 @@ def test_deposit_in_block(state): assert state.validator_registry[validator_index].pubkey == pubkeys[validator_index] +@with_all_phases @spec_state_test -def test_deposit_top_up(state): +def test_deposit_top_up(spec, state): validator_index = 0 amount = spec.MAX_EFFECTIVE_BALANCE // 4 - deposit = prepare_state_and_deposit(state, validator_index, amount) + deposit = prepare_state_and_deposit(spec, state, validator_index, amount) initial_registry_len = len(state.validator_registry) initial_balances_len = len(state.balances) @@ -214,13 +209,13 @@ def test_deposit_top_up(state): yield 'pre', state - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(spec, state) block.body.deposits.append(deposit) - sign_block(state, block) + sign_block(spec, state, block) - yield 'blocks', [block], [spec.BeaconBlock] + yield 'blocks', [block], List[spec.BeaconBlock] - state_transition(state, block) + spec.state_transition(state, block) yield 
'post', state assert len(state.validator_registry) == initial_registry_len @@ -228,44 +223,46 @@ def test_deposit_top_up(state): assert get_balance(state, validator_index) == validator_pre_balance + amount +@with_all_phases @spec_state_test -def test_attestation(state): +def test_attestation(spec, state): state.slot = spec.SLOTS_PER_EPOCH yield 'pre', state - attestation = get_valid_attestation(state, signed=True) + attestation = get_valid_attestation(spec, state, signed=True) # Add to state via block transition pre_current_attestations_len = len(state.current_epoch_attestations) - attestation_block = build_empty_block_for_next_slot(state) + attestation_block = build_empty_block_for_next_slot(spec, state) attestation_block.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY attestation_block.body.attestations.append(attestation) - sign_block(state, attestation_block) - state_transition(state, attestation_block) + sign_block(spec, state, attestation_block) + spec.state_transition(state, attestation_block) assert len(state.current_epoch_attestations) == pre_current_attestations_len + 1 # Epoch transition should move to previous_epoch_attestations pre_current_attestations_root = spec.hash_tree_root(state.current_epoch_attestations) - epoch_block = build_empty_block_for_next_slot(state) + epoch_block = build_empty_block_for_next_slot(spec, state) epoch_block.slot += spec.SLOTS_PER_EPOCH - sign_block(state, epoch_block) - state_transition(state, epoch_block) + sign_block(spec, state, epoch_block) + spec.state_transition(state, epoch_block) - yield 'blocks', [attestation_block, epoch_block], [spec.BeaconBlock] + yield 'blocks', [attestation_block, epoch_block], List[spec.BeaconBlock] yield 'post', state assert len(state.current_epoch_attestations) == 0 assert spec.hash_tree_root(state.previous_epoch_attestations) == pre_current_attestations_root +@with_all_phases @spec_state_test -def test_voluntary_exit(state): - validator_index = get_active_validator_indices( +def 
test_voluntary_exit(spec, state): + validator_index = spec.get_active_validator_indices( state, - get_current_epoch(state) + spec.get_current_epoch(state) )[-1] # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit @@ -273,48 +270,49 @@ def test_voluntary_exit(state): yield 'pre', state - voluntary_exit = VoluntaryExit( - epoch=get_current_epoch(state), + voluntary_exit = spec.VoluntaryExit( + epoch=spec.get_current_epoch(state), validator_index=validator_index, ) voluntary_exit.signature = bls_sign( message_hash=signing_root(voluntary_exit), privkey=privkeys[validator_index], - domain=get_domain( + domain=spec.get_domain( state=state, domain_type=spec.DOMAIN_VOLUNTARY_EXIT, ) ) # Add to state via block transition - initiate_exit_block = build_empty_block_for_next_slot(state) + initiate_exit_block = build_empty_block_for_next_slot(spec, state) initiate_exit_block.body.voluntary_exits.append(voluntary_exit) - sign_block(state, initiate_exit_block) - state_transition(state, initiate_exit_block) + sign_block(spec, state, initiate_exit_block) + spec.state_transition(state, initiate_exit_block) assert state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH # Process within epoch transition - exit_block = build_empty_block_for_next_slot(state) + exit_block = build_empty_block_for_next_slot(spec, state) exit_block.slot += spec.SLOTS_PER_EPOCH - sign_block(state, exit_block) - state_transition(state, exit_block) + sign_block(spec, state, exit_block) + spec.state_transition(state, exit_block) - yield 'blocks', [initiate_exit_block, exit_block], [spec.BeaconBlock] + yield 'blocks', [initiate_exit_block, exit_block], List[spec.BeaconBlock] yield 'post', state assert state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH +@with_all_phases @spec_state_test -def test_transfer(state): +def test_transfer(spec, state): # overwrite default 0 to test spec.MAX_TRANSFERS = 1 - sender_index = 
get_active_validator_indices(state, get_current_epoch(state))[-1] + sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] amount = get_balance(state, sender_index) - transfer = get_valid_transfer(state, state.slot + 1, sender_index, amount, signed=True) + transfer = get_valid_transfer(spec, state, state.slot + 1, sender_index, amount, signed=True) recipient_index = transfer.recipient pre_transfer_recipient_balance = get_balance(state, recipient_index) @@ -324,13 +322,13 @@ def test_transfer(state): yield 'pre', state # Add to state via block transition - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(spec, state) block.body.transfers.append(transfer) - sign_block(state, block) + sign_block(spec, state, block) - yield 'blocks', [block], [spec.BeaconBlock] + yield 'blocks', [block], List[spec.BeaconBlock] - state_transition(state, block) + spec.state_transition(state, block) yield 'post', state sender_balance = get_balance(state, sender_index) @@ -339,10 +337,11 @@ def test_transfer(state): assert recipient_balance == pre_transfer_recipient_balance + amount +@with_all_phases @spec_state_test -def test_balance_driven_status_transitions(state): - current_epoch = get_current_epoch(state) - validator_index = get_active_validator_indices(state, current_epoch)[-1] +def test_balance_driven_status_transitions(spec, state): + current_epoch = spec.get_current_epoch(state) + validator_index = spec.get_active_validator_indices(state, current_epoch)[-1] assert state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH @@ -352,57 +351,59 @@ def test_balance_driven_status_transitions(state): yield 'pre', state # trigger epoch transition - block = build_empty_block_for_next_slot(state) + block = build_empty_block_for_next_slot(spec, state) block.slot += spec.SLOTS_PER_EPOCH - sign_block(state, block) - state_transition(state, block) + sign_block(spec, state, block) + 
spec.state_transition(state, block) - yield 'blocks', [block], [spec.BeaconBlock] + yield 'blocks', [block], List[spec.BeaconBlock] yield 'post', state assert state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH +@with_all_phases @spec_state_test -def test_historical_batch(state): +def test_historical_batch(spec, state): state.slot += spec.SLOTS_PER_HISTORICAL_ROOT - (state.slot % spec.SLOTS_PER_HISTORICAL_ROOT) - 1 pre_historical_roots_len = len(state.historical_roots) yield 'pre', state - block = build_empty_block_for_next_slot(state, signed=True) - state_transition(state, block) + block = build_empty_block_for_next_slot(spec, state, signed=True) + spec.state_transition(state, block) - yield 'blocks', [block], [spec.BeaconBlock] + yield 'blocks', [block], List[spec.BeaconBlock] yield 'post', state assert state.slot == block.slot - assert get_current_epoch(state) % (spec.SLOTS_PER_HISTORICAL_ROOT // spec.SLOTS_PER_EPOCH) == 0 + assert spec.get_current_epoch(state) % (spec.SLOTS_PER_HISTORICAL_ROOT // spec.SLOTS_PER_EPOCH) == 0 assert len(state.historical_roots) == pre_historical_roots_len + 1 +# @with_all_phases # @spec_state_test -# def test_eth1_data_votes(state): +# def test_eth1_data_votes(spec, state): # yield 'pre', state -# + # expected_votes = 0 # assert len(state.eth1_data_votes) == expected_votes -# + # blocks = [] # for _ in range(spec.SLOTS_PER_ETH1_VOTING_PERIOD - 1): -# block = build_empty_block_for_next_slot(state) -# state_transition(state, block) +# block = build_empty_block_for_next_slot(spec, state) +# spec.state_transition(state, block) # expected_votes += 1 # assert len(state.eth1_data_votes) == expected_votes # blocks.append(block) -# -# block = build_empty_block_for_next_slot(state) + +# block = build_empty_block_for_next_slot(spec, state) # blocks.append(block) -# -# state_transition(state, block) -# -# yield 'blocks', [block], [spec.BeaconBlock] + +# spec.state_transition(state, block) + +# yield 'blocks', [block], 
List[spec.BeaconBlock] # yield 'post', state -# + # assert state.slot % spec.SLOTS_PER_ETH1_VOTING_PERIOD == 0 # assert len(state.eth1_data_votes) == 1 diff --git a/test_libs/pyspec/eth2spec/test/sanity/test_slots.py b/test_libs/pyspec/eth2spec/test/sanity/test_slots.py index 2e5f3a5df..6ef6be4d3 100644 --- a/test_libs/pyspec/eth2spec/test/sanity/test_slots.py +++ b/test_libs/pyspec/eth2spec/test/sanity/test_slots.py @@ -1,58 +1,59 @@ -import eth2spec.phase0.spec as spec - -from eth2spec.phase0.state_transition import state_transition_to from eth2spec.test.helpers.state import get_state_root -from eth2spec.test.context import spec_state_test +from eth2spec.test.context import spec_state_test, with_all_phases +@with_all_phases @spec_state_test -def test_slots_1(state): +def test_slots_1(spec, state): pre_slot = state.slot pre_root = state.hash_tree_root() yield 'pre', state slots = 1 yield 'slots', slots - state_transition_to(state, state.slot + slots) + spec.process_slots(state, state.slot + slots) yield 'post', state assert state.slot == pre_slot + 1 - assert get_state_root(state, pre_slot) == pre_root + assert get_state_root(spec, state, pre_slot) == pre_root +@with_all_phases @spec_state_test -def test_slots_2(state): +def test_slots_2(spec, state): yield 'pre', state slots = 2 yield 'slots', slots - state_transition_to(state, state.slot + slots) + spec.process_slots(state, state.slot + slots) yield 'post', state +@with_all_phases @spec_state_test -def test_empty_epoch(state): +def test_empty_epoch(spec, state): yield 'pre', state slots = spec.SLOTS_PER_EPOCH yield 'slots', slots - state_transition_to(state, state.slot + slots) + spec.process_slots(state, state.slot + slots) yield 'post', state +@with_all_phases @spec_state_test -def test_double_empty_epoch(state): +def test_double_empty_epoch(spec, state): yield 'pre', state slots = spec.SLOTS_PER_EPOCH * 2 yield 'slots', slots - state_transition_to(state, state.slot + slots) + spec.process_slots(state, 
state.slot + slots) yield 'post', state +@with_all_phases @spec_state_test -def test_over_epoch_boundary(state): - state_transition_to(state, state.slot + (spec.SLOTS_PER_EPOCH // 2)) +def test_over_epoch_boundary(spec, state): + spec.process_slots(state, state.slot + (spec.SLOTS_PER_EPOCH // 2)) yield 'pre', state slots = spec.SLOTS_PER_EPOCH yield 'slots', slots - state_transition_to(state, state.slot + slots) + spec.process_slots(state, state.slot + slots) yield 'post', state - diff --git a/test_libs/pyspec/eth2spec/test/test_finality.py b/test_libs/pyspec/eth2spec/test/test_finality.py index 56f65eca9..801e8b4fd 100644 --- a/test_libs/pyspec/eth2spec/test/test_finality.py +++ b/test_libs/pyspec/eth2spec/test/test_finality.py @@ -1,20 +1,14 @@ from copy import deepcopy +from typing import List -import eth2spec.phase0.spec as spec -from eth2spec.phase0.state_transition import ( - state_transition, -) -from .context import spec_state_test, never_bls -from .helpers.state import next_epoch -from .helpers.block import build_empty_block_for_next_slot, apply_empty_block -from .helpers.attestations import ( - get_current_epoch, - get_epoch_start_slot, - get_valid_attestation, -) +from eth2spec.test.context import spec_state_test, never_bls, with_all_phases +from eth2spec.test.helpers.state import next_epoch +from eth2spec.test.helpers.block import build_empty_block_for_next_slot, apply_empty_block +from eth2spec.test.helpers.attestations import get_valid_attestation -def check_finality(state, +def check_finality(spec, + state, prev_state, current_justified_changed, previous_justified_changed, @@ -41,164 +35,169 @@ def check_finality(state, assert state.finalized_root == prev_state.finalized_root -def next_epoch_with_attestations(state, +def next_epoch_with_attestations(spec, + state, fill_cur_epoch, fill_prev_epoch): post_state = deepcopy(state) blocks = [] for _ in range(spec.SLOTS_PER_EPOCH): - block = build_empty_block_for_next_slot(post_state) + block = 
build_empty_block_for_next_slot(spec, post_state) if fill_cur_epoch: slot_to_attest = post_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1 - if slot_to_attest >= get_epoch_start_slot(get_current_epoch(post_state)): - cur_attestation = get_valid_attestation(post_state, slot_to_attest) + if slot_to_attest >= spec.get_epoch_start_slot(spec.get_current_epoch(post_state)): + cur_attestation = get_valid_attestation(spec, post_state, slot_to_attest) block.body.attestations.append(cur_attestation) if fill_prev_epoch: slot_to_attest = post_state.slot - spec.SLOTS_PER_EPOCH + 1 - prev_attestation = get_valid_attestation(post_state, slot_to_attest) + prev_attestation = get_valid_attestation(spec, post_state, slot_to_attest) block.body.attestations.append(prev_attestation) - state_transition(post_state, block) + spec.state_transition(post_state, block) blocks.append(block) return state, blocks, post_state +@with_all_phases @never_bls @spec_state_test -def test_finality_rule_4(state): +def test_finality_rule_4(spec, state): yield 'pre', state blocks = [] for epoch in range(4): - prev_state, new_blocks, state = next_epoch_with_attestations(state, True, False) + prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, True, False) blocks += new_blocks # justification/finalization skipped at GENESIS_EPOCH if epoch == 0: - check_finality(state, prev_state, False, False, False) + check_finality(spec, state, prev_state, False, False, False) # justification/finalization skipped at GENESIS_EPOCH + 1 elif epoch == 1: - check_finality(state, prev_state, False, False, False) + check_finality(spec, state, prev_state, False, False, False) elif epoch == 2: - check_finality(state, prev_state, True, False, False) + check_finality(spec, state, prev_state, True, False, False) elif epoch >= 3: # rule 4 of finality - check_finality(state, prev_state, True, True, True) + check_finality(spec, state, prev_state, True, True, True) assert state.finalized_epoch == 
prev_state.current_justified_epoch assert state.finalized_root == prev_state.current_justified_root - yield 'blocks', blocks, [spec.BeaconBlock] + yield 'blocks', blocks, List[spec.BeaconBlock] yield 'post', state +@with_all_phases @never_bls @spec_state_test -def test_finality_rule_1(state): +def test_finality_rule_1(spec, state): # get past first two epochs that finality does not run on - next_epoch(state) - apply_empty_block(state) - next_epoch(state) - apply_empty_block(state) + next_epoch(spec, state) + apply_empty_block(spec, state) + next_epoch(spec, state) + apply_empty_block(spec, state) yield 'pre', state blocks = [] for epoch in range(3): - prev_state, new_blocks, state = next_epoch_with_attestations(state, False, True) + prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, False, True) blocks += new_blocks if epoch == 0: - check_finality(state, prev_state, True, False, False) + check_finality(spec, state, prev_state, True, False, False) elif epoch == 1: - check_finality(state, prev_state, True, True, False) + check_finality(spec, state, prev_state, True, True, False) elif epoch == 2: # finalized by rule 1 - check_finality(state, prev_state, True, True, True) + check_finality(spec, state, prev_state, True, True, True) assert state.finalized_epoch == prev_state.previous_justified_epoch assert state.finalized_root == prev_state.previous_justified_root - yield 'blocks', blocks, [spec.BeaconBlock] + yield 'blocks', blocks, List[spec.BeaconBlock] yield 'post', state +@with_all_phases @never_bls @spec_state_test -def test_finality_rule_2(state): +def test_finality_rule_2(spec, state): # get past first two epochs that finality does not run on - next_epoch(state) - apply_empty_block(state) - next_epoch(state) - apply_empty_block(state) + next_epoch(spec, state) + apply_empty_block(spec, state) + next_epoch(spec, state) + apply_empty_block(spec, state) yield 'pre', state blocks = [] for epoch in range(3): if epoch == 0: - prev_state, 
new_blocks, state = next_epoch_with_attestations(state, True, False) - check_finality(state, prev_state, True, False, False) + prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, True, False) + check_finality(spec, state, prev_state, True, False, False) elif epoch == 1: - prev_state, new_blocks, state = next_epoch_with_attestations(state, False, False) - check_finality(state, prev_state, False, True, False) + prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, False, False) + check_finality(spec, state, prev_state, False, True, False) elif epoch == 2: - prev_state, new_blocks, state = next_epoch_with_attestations(state, False, True) + prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, False, True) # finalized by rule 2 - check_finality(state, prev_state, True, False, True) + check_finality(spec, state, prev_state, True, False, True) assert state.finalized_epoch == prev_state.previous_justified_epoch assert state.finalized_root == prev_state.previous_justified_root blocks += new_blocks - yield 'blocks', blocks, [spec.BeaconBlock] + yield 'blocks', blocks, List[spec.BeaconBlock] yield 'post', state +@with_all_phases @never_bls @spec_state_test -def test_finality_rule_3(state): +def test_finality_rule_3(spec, state): """ Test scenario described here https://github.com/ethereum/eth2.0-specs/issues/611#issuecomment-463612892 """ # get past first two epochs that finality does not run on - next_epoch(state) - apply_empty_block(state) - next_epoch(state) - apply_empty_block(state) + next_epoch(spec, state) + apply_empty_block(spec, state) + next_epoch(spec, state) + apply_empty_block(spec, state) yield 'pre', state blocks = [] - prev_state, new_blocks, state = next_epoch_with_attestations(state, True, False) + prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, True, False) blocks += new_blocks - check_finality(state, prev_state, True, False, False) + check_finality(spec, state, 
prev_state, True, False, False) # In epoch N, JE is set to N, prev JE is set to N-1 - prev_state, new_blocks, state = next_epoch_with_attestations(state, True, False) + prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, True, False) blocks += new_blocks - check_finality(state, prev_state, True, True, True) + check_finality(spec, state, prev_state, True, True, True) # In epoch N+1, JE is N, prev JE is N-1, and not enough messages get in to do anything - prev_state, new_blocks, state = next_epoch_with_attestations(state, False, False) + prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, False, False) blocks += new_blocks - check_finality(state, prev_state, False, True, False) + check_finality(spec, state, prev_state, False, True, False) # In epoch N+2, JE is N, prev JE is N, and enough messages from the previous epoch get in to justify N+1. # N+1 now becomes the JE. Not enough messages from epoch N+2 itself get in to justify N+2 - prev_state, new_blocks, state = next_epoch_with_attestations(state, False, True) + prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, False, True) blocks += new_blocks # rule 2 - check_finality(state, prev_state, True, False, True) + check_finality(spec, state, prev_state, True, False, True) # In epoch N+3, LJE is N+1, prev LJE is N, and enough messages get in to justify epochs N+2 and N+3. 
- prev_state, new_blocks, state = next_epoch_with_attestations(state, True, True) + prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, True, True) blocks += new_blocks # rule 3 - check_finality(state, prev_state, True, True, True) + check_finality(spec, state, prev_state, True, True, True) assert state.finalized_epoch == prev_state.current_justified_epoch assert state.finalized_root == prev_state.current_justified_root - yield 'blocks', blocks, [spec.BeaconBlock] + yield 'blocks', blocks, List[spec.BeaconBlock] yield 'post', state diff --git a/test_libs/pyspec/eth2spec/utils/hash_function.py b/test_libs/pyspec/eth2spec/utils/hash_function.py index 3fee63d82..f965827d0 100644 --- a/test_libs/pyspec/eth2spec/utils/hash_function.py +++ b/test_libs/pyspec/eth2spec/utils/hash_function.py @@ -1,6 +1,5 @@ from hashlib import sha256 -# from eth_utils import keccak -def hash(x): return sha256(x).digest() -# def hash(x): return keccak(x) +def hash(x): + return sha256(x).digest() diff --git a/test_libs/pyspec/eth2spec/utils/merkle_minimal.py b/test_libs/pyspec/eth2spec/utils/merkle_minimal.py index 7c5483de3..c508f0df2 100644 --- a/test_libs/pyspec/eth2spec/utils/merkle_minimal.py +++ b/test_libs/pyspec/eth2spec/utils/merkle_minimal.py @@ -1,7 +1,9 @@ from .hash_function import hash -zerohashes = [b'\x00' * 32] +ZERO_BYTES32 = b'\x00' * 32 + +zerohashes = [ZERO_BYTES32] for layer in range(1, 32): zerohashes.append(hash(zerohashes[layer - 1] + zerohashes[layer - 1])) @@ -28,3 +30,25 @@ def get_merkle_proof(tree, item_index): subindex = (item_index // 2**i) ^ 1 proof.append(tree[i][subindex] if subindex < len(tree[i]) else zerohashes[i]) return proof + + +def next_power_of_two(v: int) -> int: + """ + Get the next power of 2. (for 64 bit range ints). + 0 is a special case, to have non-empty defaults. 
+ Examples: + 0 -> 1, 1 -> 1, 2 -> 2, 3 -> 4, 32 -> 32, 33 -> 64 + """ + if v == 0: + return 1 + return 1 << (v - 1).bit_length() + + +def merkleize_chunks(chunks): + tree = chunks[::] + margin = next_power_of_two(len(chunks)) - len(chunks) + tree.extend([ZERO_BYTES32] * margin) + tree = [ZERO_BYTES32] * len(tree) + tree + for i in range(len(tree) // 2 - 1, 0, -1): + tree[i] = hash(tree[i * 2] + tree[i * 2 + 1]) + return tree[1] diff --git a/test_libs/pyspec/eth2spec/utils/minimal_ssz.py b/test_libs/pyspec/eth2spec/utils/minimal_ssz.py deleted file mode 100644 index 9cc2baebb..000000000 --- a/test_libs/pyspec/eth2spec/utils/minimal_ssz.py +++ /dev/null @@ -1,331 +0,0 @@ -from typing import Any - -from .hash_function import hash - -BYTES_PER_CHUNK = 32 -BYTES_PER_LENGTH_OFFSET = 4 -ZERO_CHUNK = b'\x00' * BYTES_PER_CHUNK - - -def SSZType(fields): - class SSZObject(): - def __init__(self, **kwargs): - for f, t in fields.items(): - if f not in kwargs: - setattr(self, f, get_zero_value(t)) - else: - setattr(self, f, kwargs[f]) - - def __eq__(self, other): - return self.fields == other.fields and self.serialize() == other.serialize() - - def __hash__(self): - return int.from_bytes(self.hash_tree_root(), byteorder="little") - - def __str__(self): - output = [] - for field in self.fields: - output.append(f'{field}: {getattr(self, field)}') - return "\n".join(output) - - def serialize(self): - return serialize_value(self, self.__class__) - - def hash_tree_root(self): - return hash_tree_root(self, self.__class__) - - SSZObject.fields = fields - return SSZObject - - -class Vector(): - def __init__(self, items): - self.items = items - self.length = len(items) - - def __getitem__(self, key): - return self.items[key] - - def __setitem__(self, key, value): - self.items[key] = value - - def __iter__(self): - return iter(self.items) - - def __len__(self): - return self.length - - -def is_basic(typ): - # if not a string, it is a complex, and cannot be basic - if not isinstance(typ, 
str): - return False - # "uintN": N-bit unsigned integer (where N in [8, 16, 32, 64, 128, 256]) - elif typ[:4] == 'uint' and typ[4:] in ['8', '16', '32', '64', '128', '256']: - return True - # "bool": True or False - elif typ == 'bool': - return True - # alias: "byte" -> "uint8" - elif typ == 'byte': - return True - # default - else: - return False - - -def is_constant_sized(typ): - # basic objects are fixed size by definition - if is_basic(typ): - return True - # dynamic size array type, "list": [elem_type]. - # Not constant size by definition. - elif isinstance(typ, list) and len(typ) == 1: - return False - # fixed size array type, "vector": [elem_type, length] - # Constant size, but only if the elements are. - elif isinstance(typ, list) and len(typ) == 2: - return is_constant_sized(typ[0]) - # bytes array (fixed or dynamic size) - elif isinstance(typ, str) and typ[:5] == 'bytes': - # if no length suffix, it has a dynamic size - return typ != 'bytes' - # containers are only constant-size if all of the fields are constant size. 
- elif hasattr(typ, 'fields'): - for subtype in typ.fields.values(): - if not is_constant_sized(subtype): - return False - return True - else: - raise Exception("Type not recognized") - - -def coerce_to_bytes(x): - if isinstance(x, str): - o = x.encode('utf-8') - assert len(o) == len(x) - return o - elif isinstance(x, bytes): - return x - else: - raise Exception("Expecting bytes") - - -def encode_series(values, types): - # Recursively serialize - parts = [(is_constant_sized(types[i]), serialize_value(values[i], types[i])) for i in range(len(values))] - - # Compute and check lengths - fixed_lengths = [len(serialized) if constant_size else BYTES_PER_LENGTH_OFFSET - for (constant_size, serialized) in parts] - variable_lengths = [len(serialized) if not constant_size else 0 - for (constant_size, serialized) in parts] - - # Check if integer is not out of bounds (Python) - assert sum(fixed_lengths + variable_lengths) < 2 ** (BYTES_PER_LENGTH_OFFSET * 8) - - # Interleave offsets of variable-size parts with fixed-size parts. - # Avoid quadratic complexity in calculation of offsets. 
- offset = sum(fixed_lengths) - variable_parts = [] - fixed_parts = [] - for (constant_size, serialized) in parts: - if constant_size: - fixed_parts.append(serialized) - else: - fixed_parts.append(offset.to_bytes(BYTES_PER_LENGTH_OFFSET, 'little')) - variable_parts.append(serialized) - offset += len(serialized) - - # Return the concatenation of the fixed-size parts (offsets interleaved) with the variable-size parts - return b"".join(fixed_parts + variable_parts) - - -def serialize_value(value, typ=None): - if typ is None: - typ = infer_type(value) - # "uintN" - if isinstance(typ, str) and typ[:4] == 'uint': - length = int(typ[4:]) - assert length in (8, 16, 32, 64, 128, 256) - return value.to_bytes(length // 8, 'little') - # "bool" - elif isinstance(typ, str) and typ == 'bool': - assert value in (True, False) - return b'\x01' if value is True else b'\x00' - # Vector - elif isinstance(typ, list) and len(typ) == 2: - # (regardless of element type, sanity-check if the length reported in the vector type matches the value length) - assert len(value) == typ[1] - return encode_series(value, [typ[0]] * len(value)) - # List - elif isinstance(typ, list) and len(typ) == 1: - return encode_series(value, [typ[0]] * len(value)) - # "bytes" (variable size) - elif isinstance(typ, str) and typ == 'bytes': - return coerce_to_bytes(value) - # "bytesN" (fixed size) - elif isinstance(typ, str) and len(typ) > 5 and typ[:5] == 'bytes': - assert len(value) == int(typ[5:]), (value, int(typ[5:])) - return coerce_to_bytes(value) - # containers - elif hasattr(typ, 'fields'): - values = [getattr(value, field) for field in typ.fields.keys()] - types = list(typ.fields.values()) - return encode_series(values, types) - else: - print(value, typ) - raise Exception("Type not recognized") - - -def get_zero_value(typ: Any) -> Any: - if isinstance(typ, str): - # Bytes array - if typ == 'bytes': - return b'' - # bytesN - elif typ[:5] == 'bytes' and len(typ) > 5: - length = int(typ[5:]) - return b'\x00' * 
length - # Basic types - elif typ == 'bool': - return False - elif typ[:4] == 'uint': - return 0 - elif typ == 'byte': - return 0x00 - else: - raise ValueError("Type not recognized") - # Vector: - elif isinstance(typ, list) and len(typ) == 2: - return [get_zero_value(typ[0]) for _ in range(typ[1])] - # List: - elif isinstance(typ, list) and len(typ) == 1: - return [] - # Container: - elif hasattr(typ, 'fields'): - return typ(**{field: get_zero_value(subtype) for field, subtype in typ.fields.items()}) - else: - print(typ) - raise Exception("Type not recognized") - - -def chunkify(bytez): - bytez += b'\x00' * (-len(bytez) % BYTES_PER_CHUNK) - return [bytez[i:i + 32] for i in range(0, len(bytez), 32)] - - -def pack(values, subtype): - return chunkify(b''.join([serialize_value(value, subtype) for value in values])) - - -def is_power_of_two(x): - return x > 0 and x & (x - 1) == 0 - - -def merkleize(chunks): - tree = chunks[::] - while not is_power_of_two(len(tree)): - tree.append(ZERO_CHUNK) - tree = [ZERO_CHUNK] * len(tree) + tree - for i in range(len(tree) // 2 - 1, 0, -1): - tree[i] = hash(tree[i * 2] + tree[i * 2 + 1]) - return tree[1] - - -def mix_in_length(root, length): - return hash(root + length.to_bytes(32, 'little')) - - -def infer_type(value): - """ - Note: defaults to uint64 for integer type inference due to lack of information. - Other integer sizes are still supported, see spec. - :param value: The value to infer a SSZ type for. - :return: The SSZ type. - """ - if hasattr(value.__class__, 'fields'): - return value.__class__ - elif isinstance(value, Vector): - if len(value) > 0: - return [infer_type(value[0]), len(value)] - else: - # Element type does not matter too much, - # assumed to be a basic type for size-encoding purposes, vector is empty. - return ['uint64'] - elif isinstance(value, list): - if len(value) > 0: - return [infer_type(value[0])] - else: - # Element type does not matter, list-content size will be encoded regardless, list is empty. 
- return ['uint64'] - elif isinstance(value, (bytes, str)): - return 'bytes' - elif isinstance(value, int): - return 'uint64' - else: - raise Exception("Failed to infer type") - - -def hash_tree_root(value, typ=None): - if typ is None: - typ = infer_type(value) - # ------------------------------------- - # merkleize(pack(value)) - # basic object: merkleize packed version (merkleization pads it to 32 bytes if it is not already) - if is_basic(typ): - return merkleize(pack([value], typ)) - # or a vector of basic objects - elif isinstance(typ, list) and len(typ) == 2 and is_basic(typ[0]): - assert len(value) == typ[1] - return merkleize(pack(value, typ[0])) - # ------------------------------------- - # mix_in_length(merkleize(pack(value)), len(value)) - # if value is a list of basic objects - elif isinstance(typ, list) and len(typ) == 1 and is_basic(typ[0]): - return mix_in_length(merkleize(pack(value, typ[0])), len(value)) - # (needs some extra work for non-fixed-sized bytes array) - elif typ == 'bytes': - return mix_in_length(merkleize(chunkify(coerce_to_bytes(value))), len(value)) - # ------------------------------------- - # merkleize([hash_tree_root(element) for element in value]) - # if value is a vector of composite objects - elif isinstance(typ, list) and len(typ) == 2 and not is_basic(typ[0]): - return merkleize([hash_tree_root(element, typ[0]) for element in value]) - # (needs some extra work for fixed-sized bytes array) - elif isinstance(typ, str) and typ[:5] == 'bytes' and len(typ) > 5: - assert len(value) == int(typ[5:]) - return merkleize(chunkify(coerce_to_bytes(value))) - # or a container - elif hasattr(typ, 'fields'): - return merkleize([hash_tree_root(getattr(value, field), subtype) for field, subtype in typ.fields.items()]) - # ------------------------------------- - # mix_in_length(merkleize([hash_tree_root(element) for element in value]), len(value)) - # if value is a list of composite objects - elif isinstance(typ, list) and len(typ) == 1 and not 
is_basic(typ[0]): - return mix_in_length(merkleize([hash_tree_root(element, typ[0]) for element in value]), len(value)) - # ------------------------------------- - else: - raise Exception("Type not recognized") - - -def truncate(container): - field_keys = list(container.fields.keys()) - truncated_fields = { - key: container.fields[key] - for key in field_keys[:-1] - } - truncated_class = SSZType(truncated_fields) - kwargs = { - field: getattr(container, field) - for field in field_keys[:-1] - } - return truncated_class(**kwargs) - - -def signing_root(container): - return hash_tree_root(truncate(container)) - - -def serialize(ssz_object): - return getattr(ssz_object, 'serialize')() diff --git a/test_libs/pyspec/eth2spec/utils/ssz/__init__.py b/test_libs/pyspec/eth2spec/utils/ssz/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test_libs/pyspec/eth2spec/utils/ssz/ssz_impl.py b/test_libs/pyspec/eth2spec/utils/ssz/ssz_impl.py new file mode 100644 index 000000000..b3c877d48 --- /dev/null +++ b/test_libs/pyspec/eth2spec/utils/ssz/ssz_impl.py @@ -0,0 +1,163 @@ +from ..merkle_minimal import merkleize_chunks, hash +from eth2spec.utils.ssz.ssz_typing import ( + is_uint_type, is_bool_type, is_container_type, + is_list_kind, is_vector_kind, + read_vector_elem_type, read_elem_type, + uint_byte_size, + infer_input_type, + get_zero_value, +) + +# SSZ Serialization +# ----------------------------- + +BYTES_PER_LENGTH_OFFSET = 4 + + +def is_basic_type(typ): + return is_uint_type(typ) or is_bool_type(typ) + + +def serialize_basic(value, typ): + if is_uint_type(typ): + return value.to_bytes(uint_byte_size(typ), 'little') + elif is_bool_type(typ): + if value: + return b'\x01' + else: + return b'\x00' + else: + raise Exception("Type not supported: {}".format(typ)) + + +def deserialize_basic(value, typ): + if is_uint_type(typ): + return typ(int.from_bytes(value, 'little')) + elif is_bool_type(typ): + assert value in (b'\x00', b'\x01') + return True if value == 
b'\x01' else False + else: + raise Exception("Type not supported: {}".format(typ)) + + +def is_fixed_size(typ): + if is_basic_type(typ): + return True + elif is_list_kind(typ): + return False + elif is_vector_kind(typ): + return is_fixed_size(read_vector_elem_type(typ)) + elif is_container_type(typ): + return all(is_fixed_size(t) for t in typ.get_field_types()) + else: + raise Exception("Type not supported: {}".format(typ)) + + +def is_empty(obj): + return get_zero_value(type(obj)) == obj + + +@infer_input_type +def serialize(obj, typ=None): + if is_basic_type(typ): + return serialize_basic(obj, typ) + elif is_list_kind(typ) or is_vector_kind(typ): + return encode_series(obj, [read_elem_type(typ)] * len(obj)) + elif is_container_type(typ): + return encode_series(obj.get_field_values(), typ.get_field_types()) + else: + raise Exception("Type not supported: {}".format(typ)) + + +def encode_series(values, types): + # bytes and bytesN are already in the right format. + if isinstance(values, bytes): + return values + + # Recursively serialize + parts = [(is_fixed_size(types[i]), serialize(values[i], typ=types[i])) for i in range(len(values))] + + # Compute and check lengths + fixed_lengths = [len(serialized) if constant_size else BYTES_PER_LENGTH_OFFSET + for (constant_size, serialized) in parts] + variable_lengths = [len(serialized) if not constant_size else 0 + for (constant_size, serialized) in parts] + + # Check if integer is not out of bounds (Python) + assert sum(fixed_lengths + variable_lengths) < 2 ** (BYTES_PER_LENGTH_OFFSET * 8) + + # Interleave offsets of variable-size parts with fixed-size parts. + # Avoid quadratic complexity in calculation of offsets. 
+ offset = sum(fixed_lengths) + variable_parts = [] + fixed_parts = [] + for (constant_size, serialized) in parts: + if constant_size: + fixed_parts.append(serialized) + else: + fixed_parts.append(offset.to_bytes(BYTES_PER_LENGTH_OFFSET, 'little')) + variable_parts.append(serialized) + offset += len(serialized) + + # Return the concatenation of the fixed-size parts (offsets interleaved) with the variable-size parts + return b''.join(fixed_parts + variable_parts) + + +# SSZ Hash-tree-root +# ----------------------------- + + +def pack(values, subtype): + if isinstance(values, bytes): + return values + return b''.join([serialize_basic(value, subtype) for value in values]) + + +def chunkify(bytez): + # pad `bytez` to nearest 32-byte multiple + bytez += b'\x00' * (-len(bytez) % 32) + return [bytez[i:i + 32] for i in range(0, len(bytez), 32)] + + +def mix_in_length(root, length): + return hash(root + length.to_bytes(32, 'little')) + + +def is_bottom_layer_kind(typ): + return ( + is_basic_type(typ) or + (is_list_kind(typ) or is_vector_kind(typ)) and is_basic_type(read_elem_type(typ)) + ) + + +@infer_input_type +def get_typed_values(obj, typ=None): + if is_container_type(typ): + return obj.get_typed_values() + elif is_list_kind(typ) or is_vector_kind(typ): + elem_type = read_elem_type(typ) + return list(zip(obj, [elem_type] * len(obj))) + else: + raise Exception("Invalid type") + + +@infer_input_type +def hash_tree_root(obj, typ=None): + if is_bottom_layer_kind(typ): + data = serialize_basic(obj, typ) if is_basic_type(typ) else pack(obj, read_elem_type(typ)) + leaves = chunkify(data) + else: + fields = get_typed_values(obj, typ=typ) + leaves = [hash_tree_root(field_value, typ=field_typ) for field_value, field_typ in fields] + if is_list_kind(typ): + return mix_in_length(merkleize_chunks(leaves), len(obj)) + else: + return merkleize_chunks(leaves) + + +@infer_input_type +def signing_root(obj, typ): + assert is_container_type(typ) + # ignore last field + leaves = 
[hash_tree_root(field_value, typ=field_typ) for field_value, field_typ in obj.get_typed_values()[:-1]] + return merkleize_chunks(chunkify(b''.join(leaves))) diff --git a/test_libs/pyspec/eth2spec/utils/ssz/ssz_typing.py b/test_libs/pyspec/eth2spec/utils/ssz/ssz_typing.py new file mode 100644 index 000000000..368041f90 --- /dev/null +++ b/test_libs/pyspec/eth2spec/utils/ssz/ssz_typing.py @@ -0,0 +1,525 @@ +from typing import List, Iterable, TypeVar, Type, NewType +from typing import Union +from typing_inspect import get_origin + +# SSZ integers +# ----------------------------- + + +class uint(int): + byte_len = 0 + + def __new__(cls, value, *args, **kwargs): + if value < 0: + raise ValueError("unsigned types must not be negative") + return super().__new__(cls, value) + + +class uint8(uint): + byte_len = 1 + + def __new__(cls, value, *args, **kwargs): + if value.bit_length() > 8: + raise ValueError("value out of bounds for uint8") + return super().__new__(cls, value) + + +# Alias for uint8 +byte = NewType('byte', uint8) + + +class uint16(uint): + byte_len = 2 + + def __new__(cls, value, *args, **kwargs): + if value.bit_length() > 16: + raise ValueError("value out of bounds for uint16") + return super().__new__(cls, value) + + +class uint32(uint): + byte_len = 4 + + def __new__(cls, value, *args, **kwargs): + if value.bit_length() > 32: + raise ValueError("value out of bounds for uint16") + return super().__new__(cls, value) + + +# We simply default to uint64. 
But do give it a name, for readability +uint64 = NewType('uint64', int) + + +class uint128(uint): + byte_len = 16 + + def __new__(cls, value, *args, **kwargs): + if value.bit_length() > 128: + raise ValueError("value out of bounds for uint128") + return super().__new__(cls, value) + + +class uint256(uint): + byte_len = 32 + + def __new__(cls, value, *args, **kwargs): + if value.bit_length() > 256: + raise ValueError("value out of bounds for uint256") + return super().__new__(cls, value) + + +def is_uint_type(typ): + # All integers are uint in the scope of the spec here. + # Since we default to uint64. Bounds can be checked elsewhere. + # However, some are wrapped in a NewType + if hasattr(typ, '__supertype__'): + # get the type that the NewType is wrapping + typ = typ.__supertype__ + + return isinstance(typ, type) and issubclass(typ, int) and not issubclass(typ, bool) + + +def uint_byte_size(typ): + if hasattr(typ, '__supertype__'): + typ = typ.__supertype__ + + if isinstance(typ, type): + if issubclass(typ, uint): + return typ.byte_len + elif issubclass(typ, int): + # Default to uint64 + return 8 + else: + raise TypeError("Type %s is not an uint (or int-default uint64) type" % typ) + + +# SSZ Container base class +# ----------------------------- + +# Note: importing ssz functionality locally, to avoid import loop + +class Container(object): + + def __init__(self, **kwargs): + cls = self.__class__ + for f, t in cls.get_fields(): + if f not in kwargs: + setattr(self, f, get_zero_value(t)) + else: + setattr(self, f, kwargs[f]) + + def serialize(self): + from .ssz_impl import serialize + return serialize(self, self.__class__) + + def hash_tree_root(self): + from .ssz_impl import hash_tree_root + return hash_tree_root(self, self.__class__) + + def signing_root(self): + from .ssz_impl import signing_root + return signing_root(self, self.__class__) + + def get_field_values(self): + cls = self.__class__ + return [getattr(self, field) for field in cls.get_field_names()] + 
+ def __repr__(self): + return repr({field: getattr(self, field) for field in self.get_field_names()}) + + def __str__(self): + output = [] + for field in self.get_field_names(): + output.append(f'{field}: {getattr(self, field)}') + return "\n".join(output) + + def __eq__(self, other): + return self.hash_tree_root() == other.hash_tree_root() + + def __hash__(self): + return hash(self.hash_tree_root()) + + @classmethod + def get_fields_dict(cls): + return dict(cls.__annotations__) + + @classmethod + def get_fields(cls): + return list(dict(cls.__annotations__).items()) + + def get_typed_values(self): + return list(zip(self.get_field_values(), self.get_field_types())) + + @classmethod + def get_field_names(cls): + return list(cls.__annotations__.keys()) + + @classmethod + def get_field_types(cls): + # values of annotations are the types corresponding to the fields, not instance values. + return list(cls.__annotations__.values()) + + +# SSZ vector +# ----------------------------- + + +def _is_vector_instance_of(a, b): + # Other must not be a BytesN + if issubclass(b, bytes): + return False + elif not hasattr(b, 'elem_type') or not hasattr(b, 'length'): + # Vector (b) is not an instance of Vector[X, Y] (a) + return False + elif not hasattr(a, 'elem_type') or not hasattr(a, 'length'): + # Vector[X, Y] (b) is an instance of Vector (a) + return True + else: + # Vector[X, Y] (a) is an instance of Vector[X, Y] (b) + return a.elem_type == b.elem_type and a.length == b.length + + +def _is_equal_vector_type(a, b): + # Other must not be a BytesN + if issubclass(b, bytes): + return False + elif not hasattr(a, 'elem_type') or not hasattr(a, 'length'): + if not hasattr(b, 'elem_type') or not hasattr(b, 'length'): + # Vector == Vector + return True + else: + # Vector != Vector[X, Y] + return False + elif not hasattr(b, 'elem_type') or not hasattr(b, 'length'): + # Vector[X, Y] != Vector + return False + else: + # Vector[X, Y] == Vector[X, Y] + return a.elem_type == b.elem_type and 
a.length == b.length + + +class VectorMeta(type): + def __new__(cls, class_name, parents, attrs): + out = type.__new__(cls, class_name, parents, attrs) + if 'elem_type' in attrs and 'length' in attrs: + setattr(out, 'elem_type', attrs['elem_type']) + setattr(out, 'length', attrs['length']) + return out + + def __getitem__(self, params): + if not isinstance(params, tuple) or len(params) != 2: + raise Exception("Vector must be instantiated with two args: elem type and length") + o = self.__class__(self.__name__, (Vector,), {'elem_type': params[0], 'length': params[1]}) + o._name = 'Vector' + return o + + def __subclasscheck__(self, sub): + return _is_vector_instance_of(self, sub) + + def __instancecheck__(self, other): + return _is_vector_instance_of(self, other.__class__) + + def __eq__(self, other): + return _is_equal_vector_type(self, other) + + def __ne__(self, other): + return not _is_equal_vector_type(self, other) + + def __hash__(self): + return hash(self.__class__) + + +class Vector(metaclass=VectorMeta): + + def __init__(self, *args: Iterable): + cls = self.__class__ + if not hasattr(cls, 'elem_type'): + raise TypeError("Type Vector without elem_type data cannot be instantiated") + elif not hasattr(cls, 'length'): + raise TypeError("Type Vector without length data cannot be instantiated") + + if len(args) != cls.length: + if len(args) == 0: + args = [get_zero_value(cls.elem_type) for _ in range(cls.length)] + else: + raise TypeError("Typed vector with length %d cannot hold %d items" % (cls.length, len(args))) + + self.items = list(args) + + # cannot check non-type objects, or parametrized types + if isinstance(cls.elem_type, type) and not hasattr(cls.elem_type, '__args__'): + for i, item in enumerate(self.items): + if not issubclass(type(item), cls.elem_type): + raise TypeError("Typed vector cannot hold differently typed value" + " at index %d. 
Got type: %s, expected type: %s" % (i, type(item), cls.elem_type)) + + def serialize(self): + from .ssz_impl import serialize + return serialize(self, self.__class__) + + def hash_tree_root(self): + from .ssz_impl import hash_tree_root + return hash_tree_root(self, self.__class__) + + def __repr__(self): + return repr({'length': self.__class__.length, 'items': self.items}) + + def __getitem__(self, key): + return self.items[key] + + def __setitem__(self, key, value): + self.items[key] = value + + def __iter__(self): + return iter(self.items) + + def __len__(self): + return len(self.items) + + def __eq__(self, other): + return self.hash_tree_root() == other.hash_tree_root() + + +# SSZ BytesN +# ----------------------------- + + +def _is_bytes_n_instance_of(a, b): + # Other has to be a Bytes derivative class to be a BytesN + if not issubclass(b, bytes): + return False + elif not hasattr(b, 'length'): + # BytesN (b) is not an instance of BytesN[X] (a) + return False + elif not hasattr(a, 'length'): + # BytesN[X] (b) is an instance of BytesN (a) + return True + else: + # BytesN[X] (a) is an instance of BytesN[X] (b) + return a.length == b.length + + +def _is_equal_bytes_n_type(a, b): + # Other has to be a Bytes derivative class to be a BytesN + if not issubclass(b, bytes): + return False + elif not hasattr(a, 'length'): + if not hasattr(b, 'length'): + # BytesN == BytesN + return True + else: + # BytesN != BytesN[X] + return False + elif not hasattr(b, 'length'): + # BytesN[X] != BytesN + return False + else: + # BytesN[X] == BytesN[X] + return a.length == b.length + + +class BytesNMeta(type): + def __new__(cls, class_name, parents, attrs): + out = type.__new__(cls, class_name, parents, attrs) + if 'length' in attrs: + setattr(out, 'length', attrs['length']) + out._name = 'BytesN' + out.elem_type = byte + return out + + def __getitem__(self, n): + return self.__class__(self.__name__, (BytesN,), {'length': n}) + + def __subclasscheck__(self, sub): + return 
_is_bytes_n_instance_of(self, sub) + + def __instancecheck__(self, other): + return _is_bytes_n_instance_of(self, other.__class__) + + def __eq__(self, other): + return _is_equal_bytes_n_type(self, other) + + def __ne__(self, other): + return not _is_equal_bytes_n_type(self, other) + + def __hash__(self): + return hash(self.__class__) + + +def parse_bytes(val): + if val is None: + return None + elif isinstance(val, str): + # TODO: import from eth-utils instead, and do: hexstr_if_str(to_bytes, val) + return None + elif isinstance(val, bytes): + return val + elif isinstance(val, int): + return bytes([val]) + else: + return None + + +class BytesN(bytes, metaclass=BytesNMeta): + def __new__(cls, *args): + if not hasattr(cls, 'length'): + return + bytesval = None + if len(args) == 1: + val: Union[bytes, int, str] = args[0] + bytesval = parse_bytes(val) + elif len(args) > 1: + # TODO: each int is 1 byte, check size, create bytesval + bytesval = bytes(args) + + if bytesval is None: + if cls.length == 0: + bytesval = b'' + else: + bytesval = b'\x00' * cls.length + if len(bytesval) != cls.length: + raise TypeError("BytesN[%d] cannot be initialized with value of %d bytes" % (cls.length, len(bytesval))) + return super().__new__(cls, bytesval) + + def serialize(self): + from .ssz_impl import serialize + return serialize(self, self.__class__) + + def hash_tree_root(self): + from .ssz_impl import hash_tree_root + return hash_tree_root(self, self.__class__) + + +# SSZ Defaults +# ----------------------------- +def get_zero_value(typ): + if is_uint_type(typ): + return 0 + elif is_list_type(typ): + return [] + elif is_bool_type(typ): + return False + elif is_vector_type(typ): + return typ() + elif is_bytesn_type(typ): + return typ() + elif is_bytes_type(typ): + return b'' + elif is_container_type(typ): + return typ(**{f: get_zero_value(t) for f, t in typ.get_fields()}) + else: + raise Exception("Type not supported: {}".format(typ)) + + +# Type helpers +# 
----------------------------- + + +def infer_type(obj): + if is_uint_type(obj.__class__): + return obj.__class__ + elif isinstance(obj, int): + return uint64 + elif isinstance(obj, list): + return List[infer_type(obj[0])] + elif isinstance(obj, (Vector, Container, bool, BytesN, bytes)): + return obj.__class__ + else: + raise Exception("Unknown type for {}".format(obj)) + + +def infer_input_type(fn): + """ + Decorator to run infer_type on the obj if typ argument is None + """ + def infer_helper(obj, typ=None, **kwargs): + if typ is None: + typ = infer_type(obj) + return fn(obj, typ=typ, **kwargs) + return infer_helper + + +def is_bool_type(typ): + """ + Check if the given type is a bool. + """ + if hasattr(typ, '__supertype__'): + typ = typ.__supertype__ + return isinstance(typ, type) and issubclass(typ, bool) + + +def is_list_type(typ): + """ + Check if the given type is a list. + """ + return get_origin(typ) is List or get_origin(typ) is list + + +def is_bytes_type(typ): + """ + Check if the given type is a ``bytes``. + """ + # Do not accept subclasses of bytes here, to avoid confusion with BytesN + return typ == bytes + + +def is_bytesn_type(typ): + """ + Check if the given type is a BytesN. + """ + return isinstance(typ, type) and issubclass(typ, BytesN) + + +def is_list_kind(typ): + """ + Check if the given type is a kind of list. Can be bytes. + """ + return is_list_type(typ) or is_bytes_type(typ) + + +def is_vector_type(typ): + """ + Check if the given type is a vector. + """ + return isinstance(typ, type) and issubclass(typ, Vector) + + +def is_vector_kind(typ): + """ + Check if the given type is a kind of vector. Can be BytesN. + """ + return is_vector_type(typ) or is_bytesn_type(typ) + + +def is_container_type(typ): + """ + Check if the given type is a container. 
+ """ + return isinstance(typ, type) and issubclass(typ, Container) + + +T = TypeVar('T') +L = TypeVar('L') + + +def read_list_elem_type(list_typ: Type[List[T]]) -> T: + if list_typ.__args__ is None or len(list_typ.__args__) != 1: + raise TypeError("Supplied list-type is invalid, no element type found.") + return list_typ.__args__[0] + + +def read_vector_elem_type(vector_typ: Type[Vector[T, L]]) -> T: + return vector_typ.elem_type + + +def read_elem_type(typ): + if typ == bytes: + return byte + elif is_list_type(typ): + return read_list_elem_type(typ) + elif is_vector_type(typ): + return read_vector_elem_type(typ) + elif issubclass(typ, bytes): # bytes or bytesN + return byte + else: + raise TypeError("Unexpected type: {}".format(typ)) diff --git a/test_libs/pyspec/requirements-testing.txt b/test_libs/pyspec/requirements-testing.txt index 388a878a9..331d0fa28 100644 --- a/test_libs/pyspec/requirements-testing.txt +++ b/test_libs/pyspec/requirements-testing.txt @@ -1,3 +1,4 @@ -r requirements.txt pytest>=3.6,<3.7 ../config_helpers +flake8==3.7.7 diff --git a/test_libs/pyspec/requirements.txt b/test_libs/pyspec/requirements.txt index 78d41708d..3b38930bd 100644 --- a/test_libs/pyspec/requirements.txt +++ b/test_libs/pyspec/requirements.txt @@ -2,3 +2,4 @@ eth-utils>=1.3.0,<2 eth-typing>=2.1.0,<3.0.0 pycryptodome==3.7.3 py_ecc>=1.6.0 +typing_inspect==0.4.0 diff --git a/test_libs/pyspec/setup.py b/test_libs/pyspec/setup.py index 1a131a417..e99b911ee 100644 --- a/test_libs/pyspec/setup.py +++ b/test_libs/pyspec/setup.py @@ -9,5 +9,6 @@ setup( "eth-typing>=2.1.0,<3.0.0", "pycryptodome==3.7.3", "py_ecc>=1.6.0", + "typing_inspect==0.4.0" ] )