Mirror of https://github.com/ethereum/consensus-specs.git (synced 2026-02-01 15:34:55 -05:00)
Merge branch 'dev' into pr3288
.circleci/config.yml
@@ -60,7 +60,7 @@ commands:
|
||||
jobs:
|
||||
checkout_specs:
|
||||
docker:
|
||||
- image: circleci/python:3.8
|
||||
- image: circleci/python:3.9
|
||||
working_directory: ~/specs-repo
|
||||
steps:
|
||||
# Restore git repo at point close to target branch/revision, to speed up checkout
|
||||
@@ -80,7 +80,7 @@ jobs:
|
||||
- ~/specs-repo
|
||||
install_pyspec_test:
|
||||
docker:
|
||||
- image: circleci/python:3.8
|
||||
- image: circleci/python:3.9
|
||||
working_directory: ~/specs-repo
|
||||
steps:
|
||||
- restore_cache:
|
||||
@@ -92,7 +92,7 @@ jobs:
|
||||
- save_pyspec_cached_venv
|
||||
test-phase0:
|
||||
docker:
|
||||
- image: circleci/python:3.8
|
||||
- image: circleci/python:3.9
|
||||
working_directory: ~/specs-repo
|
||||
steps:
|
||||
- restore_cache:
|
||||
@@ -105,7 +105,7 @@ jobs:
|
||||
path: tests/core/pyspec/test-reports
|
||||
test-altair:
|
||||
docker:
|
||||
- image: circleci/python:3.8
|
||||
- image: circleci/python:3.9
|
||||
working_directory: ~/specs-repo
|
||||
steps:
|
||||
- restore_cache:
|
||||
@@ -118,7 +118,7 @@ jobs:
|
||||
path: tests/core/pyspec/test-reports
|
||||
test-bellatrix:
|
||||
docker:
|
||||
- image: circleci/python:3.8
|
||||
- image: circleci/python:3.9
|
||||
working_directory: ~/specs-repo
|
||||
steps:
|
||||
- restore_cache:
|
||||
@@ -131,7 +131,7 @@ jobs:
|
||||
path: tests/core/pyspec/test-reports
|
||||
test-capella:
|
||||
docker:
|
||||
- image: circleci/python:3.8
|
||||
- image: circleci/python:3.9
|
||||
working_directory: ~/specs-repo
|
||||
steps:
|
||||
- restore_cache:
|
||||
@@ -144,7 +144,7 @@ jobs:
|
||||
path: tests/core/pyspec/test-reports
|
||||
test-deneb:
|
||||
docker:
|
||||
- image: circleci/python:3.8
|
||||
- image: circleci/python:3.9
|
||||
working_directory: ~/specs-repo
|
||||
steps:
|
||||
- restore_cache:
|
||||
@@ -155,6 +155,19 @@ jobs:
|
||||
command: make citest fork=deneb
|
||||
- store_test_results:
|
||||
path: tests/core/pyspec/test-reports
|
||||
test-eip6110:
|
||||
docker:
|
||||
- image: circleci/python:3.9
|
||||
working_directory: ~/specs-repo
|
||||
steps:
|
||||
- restore_cache:
|
||||
key: v3-specs-repo-{{ .Branch }}-{{ .Revision }}
|
||||
- restore_pyspec_cached_venv
|
||||
- run:
|
||||
name: Run py-tests
|
||||
command: make citest fork=eip6110
|
||||
- store_test_results:
|
||||
path: tests/core/pyspec/test-reports
|
||||
table_of_contents:
|
||||
docker:
|
||||
- image: circleci/node:10.16.3
|
||||
@@ -166,7 +179,7 @@ jobs:
|
||||
command: sudo npm install -g doctoc@2 && make check_toc
|
||||
codespell:
|
||||
docker:
|
||||
- image: circleci/python:3.8
|
||||
- image: circleci/python:3.9
|
||||
working_directory: ~/specs-repo
|
||||
steps:
|
||||
- checkout
|
||||
@@ -175,7 +188,7 @@ jobs:
|
||||
command: pip install 'codespell<3.0.0,>=2.0.0' --user && make codespell
|
||||
lint:
|
||||
docker:
|
||||
- image: circleci/python:3.8
|
||||
- image: circleci/python:3.9
|
||||
working_directory: ~/specs-repo
|
||||
steps:
|
||||
- restore_cache:
|
||||
@@ -231,7 +244,7 @@ jobs:
|
||||
- /nix
|
||||
install_deposit_contract_web3_tester:
|
||||
docker:
|
||||
- image: circleci/python:3.8
|
||||
- image: circleci/python:3.9
|
||||
working_directory: ~/specs-repo
|
||||
steps:
|
||||
- restore_cache:
|
||||
@@ -243,7 +256,7 @@ jobs:
|
||||
- save_deposit_contract_tester_cached_venv
|
||||
test_deposit_contract_web3_tests:
|
||||
docker:
|
||||
- image: circleci/python:3.8
|
||||
- image: circleci/python:3.9
|
||||
working_directory: ~/specs-repo
|
||||
steps:
|
||||
- restore_cache:
|
||||
@@ -275,6 +288,9 @@ workflows:
|
||||
- test-deneb:
|
||||
requires:
|
||||
- install_pyspec_test
|
||||
- test-eip6110:
|
||||
requires:
|
||||
- install_pyspec_test
|
||||
- table_of_contents
|
||||
- codespell
|
||||
- lint:
|
||||
|
||||
.github/workflows/docs.yml (vendored, new file, 24 lines)
@@ -0,0 +1,24 @@
|
||||
|
||||
name: Publish docs
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
permissions:
|
||||
contents: write
|
||||
jobs:
|
||||
deploy:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Build docs
|
||||
run: make copy_docs
|
||||
- uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: 3.x
|
||||
- uses: actions/cache@v2
|
||||
with:
|
||||
key: ${{ github.ref }}
|
||||
path: .cache
|
||||
- run: pip install -e .[docs]
|
||||
- run: mkdocs gh-deploy --force
|
||||
.github/workflows/run-tests.yml (vendored, 2 changed lines)
@@ -83,7 +83,7 @@ jobs:
|
||||
needs: [preclear,lint,codespell,table_of_contents]
|
||||
strategy:
|
||||
matrix:
|
||||
version: ["phase0", "altair", "bellatrix", "capella", "deneb"]
|
||||
version: ["phase0", "altair", "bellatrix", "capella", "deneb", "eip6110"]
|
||||
steps:
|
||||
- name: Checkout this repo
|
||||
uses: actions/checkout@v3.2.0
|
||||
|
||||
.gitignore (vendored, 9 changed lines)
@@ -20,6 +20,7 @@ tests/core/pyspec/eth2spec/altair/
|
||||
tests/core/pyspec/eth2spec/bellatrix/
|
||||
tests/core/pyspec/eth2spec/capella/
|
||||
tests/core/pyspec/eth2spec/deneb/
|
||||
tests/core/pyspec/eth2spec/eip6110/
|
||||
|
||||
# coverage reports
|
||||
.htmlcov
|
||||
@@ -34,3 +35,11 @@ tests/core/pyspec/eth2spec/test_results.xml
|
||||
|
||||
# TOC tool outputs temporary files
|
||||
*.tmp
|
||||
|
||||
# docs reader build
|
||||
docs/specs
|
||||
docs/sync
|
||||
docs/ssz
|
||||
docs/fork_choice
|
||||
docs/README.md
|
||||
site
|
||||
|
||||
Makefile (27 changed lines)
@@ -19,6 +19,11 @@ GENERATORS = $(sort $(dir $(wildcard $(GENERATOR_DIR)/*/.)))
|
||||
# Map this list of generator paths to "gen_{generator name}" entries
|
||||
GENERATOR_TARGETS = $(patsubst $(GENERATOR_DIR)/%/, gen_%, $(GENERATORS))
|
||||
GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%venv, $(GENERATORS))
|
||||
# Documents
|
||||
DOCS_DIR = ./docs
|
||||
SSZ_DIR = ./ssz
|
||||
SYNC_DIR = ./sync
|
||||
FORK_CHOICE_DIR = ./fork_choice
|
||||
|
||||
# To check generator matching:
|
||||
#$(info $$GENERATOR_TARGETS is [${GENERATOR_TARGETS}])
|
||||
@@ -29,7 +34,7 @@ MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/*/*.md) \
|
||||
$(wildcard $(SPEC_DIR)/_features/*/*/*.md) \
|
||||
$(wildcard $(SSZ_DIR)/*.md)
|
||||
|
||||
ALL_EXECUTABLE_SPECS = phase0 altair bellatrix capella deneb
|
||||
ALL_EXECUTABLE_SPECS = phase0 altair bellatrix capella deneb eip6110
|
||||
# The parameters for commands. Use `foreach` to avoid listing specs again.
|
||||
COVERAGE_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPECS), --cov=eth2spec.$S.$(TEST_PRESET_TYPE))
|
||||
PYLINT_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPECS), ./eth2spec/$S)
|
||||
@@ -214,3 +219,23 @@ detect_generator_incomplete: $(TEST_VECTOR_DIR)
|
||||
|
||||
detect_generator_error_log: $(TEST_VECTOR_DIR)
|
||||
[ -f $(GENERATOR_ERROR_LOG_FILE) ] && echo "[ERROR] $(GENERATOR_ERROR_LOG_FILE) file exists" || echo "[PASSED] error log file does not exist"
|
||||
|
||||
|
||||
# For docs reader
|
||||
install_docs:
|
||||
python3 -m venv venv; . venv/bin/activate; python3 -m pip install -e .[docs];
|
||||
|
||||
copy_docs:
|
||||
cp -r $(SPEC_DIR) $(DOCS_DIR);
|
||||
cp -r $(SYNC_DIR) $(DOCS_DIR);
|
||||
cp -r $(SSZ_DIR) $(DOCS_DIR);
|
||||
cp -r $(FORK_CHOICE_DIR) $(DOCS_DIR);
|
||||
cp $(CURRENT_DIR)/README.md $(DOCS_DIR)/README.md
|
||||
|
||||
build_docs: copy_docs
|
||||
. venv/bin/activate;
|
||||
mkdocs build
|
||||
|
||||
serve_docs:
|
||||
. venv/bin/activate;
|
||||
mkdocs serve
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Ethereum Proof-of-Stake Consensus Specifications
|
||||
|
||||
[](https://discord.gg/qGpsxSA) [](https://gitter.im/ethereum/sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
|
||||
[](https://discord.gg/qGpsxSA)
|
||||
|
||||
To learn more about proof-of-stake and sharding, see the [PoS documentation](https://ethereum.org/en/developers/docs/consensus-mechanisms/pos/), [sharding documentation](https://ethereum.org/en/upgrades/sharding/) and the [research compendium](https://notes.ethereum.org/s/H1PGqDhpm).
|
||||
|
||||
@@ -20,15 +20,16 @@ Features are researched and developed in parallel, and then consolidated into se
|
||||
| 0 | **Phase0** |`0` | <ul><li>Core</li><ul><li>[The beacon chain](specs/phase0/beacon-chain.md)</li><li>[Deposit contract](specs/phase0/deposit-contract.md)</li><li>[Beacon chain fork choice](specs/phase0/fork-choice.md)</li></ul><li>Additions</li><ul><li>[Honest validator guide](specs/phase0/validator.md)</li><li>[P2P networking](specs/phase0/p2p-interface.md)</li><li>[Weak subjectivity](specs/phase0/weak-subjectivity.md)</li></ul></ul> |
|
||||
| 1 | **Altair** | `74240` | <ul><li>Core</li><ul><li>[Beacon chain changes](specs/altair/beacon-chain.md)</li><li>[Altair fork](specs/altair/fork.md)</li></ul><li>Additions</li><ul><li>[Light client sync protocol](specs/altair/light-client/sync-protocol.md) ([full node](specs/altair/light-client/full-node.md), [light client](specs/altair/light-client/light-client.md), [networking](specs/altair/light-client/p2p-interface.md))</li><li>[Honest validator guide changes](specs/altair/validator.md)</li><li>[P2P networking](specs/altair/p2p-interface.md)</li></ul></ul> |
|
||||
| 2 | **Bellatrix** <br/> (["The Merge"](https://ethereum.org/en/upgrades/merge/)) | `144896` | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/bellatrix/beacon-chain.md)</li><li>[Bellatrix fork](specs/bellatrix/fork.md)</li><li>[Fork choice changes](specs/bellatrix/fork-choice.md)</li></ul><li>Additions</li><ul><li>[Honest validator guide changes](specs/bellatrix/validator.md)</li><li>[P2P networking](specs/bellatrix/p2p-interface.md)</li></ul></ul> |
|
||||
| 3 | **Capella** | `194048` | <ul><li>Core</li><ul><li>[Beacon chain changes](specs/capella/beacon-chain.md)</li><li>[Capella fork](specs/capella/fork.md)</li></ul><li>Additions</li><ul><li>[Light client sync protocol changes](specs/capella/light-client/sync-protocol.md) ([fork](specs/capella/light-client/fork.md), [full node](specs/capella/light-client/full-node.md), [networking](specs/capella/light-client/p2p-interface.md))</li></ul><ul><li>[Validator additions](specs/capella/validator.md)</li><li>[P2P networking](specs/capella/p2p-interface.md)</li></ul></ul> |
|
||||
|
||||
### In-development Specifications
|
||||
| Code Name or Topic | Specs | Notes |
|
||||
| - | - | - |
|
||||
| Capella (tentative) | <ul><li>Core</li><ul><li>[Beacon chain changes](specs/capella/beacon-chain.md)</li><li>[Capella fork](specs/capella/fork.md)</li></ul><li>Additions</li><ul><li>[Light client sync protocol changes](specs/capella/light-client/sync-protocol.md) ([fork](specs/capella/light-client/fork.md), [full node](specs/capella/light-client/full-node.md), [networking](specs/capella/light-client/p2p-interface.md))</li></ul><ul><li>[Validator additions](specs/capella/validator.md)</li><li>[P2P networking](specs/capella/p2p-interface.md)</li></ul></ul> |
|
||||
| Deneb (tentative) | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/deneb/beacon-chain.md)</li><li>[Deneb fork](specs/deneb/fork.md)</li><li>[Polynomial commitments](specs/deneb/polynomial-commitments.md)</li><li>[Fork choice changes](specs/deneb/fork-choice.md)</li></ul><li>Additions</li><ul><li>[Light client sync protocol changes](specs/deneb/light-client/sync-protocol.md) ([fork](specs/deneb/light-client/fork.md), [full node](specs/deneb/light-client/full-node.md), [networking](specs/deneb/light-client/p2p-interface.md))</li></ul><ul><li>[Honest validator guide changes](specs/deneb/validator.md)</li><li>[P2P networking](specs/deneb/p2p-interface.md)</li></ul></ul> |
|
||||
| Sharding (outdated) | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/_features/sharding/beacon-chain.md)</li></ul><li>Additions</li><ul><li>[P2P networking](specs/_features/sharding/p2p-interface.md)</li></ul></ul> |
|
||||
| Custody Game (outdated) | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/_features/custody_game/beacon-chain.md)</li></ul><li>Additions</li><ul><li>[Honest validator guide changes](specs/_features/custody_game/validator.md)</li></ul></ul> | Dependent on sharding |
|
||||
| Data Availability Sampling (outdated) | <ul><li>Core</li><ul><li>[Core types and functions](specs/_features/das/das-core.md)</li><li>[Fork choice changes](specs/_features/das/fork-choice.md)</li></ul><li>Additions</li><ul><li>[P2P Networking](specs/_features/das/p2p-interface.md)</li><li>[Sampling process](specs/_features/das/sampling.md)</li></ul></ul> | <ul><li> Dependent on sharding</li><li>[Technical explainer](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD)</li></ul> |
|
||||
| EIP-6110 | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/_features/eip6110//beacon-chain.md)</li><li>[EIP-6110 fork](specs/_features/eip6110/fork.md)</li></ul><li>Additions</li><ul><li>[Honest validator guide changes](specs/_features/eip6110/validator.md)</li></ul></ul> |
|
||||
|
||||
### Accompanying documents can be found in [specs](specs) and include:
|
||||
|
||||
@@ -64,6 +65,10 @@ Documentation on the different components used during spec writing can be found
|
||||
* [YAML Test Generators](tests/generators/README.md)
|
||||
* [Executable Python Spec, with Py-tests](tests/core/pyspec/README.md)
|
||||
|
||||
## Online viewer of the latest release (latest `master` branch)
|
||||
|
||||
[Ethereum Consensus Specs](https://ethereum.github.io/consensus-specs/)
|
||||
|
||||
## Consensus spec tests
|
||||
|
||||
Conformance tests built from the executable python spec are available in the [Ethereum Proof-of-Stake Consensus Spec Tests](https://github.com/ethereum/consensus-spec-tests) repo. Compressed tarballs are available in [releases](https://github.com/ethereum/consensus-spec-tests/releases).
|
||||
|
||||
@@ -8,4 +8,4 @@ Please see [Releases](https://github.com/ethereum/consensus-specs/releases/). We
|
||||
|
||||
**Please do not file a public ticket** mentioning the vulnerability.
|
||||
|
||||
To find out how to disclose a vulnerability in the Ethereum Consensus Layer visit [https://eth2bounty.ethereum.org](https://eth2bounty.ethereum.org) or email eth2bounty@ethereum.org. Please read the [disclosure page](https://eth2bounty.ethereum.org) for more information about publicly disclosed security vulnerabilities.
|
||||
To find out how to disclose a vulnerability in the Ethereum Consensus Layer visit [https://ethereum.org/bug-bounty](https://ethereum.org/bug-bounty) or email bounty@ethereum.org. Please read the [disclosure page](https://ethereum.org/bug-bounty) for more information about publicly disclosed security vulnerabilities.
|
||||
|
||||
@@ -46,12 +46,13 @@ BELLATRIX_FORK_VERSION: 0x02000000
|
||||
BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC
|
||||
# Capella
|
||||
CAPELLA_FORK_VERSION: 0x03000000
|
||||
CAPELLA_FORK_EPOCH: 18446744073709551615
|
||||
CAPELLA_FORK_EPOCH: 194048 # April 12, 2023, 10:27:35pm UTC
|
||||
# Deneb
|
||||
DENEB_FORK_VERSION: 0x04000000
|
||||
DENEB_FORK_EPOCH: 18446744073709551615
|
||||
|
||||
|
||||
# EIP6110
|
||||
EIP6110_FORK_VERSION: 0x05000000 # temporary stub
|
||||
EIP6110_FORK_EPOCH: 18446744073709551615
|
||||
|
||||
|
||||
# Time parameters
|
||||
|
||||
@@ -49,6 +49,9 @@ CAPELLA_FORK_EPOCH: 18446744073709551615
|
||||
# DENEB
|
||||
DENEB_FORK_VERSION: 0x04000001
|
||||
DENEB_FORK_EPOCH: 18446744073709551615
|
||||
# EIP6110
|
||||
EIP6110_FORK_VERSION: 0x05000001
|
||||
EIP6110_FORK_EPOCH: 18446744073709551615
|
||||
|
||||
|
||||
# Time parameters
|
||||
|
||||
docs/.pages (new file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
nav:
|
||||
- Home:
|
||||
- README.md
|
||||
- specs
|
||||
- ...
|
||||
docs/docs/new-feature.md (new file, 163 lines)
@@ -0,0 +1,163 @@
|
||||
# How to add a new feature proposal in consensus-specs
|
||||
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
## Table of Contents
|
||||
|
||||
- [A. Make it executable for linter checks](#a-make-it-executable-for-linter-checks)
|
||||
- [1. Create a folder under `./specs/_features`](#1-create-a-folder-under-specs_features)
|
||||
- [2. Choose the "previous fork" to extend: usually, use the scheduled or the latest mainnet fork version.](#2-choose-the-previous-fork-to-extend-usually-use-the-scheduled-or-the-latest-mainnet-fork-version)
|
||||
- [3. Write down your proposed `beacon-chain.md` change](#3-write-down-your-proposed-beacon-chainmd-change)
|
||||
- [4. Add `fork.md`](#4-add-forkmd)
|
||||
- [5. Make it executable](#5-make-it-executable)
|
||||
- [B: Make it executable for pytest and test generator](#b-make-it-executable-for-pytest-and-test-generator)
|
||||
- [1. Add `light-client/*` docs if you updated the content of `BeaconBlock`](#1-add-light-client-docs-if-you-updated-the-content-of-beaconblock)
|
||||
- [2. Add the mainnet and minimal presets and update the configs](#2-add-the-mainnet-and-minimal-presets-and-update-the-configs)
|
||||
- [3. Update `context.py`](#3-update-contextpy)
|
||||
- [4. Update `constants.py`](#4-update-constantspy)
|
||||
- [5. Update `genesis.py`:](#5-update-genesispy)
|
||||
- [6. To add fork transition tests, update fork_transition.py](#6-to-add-fork-transition-tests-update-fork_transitionpy)
|
||||
- [7. Update CI configurations](#7-update-ci-configurations)
|
||||
- [Others](#others)
|
||||
- [Bonus](#bonus)
|
||||
- [Need help?](#need-help)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
|
||||
|
||||
## A. Make it executable for linter checks
|
||||
|
||||
### 1. Create a folder under `./specs/_features`
|
||||
|
||||
For example, if it's an `EIP-9999` CL spec, you can create a `./specs/_features/eip9999` folder.
|
||||
|
||||
### 2. Choose the "previous fork" to extend: usually, use the scheduled or the latest mainnet fork version.
|
||||
|
||||
For example, if the latest fork is Capella, use `./specs/capella` content as your "previous fork".
|
||||
|
||||
### 3. Write down your proposed `beacon-chain.md` change
|
||||
- You can either use [Beacon Chain Spec Template](./templates/beacon-chain-template.md), or make a copy of the latest fork content and then edit it.
- Tips:
    - We use the [`doctoc`](https://www.npmjs.com/package/doctoc) tool to generate the table of contents.
        ```
        cd consensus-specs
        doctoc specs
        ```
    - The differences between "Constants", "Configurations", and "Presets":
        - Constants: Values that should never be changed.
        - Configurations: Settings that we may change for different networks.
        - Presets: Settings that we may change for testing.
    - Readability and simplicity are more important than efficiency and optimization.
    - Use simple Python rather than fancy Python dark magic.
|
||||
### 4. Add `fork.md`
|
||||
You can refer to the previous fork's `fork.md` file.
|
||||
### 5. Make it executable

- Update [`constants.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/constants.py) with the new feature name.
- Update [`setup.py`](https://github.com/ethereum/consensus-specs/blob/dev/setup.py):
    - Add a new `SpecBuilder` with the new feature name constant, e.g., `EIP9999SpecBuilder` (see the sketch below).
    - Add the new `SpecBuilder` to the `spec_builders` list.
    - Add the paths of the new markdown files in the `finalize_options` function.
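For illustration only, here is a minimal sketch of what such a builder could look like inside `setup.py`, modeled on the `EIP6110SpecBuilder` added in this commit. The `EIP9999` name and the choice of Deneb as the previous fork are placeholders for your feature; the other builder classes are the ones already defined in `setup.py`.

```python
# Hypothetical sketch for a feature named EIP-9999 that extends Deneb.
# EIP9999 is assumed to be defined next to the other fork-name constants in setup.py.
EIP9999 = 'eip9999'


class EIP9999SpecBuilder(DenebSpecBuilder):
    fork: str = EIP9999

    @classmethod
    def imports(cls, preset_name: str):
        # Pull in the previous fork's generated spec so the new fork can extend it
        return super().imports(preset_name) + f'''
from eth2spec.deneb import {preset_name} as deneb
'''


# Register the new builder so the pyspec build picks it up
spec_builders = {
    builder.fork: builder
    for builder in (
        Phase0SpecBuilder, AltairSpecBuilder, BellatrixSpecBuilder,
        CapellaSpecBuilder, DenebSpecBuilder, EIP9999SpecBuilder,
    )
}
```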
## B: Make it executable for pytest and test generator
|
||||
|
||||
### 1. Add `light-client/*` docs if you updated the content of `BeaconBlock`

- You can refer to the previous fork's `light-client/*` files.
- Add the paths of the new markdown files in `setup.py`'s `finalize_options` function.
||||
|
||||
### 2. Add the mainnet and minimal presets and update the configs
|
||||
- Add presets: `presets/mainnet/<new-feature-name>.yaml` and `presets/minimal/<new-feature-name>.yaml`
|
||||
- Update configs: `configs/mainnet.yaml` and `configs/minimal.yaml`
|
||||
|
||||
### 3. Update [`context.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/context.py)
|
||||
- Update `spec_targets` by adding `<NEW_FEATURE>`
|
||||
|
||||
```python
|
||||
from eth2spec.eip9999 import mainnet as spec_eip9999_mainnet, minimal as spec_eip9999_minimal
|
||||
|
||||
...
|
||||
|
||||
spec_targets: Dict[PresetBaseName, Dict[SpecForkName, Spec]] = {
|
||||
MINIMAL: {
|
||||
...
|
||||
EIP9999: spec_eip9999_minimal,
|
||||
},
|
||||
MAINNET: {
|
||||
...
|
||||
EIP9999: spec_eip9999_mainnet
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
### 4. Update [`constants.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/constants.py)

- Add `<NEW_FEATURE>` to `ALL_PHASES` and `TESTGEN_FORKS` (see the sketch below).
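A rough sketch of the corresponding `constants.py` edit, again using the hypothetical `EIP9999` name. It assumes fork names are wrapped with `SpecForkName`, as the type hints in the `context.py` example above suggest; the tuple contents shown are abridged and should match whatever the file already lists.

```python
# Hypothetical sketch: define the new fork name and append it to the existing tuples.
EIP9999 = SpecForkName('eip9999')

# Existing entries abridged -- keep the file's current contents and append EIP9999.
ALL_PHASES = (PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP9999)
TESTGEN_FORKS = (PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP9999)
```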
### 5. Update [`genesis.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/genesis.py):
|
||||
|
||||
We use `create_genesis_state` to create the default `state` in tests.
|
||||
|
||||
- Update `create_genesis_state` by adding `fork_version` setting:
|
||||
|
||||
```python
|
||||
def create_genesis_state(spec, validator_balances, activation_threshold):
|
||||
...
|
||||
if spec.fork == ALTAIR:
|
||||
current_version = spec.config.ALTAIR_FORK_VERSION
|
||||
...
|
||||
elif spec.fork == EIP9999:
|
||||
# Add the previous fork version of the given fork
|
||||
previous_version = spec.config.<PREVIOUS_FORK_VERSION>
|
||||
current_version = spec.config.EIP9999_FORK_VERSION
|
||||
```
|
||||
|
||||
- If the given feature changes `BeaconState` fields, you have to set the initial values by adding:
|
||||
|
||||
```python
|
||||
def create_genesis_state(spec, validator_balances, activation_threshold):
|
||||
...
|
||||
if is_post_eip9999(spec):
|
||||
state.<NEW_FIELD> = <value>
|
||||
|
||||
return state
|
||||
```
|
||||
|
||||
- If the given feature changes `ExecutionPayload` fields, you have to set the initial values by updating the `get_sample_genesis_execution_payload_header` helper (see the sketch below).
|
||||
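A hedged sketch of that update, in the same placeholder style as the snippets above; the helper's exact signature and existing body should be taken from `genesis.py` itself.

```python
def get_sample_genesis_execution_payload_header(spec, eth1_block_hash=None):
    ...
    if is_post_eip9999(spec):
        # Default value for the field the feature adds to ExecutionPayloadHeader
        payload_header.<NEW_FIELD> = <value>
    return payload_header
```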
|
||||
### 6. To add fork transition tests, update [fork_transition.py](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py)
|
||||
|
||||
```python
|
||||
def do_fork(state, spec, post_spec, fork_epoch, with_block=True, sync_aggregate=None, operation_dict=None):
|
||||
...
|
||||
|
||||
if post_spec.fork == ALTAIR:
|
||||
state = post_spec.upgrade_to_altair(state)
|
||||
...
|
||||
elif post_spec.fork == EIP9999:
|
||||
state = post_spec.upgrade_to_eip9999(state)
|
||||
|
||||
...
|
||||
|
||||
if post_spec.fork == ALTAIR:
|
||||
assert state.fork.previous_version == post_spec.config.GENESIS_FORK_VERSION
|
||||
assert state.fork.current_version == post_spec.config.ALTAIR_FORK_VERSION
|
||||
...
|
||||
elif post_spec.fork == EIP9999:
|
||||
assert state.fork.previous_version == post_spec.config.<PREVIOUS_FORK_VERSION>
|
||||
assert state.fork.current_version == post_spec.config.EIP9999_FORK_VERSION
|
||||
|
||||
...
|
||||
```
|
||||
|
||||
### 7. Update CI configurations

- Update the [GitHub Actions config](https://github.com/ethereum/consensus-specs/blob/dev/.github/workflows/run-tests.yml):
    - Update the `pyspec-tests.strategy.matrix.version` list by adding the new feature to it.
- Update the [CircleCI config](https://github.com/ethereum/consensus-specs/blob/dev/.circleci/config.yml):
    - Add a new job to `workflows.test_spec.jobs`.
||||
|
||||
## Others
|
||||
|
||||
### Bonus
|
||||
- Add `validator.md` if honest validator behavior changes with the new feature.
|
||||
|
||||
### Need help?
|
||||
You can tag the spec elves to help clean up your PR. 🧚
|
||||
docs/docs/templates/beacon-chain-template.md (vendored, new file, 84 lines)
@@ -0,0 +1,84 @@
|
||||
# `beacon-chain.md` Template
|
||||
|
||||
# <FORK_NAME> -- The Beacon Chain
|
||||
|
||||
## Table of contents
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
|
||||
|
||||
## Introduction
|
||||
|
||||
## Notation
|
||||
|
||||
## Custom types
|
||||
|
||||
## Constants
|
||||
|
||||
### [CATEGORY OF CONSTANTS]
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `<CONSTANT_NAME>` | `<VALUE>` |
|
||||
|
||||
## Preset
|
||||
|
||||
|
||||
### [CATEGORY OF PRESETS]
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `<PRESET_FIELD_NAME>` | `<VALUE>` |
|
||||
|
||||
## Configuration
|
||||
|
||||
### [CATEGORY OF CONFIGURATIONS]
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `<CONFIGURATION_FIELD_NAME>` | `<VALUE>` |
|
||||
|
||||
## Containers
|
||||
|
||||
### [CATEGORY OF CONTAINERS]
|
||||
|
||||
#### `CONTAINER_NAME`
|
||||
|
||||
```python
|
||||
class CONTAINER_NAME(Container):
|
||||
FIELD_NAME: SSZ_TYPE
|
||||
```
|
||||
|
||||
## Helper functions
|
||||
|
||||
### [CATEGORY OF HELPERS]
|
||||
|
||||
```python
|
||||
<PYTHON HELPER FUNCTION>
|
||||
```
|
||||
|
||||
### Epoch processing
|
||||
|
||||
|
||||
### Block processing
|
||||
|
||||
|
||||
|
||||
|
||||
## Testing
|
||||
|
||||
*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure <FORK_NAME> testing only.
|
||||
|
||||
```python
|
||||
def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
|
||||
eth1_timestamp: uint64,
|
||||
deposits: Sequence[Deposit],
|
||||
execution_payload_header: ExecutionPayloadHeader=ExecutionPayloadHeader()
|
||||
) -> BeaconState:
|
||||
...
|
||||
```
|
||||
docs/light-client/.pages (new file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
nav:
|
||||
- 'Index': index.md
|
||||
- 'Altair': specs/altair/light-client/sync-protocol
|
||||
- 'Capella': specs/capella/light-client/sync-protocol
|
||||
- 'Deneb': specs/deneb/light-client/sync-protocol
|
||||
docs/light-client/index.md (new file, 1 line)
@@ -0,0 +1 @@
|
||||
# Light client specifications
|
||||
docs/stylesheets/extra.css (new file, 34 lines)
@@ -0,0 +1,34 @@
|
||||
/* Reference: https://zenn.dev/mebiusbox/articles/81d977a72cee01 */
|
||||
|
||||
[data-md-color-scheme=default] {
|
||||
--md-default-fg-color--light: #222 !important;
|
||||
}
|
||||
[data-md-color-scheme=slate] {
|
||||
--md-default-fg-color--light: #fefefe !important;
|
||||
--md-typeset-a-color: #fc0 !important;
|
||||
}
|
||||
|
||||
.md-typeset pre {
|
||||
color: #f8f8f2;
|
||||
}
|
||||
.md-typeset .highlighttable {
|
||||
margin-left:-20px;
|
||||
margin-right: -20px;
|
||||
border-radius: 0;
|
||||
}
|
||||
.md-typeset .highlighttable > * {
|
||||
--md-code-bg-color: #222 !important;
|
||||
--md-code-fg-color: #fefefe !important;
|
||||
}
|
||||
.md-typeset .highlighttable .linenos .linenodiv pre span {
|
||||
background-color: #222 !important;
|
||||
color: #fefefe !important;
|
||||
}
|
||||
.md-typeset .highlighttable .md-clipboard:before,
|
||||
.md-typeset .highlighttable .md-clipboard:after {
|
||||
color: rgba(240,240,240,.8);
|
||||
}
|
||||
.md-typeset .highlighttable .md-clipboard:hover:before,
|
||||
.md-typeset .highlighttable .md-clipboard:hover:after {
|
||||
color: rgba(102,217,224,1);
|
||||
}
|
||||
fork_choice/.pages (new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
nav:
|
||||
- ...
|
||||
- Fork Choice -- Core:
|
||||
- phase0: specs/phase0/fork-choice
|
||||
- bellatrix: specs/bellatrix/fork-choice
|
||||
- capella: specs/capella/fork-choice
|
||||
- deneb: specs/deneb/fork-choice
|
||||
@@ -15,7 +15,7 @@
|
||||
## Introduction
|
||||
|
||||
Under honest majority and certain network synchronicity assumptions
|
||||
there exist a block that is safe from re-orgs. Normally this block is
|
||||
there exists a block that is safe from re-orgs. Normally this block is
|
||||
pretty close to the head of canonical chain which makes it valuable
|
||||
to expose a safe block to users.
|
||||
|
||||
|
||||
mkdocs.yml (new file, 40 lines)
@@ -0,0 +1,40 @@
|
||||
site_name: Ethereum Consensus Specs
|
||||
site_url: https://ethereum.github.io/consensus-specs/
|
||||
repo_name: ethereum/consensus-specs
|
||||
theme:
|
||||
name: material
|
||||
palette:
|
||||
- scheme: default
|
||||
primary: black
|
||||
toggle:
|
||||
icon: material/brightness-7
|
||||
name: Switch to dark mode
|
||||
- scheme: slate
|
||||
primary: black
|
||||
toggle:
|
||||
icon: material/brightness-4
|
||||
name: Switch to light mode
|
||||
features:
|
||||
- navigation.tabs
|
||||
- search
|
||||
markdown_extensions:
|
||||
- toc:
|
||||
permalink: true
|
||||
- pymdownx.superfences
|
||||
- pymdownx.highlight:
|
||||
use_pygments: true
|
||||
noclasses: true
|
||||
pygments_style: monokai
|
||||
linenums: true
|
||||
anchor_linenums: true
|
||||
- mdx_truly_sane_lists:
|
||||
nested_indent: 4
|
||||
plugins:
|
||||
- search
|
||||
- awesome-pages
|
||||
extra_css:
|
||||
- stylesheets/extra.css
|
||||
extra:
|
||||
social:
|
||||
- icon: fontawesome/brands/github
|
||||
link: https://github.com/ethereum/consensus-specs
|
||||
presets/mainnet/eip6110.yaml (new file, 6 lines)
@@ -0,0 +1,6 @@
|
||||
# Mainnet preset - EIP6110
|
||||
|
||||
# Execution
|
||||
# ---------------------------------------------------------------
|
||||
# 2**13 (= 8192) receipts
|
||||
MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD: 8192
|
||||
@@ -18,12 +18,6 @@ HYSTERESIS_DOWNWARD_MULTIPLIER: 1
|
||||
HYSTERESIS_UPWARD_MULTIPLIER: 5
|
||||
|
||||
|
||||
# Fork Choice
|
||||
# ---------------------------------------------------------------
|
||||
# 2**3 (= 8)
|
||||
SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8
|
||||
|
||||
|
||||
# Gwei values
|
||||
# ---------------------------------------------------------------
|
||||
# 2**0 * 10**9 (= 1,000,000,000) Gwei
|
||||
|
||||
presets/minimal/eip6110.yaml (new file, 6 lines)
@@ -0,0 +1,6 @@
|
||||
# Minimal preset - EIP6110
|
||||
|
||||
# Execution
|
||||
# ---------------------------------------------------------------
|
||||
# [customized]
|
||||
MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD: 4
|
||||
@@ -18,12 +18,6 @@ HYSTERESIS_DOWNWARD_MULTIPLIER: 1
|
||||
HYSTERESIS_UPWARD_MULTIPLIER: 5
|
||||
|
||||
|
||||
# Fork Choice
|
||||
# ---------------------------------------------------------------
|
||||
# 2**1 (= 2)
|
||||
SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 2
|
||||
|
||||
|
||||
# Gwei values
|
||||
# ---------------------------------------------------------------
|
||||
# 2**0 * 10**9 (= 1,000,000,000) Gwei
|
||||
|
||||
setup.py (46 changed lines)
@@ -47,6 +47,7 @@ ALTAIR = 'altair'
|
||||
BELLATRIX = 'bellatrix'
|
||||
CAPELLA = 'capella'
|
||||
DENEB = 'deneb'
|
||||
EIP6110 = 'eip6110'
|
||||
|
||||
|
||||
# The helper functions that are used when defining constants
|
||||
@@ -382,7 +383,7 @@ from typing import (
|
||||
|
||||
from eth2spec.utils.ssz.ssz_impl import hash_tree_root, copy, uint_to_bytes
|
||||
from eth2spec.utils.ssz.ssz_typing import (
|
||||
View, boolean, Container, List, Vector, uint8, uint32, uint64,
|
||||
View, boolean, Container, List, Vector, uint8, uint32, uint64, uint256,
|
||||
Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist)
|
||||
from eth2spec.utils.ssz.ssz_typing import Bitvector # noqa: F401
|
||||
from eth2spec.utils import bls
|
||||
@@ -550,7 +551,7 @@ class BellatrixSpecBuilder(AltairSpecBuilder):
|
||||
return super().imports(preset_name) + f'''
|
||||
from typing import Protocol
|
||||
from eth2spec.altair import {preset_name} as altair
|
||||
from eth2spec.utils.ssz.ssz_typing import Bytes8, Bytes20, ByteList, ByteVector, uint256
|
||||
from eth2spec.utils.ssz.ssz_typing import Bytes8, Bytes20, ByteList, ByteVector
|
||||
'''
|
||||
|
||||
@classmethod
|
||||
@@ -587,7 +588,7 @@ class NoopExecutionEngine(ExecutionEngine):
|
||||
payload_attributes: Optional[PayloadAttributes]) -> Optional[PayloadId]:
|
||||
pass
|
||||
|
||||
def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> ExecutionPayload:
|
||||
def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> GetPayloadResponse:
|
||||
# pylint: disable=unused-argument
|
||||
raise NotImplementedError("no default block production")
|
||||
|
||||
@@ -667,9 +668,22 @@ def retrieve_blobs_and_proofs(beacon_block_root: Root) -> PyUnion[Tuple[Blob, KZ
|
||||
return {**super().hardcoded_custom_type_dep_constants(spec_object), **constants}
|
||||
|
||||
|
||||
#
|
||||
# EIP6110SpecBuilder
|
||||
#
|
||||
class EIP6110SpecBuilder(DenebSpecBuilder):
|
||||
fork: str = EIP6110
|
||||
|
||||
@classmethod
|
||||
def imports(cls, preset_name: str):
|
||||
return super().imports(preset_name) + f'''
|
||||
from eth2spec.deneb import {preset_name} as deneb
|
||||
'''
|
||||
|
||||
|
||||
spec_builders = {
|
||||
builder.fork: builder
|
||||
for builder in (Phase0SpecBuilder, AltairSpecBuilder, BellatrixSpecBuilder, CapellaSpecBuilder, DenebSpecBuilder)
|
||||
for builder in (Phase0SpecBuilder, AltairSpecBuilder, BellatrixSpecBuilder, CapellaSpecBuilder, DenebSpecBuilder, EIP6110SpecBuilder)
|
||||
}
|
||||
|
||||
|
||||
@@ -968,14 +982,14 @@ class PySpecCommand(Command):
|
||||
if len(self.md_doc_paths) == 0:
|
||||
print("no paths were specified, using default markdown file paths for pyspec"
|
||||
" build (spec fork: %s)" % self.spec_fork)
|
||||
if self.spec_fork in (PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB):
|
||||
if self.spec_fork in (PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110):
|
||||
self.md_doc_paths = """
|
||||
specs/phase0/beacon-chain.md
|
||||
specs/phase0/fork-choice.md
|
||||
specs/phase0/validator.md
|
||||
specs/phase0/weak-subjectivity.md
|
||||
"""
|
||||
if self.spec_fork in (ALTAIR, BELLATRIX, CAPELLA, DENEB):
|
||||
if self.spec_fork in (ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110):
|
||||
self.md_doc_paths += """
|
||||
specs/altair/light-client/full-node.md
|
||||
specs/altair/light-client/light-client.md
|
||||
@@ -987,7 +1001,7 @@ class PySpecCommand(Command):
|
||||
specs/altair/validator.md
|
||||
specs/altair/p2p-interface.md
|
||||
"""
|
||||
if self.spec_fork in (BELLATRIX, CAPELLA, DENEB):
|
||||
if self.spec_fork in (BELLATRIX, CAPELLA, DENEB, EIP6110):
|
||||
self.md_doc_paths += """
|
||||
specs/bellatrix/beacon-chain.md
|
||||
specs/bellatrix/fork.md
|
||||
@@ -996,7 +1010,7 @@ class PySpecCommand(Command):
|
||||
specs/bellatrix/p2p-interface.md
|
||||
sync/optimistic.md
|
||||
"""
|
||||
if self.spec_fork in (CAPELLA, DENEB):
|
||||
if self.spec_fork in (CAPELLA, DENEB, EIP6110):
|
||||
self.md_doc_paths += """
|
||||
specs/capella/light-client/fork.md
|
||||
specs/capella/light-client/full-node.md
|
||||
@@ -1008,7 +1022,7 @@ class PySpecCommand(Command):
|
||||
specs/capella/validator.md
|
||||
specs/capella/p2p-interface.md
|
||||
"""
|
||||
if self.spec_fork == DENEB:
|
||||
if self.spec_fork in (DENEB, EIP6110):
|
||||
self.md_doc_paths += """
|
||||
specs/deneb/light-client/fork.md
|
||||
specs/deneb/light-client/full-node.md
|
||||
@@ -1021,6 +1035,15 @@ class PySpecCommand(Command):
|
||||
specs/deneb/p2p-interface.md
|
||||
specs/deneb/validator.md
|
||||
"""
|
||||
if self.spec_fork == EIP6110:
|
||||
self.md_doc_paths += """
|
||||
specs/_features/eip6110/light-client/fork.md
|
||||
specs/_features/eip6110/light-client/full-node.md
|
||||
specs/_features/eip6110/light-client/p2p-interface.md
|
||||
specs/_features/eip6110/light-client/sync-protocol.md
|
||||
specs/_features/eip6110/beacon-chain.md
|
||||
specs/_features/eip6110/fork.md
|
||||
"""
|
||||
if len(self.md_doc_paths) == 0:
|
||||
raise Exception('no markdown files specified, and spec fork "%s" is unknown', self.spec_fork)
|
||||
|
||||
@@ -1157,11 +1180,12 @@ setup(
|
||||
packages=find_packages(where='tests/core/pyspec') + ['configs', 'specs'],
|
||||
py_modules=["eth2spec"],
|
||||
cmdclass=commands,
|
||||
python_requires=">=3.8, <4",
|
||||
python_requires=">=3.9, <4",
|
||||
extras_require={
|
||||
"test": ["pytest>=4.4", "pytest-cov", "pytest-xdist"],
|
||||
"lint": ["flake8==5.0.4", "mypy==0.981", "pylint==2.15.3"],
|
||||
"generator": ["python-snappy==0.6.1", "filelock"],
|
||||
"generator": ["python-snappy==0.6.1", "filelock", "pathos==0.3.0"],
|
||||
"docs": ["mkdocs==1.4.2", "mkdocs-material==9.1.5", "mdx-truly-sane-lists==1.3", "mkdocs-awesome-pages-plugin==2.8.0"]
|
||||
},
|
||||
install_requires=[
|
||||
"eth-utils>=2.0.0,<3",
|
||||
|
||||
specs/.pages (new file, 4 lines)
@@ -0,0 +1,4 @@
|
||||
nav:
|
||||
- phase0
|
||||
- ...
|
||||
- _features
|
||||
specs/_features/eip4788/beacon-chain.md (new file, 72 lines)
@@ -0,0 +1,72 @@
|
||||
# EIP-4788 -- The Beacon Chain
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Containers](#containers)
|
||||
- [Extended Containers](#extended-containers)
|
||||
- [`ExecutionPayload`](#executionpayload)
|
||||
- [`ExecutionPayloadHeader`](#executionpayloadheader)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
TODO
|
||||
|
||||
## Containers
|
||||
|
||||
### Extended Containers
|
||||
|
||||
#### `ExecutionPayload`
|
||||
|
||||
```python
|
||||
class ExecutionPayload(Container):
|
||||
# Execution block header fields
|
||||
parent_hash: Hash32
|
||||
fee_recipient: ExecutionAddress # 'beneficiary' in the yellow paper
|
||||
state_root: Bytes32
|
||||
receipts_root: Bytes32
|
||||
logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM]
|
||||
prev_randao: Bytes32 # 'difficulty' in the yellow paper
|
||||
block_number: uint64 # 'number' in the yellow paper
|
||||
gas_limit: uint64
|
||||
gas_used: uint64
|
||||
timestamp: uint64
|
||||
extra_data: ByteList[MAX_EXTRA_DATA_BYTES]
|
||||
base_fee_per_gas: uint256
|
||||
# Extra payload fields
|
||||
block_hash: Hash32 # Hash of execution block
|
||||
transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD]
|
||||
withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]
|
||||
parent_beacon_block_root: Root # [New in EIP-4788]
|
||||
```
|
||||
|
||||
#### `ExecutionPayloadHeader`
|
||||
|
||||
```python
|
||||
class ExecutionPayloadHeader(Container):
|
||||
# Execution block header fields
|
||||
parent_hash: Hash32
|
||||
fee_recipient: ExecutionAddress
|
||||
state_root: Bytes32
|
||||
receipts_root: Bytes32
|
||||
logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM]
|
||||
prev_randao: Bytes32
|
||||
block_number: uint64
|
||||
gas_limit: uint64
|
||||
gas_used: uint64
|
||||
timestamp: uint64
|
||||
extra_data: ByteList[MAX_EXTRA_DATA_BYTES]
|
||||
base_fee_per_gas: uint256
|
||||
# Extra payload fields
|
||||
block_hash: Hash32 # Hash of execution block
|
||||
transactions_root: Root
|
||||
withdrawals_root: Root
|
||||
parent_beacon_block_root: Root # [New in EIP-4788]
|
||||
```
|
||||
specs/_features/eip4788/validator.md (new file, 103 lines)
@@ -0,0 +1,103 @@
|
||||
# EIP-4788 -- Honest Validator
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Helpers](#helpers)
|
||||
- [Protocols](#protocols)
|
||||
- [`ExecutionEngine`](#executionengine)
|
||||
- [Modified `get_payload`](#modified-get_payload)
|
||||
- [Beacon chain responsibilities](#beacon-chain-responsibilities)
|
||||
- [Block proposal](#block-proposal)
|
||||
- [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
|
||||
- [ExecutionPayload](#executionpayload)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This document represents the changes to be made in the code of an "honest validator" to implement the EIP-4788 feature.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
This document is an extension of the [Capella -- Honest Validator](../capella/validator.md) guide.
|
||||
All behaviors and definitions defined in this document, and documents it extends, carry over unless explicitly noted or overridden.
|
||||
|
||||
All terminology, constants, functions, and protocol mechanics defined in the updated Beacon Chain doc of [Capella](../capella/beacon-chain.md) are requisite for this document and used throughout.
|
||||
Please see the related Beacon Chain doc before continuing and use it as a reference throughout.
|
||||
|
||||
## Helpers
|
||||
|
||||
## Protocols
|
||||
|
||||
### `ExecutionEngine`
|
||||
|
||||
#### Modified `get_payload`
|
||||
|
||||
`get_payload` returns the upgraded EIP-4788 `ExecutionPayload` type.
|
||||
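As an informal illustration (not part of the spec text), the engine protocol's `get_payload` signature is assumed unchanged from Capella; only the returned payload type now carries `parent_beacon_block_root`:

```python
class ExecutionEngine(Protocol):
    def get_payload(self, payload_id: PayloadId) -> ExecutionPayload:
        """
        Return the EIP-4788 ``ExecutionPayload`` (i.e. including ``parent_beacon_block_root``)
        that has been built since the corresponding call to ``notify_forkchoice_updated``.
        """
        ...
```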
|
||||
## Beacon chain responsibilities
|
||||
|
||||
All validator responsibilities remain unchanged other than those noted below.
|
||||
|
||||
### Block proposal
|
||||
|
||||
#### Constructing the `BeaconBlockBody`
|
||||
|
||||
##### ExecutionPayload
|
||||
|
||||
`ExecutionPayload`s are constructed as they were in Capella, except that the parent beacon block root is also supplied.
|
||||
|
||||
*Note*: In this section, `state` is the state of the slot for the block proposal _without_ the block yet applied.
|
||||
That is, `state` is the `previous_state` processed through any empty slots up to the assigned slot using `process_slots(previous_state, slot)`.
|
||||
|
||||
*Note*: The only change made to `prepare_execution_payload` is to add the parent beacon block root as an additional
|
||||
parameter to the `PayloadAttributes`.
|
||||
|
||||
```python
|
||||
def prepare_execution_payload(state: BeaconState,
|
||||
pow_chain: Dict[Hash32, PowBlock],
|
||||
safe_block_hash: Hash32,
|
||||
finalized_block_hash: Hash32,
|
||||
suggested_fee_recipient: ExecutionAddress,
|
||||
execution_engine: ExecutionEngine) -> Optional[PayloadId]:
|
||||
if not is_merge_transition_complete(state):
|
||||
is_terminal_block_hash_set = TERMINAL_BLOCK_HASH != Hash32()
|
||||
is_activation_epoch_reached = get_current_epoch(state) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
|
||||
if is_terminal_block_hash_set and not is_activation_epoch_reached:
|
||||
# Terminal block hash is set but activation epoch is not yet reached, no prepare payload call is needed
|
||||
return None
|
||||
|
||||
terminal_pow_block = get_terminal_pow_block(pow_chain)
|
||||
if terminal_pow_block is None:
|
||||
# Pre-merge, no prepare payload call is needed
|
||||
return None
|
||||
# Signify merge via producing on top of the terminal PoW block
|
||||
parent_hash = terminal_pow_block.block_hash
|
||||
else:
|
||||
# Post-merge, normal payload
|
||||
parent_hash = state.latest_execution_payload_header.block_hash
|
||||
|
||||
# Set the forkchoice head and initiate the payload build process
|
||||
payload_attributes = PayloadAttributes(
|
||||
timestamp=compute_timestamp_at_slot(state, state.slot),
|
||||
prev_randao=get_randao_mix(state, get_current_epoch(state)),
|
||||
suggested_fee_recipient=suggested_fee_recipient,
|
||||
withdrawals=get_expected_withdrawals(state),
|
||||
parent_beacon_block_root=hash_tree_root(state.latest_block_header), # [New in EIP-4788]
|
||||
)
|
||||
return execution_engine.notify_forkchoice_updated(
|
||||
head_block_hash=parent_hash,
|
||||
safe_block_hash=safe_block_hash,
|
||||
finalized_block_hash=finalized_block_hash,
|
||||
payload_attributes=payload_attributes,
|
||||
)
|
||||
```
|
||||
specs/_features/eip6110/beacon-chain.md (new file, 328 lines)
@@ -0,0 +1,328 @@
|
||||
# EIP-6110 -- The Beacon Chain
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Constants](#constants)
|
||||
- [Misc](#misc)
|
||||
- [Preset](#preset)
|
||||
- [Execution](#execution)
|
||||
- [Containers](#containers)
|
||||
- [New containers](#new-containers)
|
||||
- [`DepositReceipt`](#depositreceipt)
|
||||
- [Extended Containers](#extended-containers)
|
||||
- [`ExecutionPayload`](#executionpayload)
|
||||
- [`ExecutionPayloadHeader`](#executionpayloadheader)
|
||||
- [`BeaconState`](#beaconstate)
|
||||
- [Beacon chain state transition function](#beacon-chain-state-transition-function)
|
||||
- [Block processing](#block-processing)
|
||||
- [Modified `process_operations`](#modified-process_operations)
|
||||
- [New `process_deposit_receipt`](#new-process_deposit_receipt)
|
||||
- [Modified `process_execution_payload`](#modified-process_execution_payload)
|
||||
- [Testing](#testing)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This is the beacon chain specification of the in-protocol deposit processing mechanism.
|
||||
This mechanism relies on the changes proposed by [EIP-6110](http://eips.ethereum.org/EIPS/eip-6110).
|
||||
|
||||
*Note:* This specification is built upon [Deneb](../../deneb/beacon-chain.md) and is under active development.
|
||||
|
||||
## Constants
|
||||
|
||||
The following values are (non-configurable) constants used throughout the specification.
|
||||
|
||||
### Misc
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `UNSET_DEPOSIT_RECEIPTS_START_INDEX` | `uint64(2**64 - 1)` |
|
||||
|
||||
## Preset
|
||||
|
||||
### Execution
|
||||
|
||||
| Name | Value | Description |
|
||||
| - | - | - |
|
||||
| `MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD` | `uint64(2**13)` (= 8,192) | Maximum number of deposit receipts allowed in each payload |
|
||||
|
||||
## Containers
|
||||
|
||||
### New containers
|
||||
|
||||
#### `DepositReceipt`
|
||||
|
||||
```python
|
||||
class DepositReceipt(Container):
|
||||
pubkey: BLSPubkey
|
||||
withdrawal_credentials: Bytes32
|
||||
amount: Gwei
|
||||
signature: BLSSignature
|
||||
index: uint64
|
||||
```
|
||||
|
||||
### Extended Containers
|
||||
|
||||
#### `ExecutionPayload`
|
||||
|
||||
```python
|
||||
class ExecutionPayload(Container):
|
||||
# Execution block header fields
|
||||
parent_hash: Hash32
|
||||
fee_recipient: ExecutionAddress
|
||||
state_root: Bytes32
|
||||
receipts_root: Bytes32
|
||||
logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM]
|
||||
prev_randao: Bytes32
|
||||
block_number: uint64
|
||||
gas_limit: uint64
|
||||
gas_used: uint64
|
||||
timestamp: uint64
|
||||
extra_data: ByteList[MAX_EXTRA_DATA_BYTES]
|
||||
base_fee_per_gas: uint256
|
||||
# Extra payload fields
|
||||
block_hash: Hash32
|
||||
transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD]
|
||||
withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]
|
||||
excess_data_gas: uint256
|
||||
deposit_receipts: List[DepositReceipt, MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD] # [New in EIP6110]
|
||||
```
|
||||
|
||||
#### `ExecutionPayloadHeader`
|
||||
|
||||
```python
|
||||
class ExecutionPayloadHeader(Container):
|
||||
# Execution block header fields
|
||||
parent_hash: Hash32
|
||||
fee_recipient: ExecutionAddress
|
||||
state_root: Bytes32
|
||||
receipts_root: Bytes32
|
||||
logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM]
|
||||
prev_randao: Bytes32
|
||||
block_number: uint64
|
||||
gas_limit: uint64
|
||||
gas_used: uint64
|
||||
timestamp: uint64
|
||||
extra_data: ByteList[MAX_EXTRA_DATA_BYTES]
|
||||
base_fee_per_gas: uint256
|
||||
# Extra payload fields
|
||||
block_hash: Hash32
|
||||
transactions_root: Root
|
||||
withdrawals_root: Root
|
||||
excess_data_gas: uint256
|
||||
deposit_receipts_root: Root # [New in EIP6110]
|
||||
```
|
||||
|
||||
#### `BeaconState`
|
||||
|
||||
```python
|
||||
class BeaconState(Container):
|
||||
# Versioning
|
||||
genesis_time: uint64
|
||||
genesis_validators_root: Root
|
||||
slot: Slot
|
||||
fork: Fork
|
||||
# History
|
||||
latest_block_header: BeaconBlockHeader
|
||||
block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
|
||||
state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
|
||||
historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT]
|
||||
# Eth1
|
||||
eth1_data: Eth1Data
|
||||
eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH]
|
||||
eth1_deposit_index: uint64
|
||||
# Registry
|
||||
validators: List[Validator, VALIDATOR_REGISTRY_LIMIT]
|
||||
balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT]
|
||||
# Randomness
|
||||
randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR]
|
||||
# Slashings
|
||||
slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] # Per-epoch sums of slashed effective balances
|
||||
# Participation
|
||||
previous_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT]
|
||||
current_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT]
|
||||
# Finality
|
||||
justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH] # Bit set for every recent justified epoch
|
||||
previous_justified_checkpoint: Checkpoint
|
||||
current_justified_checkpoint: Checkpoint
|
||||
finalized_checkpoint: Checkpoint
|
||||
# Inactivity
|
||||
inactivity_scores: List[uint64, VALIDATOR_REGISTRY_LIMIT]
|
||||
# Sync
|
||||
current_sync_committee: SyncCommittee
|
||||
next_sync_committee: SyncCommittee
|
||||
# Execution
|
||||
latest_execution_payload_header: ExecutionPayloadHeader # [Modified in EIP6110]
|
||||
# Withdrawals
|
||||
next_withdrawal_index: WithdrawalIndex
|
||||
next_withdrawal_validator_index: ValidatorIndex
|
||||
# Deep history valid from Capella onwards
|
||||
historical_summaries: List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT]
|
||||
# [New in EIP6110]
|
||||
deposit_receipts_start_index: uint64
|
||||
```
|
||||
|
||||
## Beacon chain state transition function
|
||||
|
||||
### Block processing
|
||||
|
||||
```python
|
||||
def process_block(state: BeaconState, block: BeaconBlock) -> None:
|
||||
process_block_header(state, block)
|
||||
if is_execution_enabled(state, block.body):
|
||||
process_withdrawals(state, block.body.execution_payload)
|
||||
process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE) # [Modified in EIP6110]
|
||||
process_randao(state, block.body)
|
||||
process_eth1_data(state, block.body)
|
||||
process_operations(state, block.body) # [Modified in EIP6110]
|
||||
process_sync_aggregate(state, block.body.sync_aggregate)
|
||||
process_blob_kzg_commitments(block.body)
|
||||
```
|
||||
|
||||
#### Modified `process_operations`
|
||||
|
||||
*Note*: The function `process_operations` is modified to process `DepositReceipt` operations included in the payload.
|
||||
|
||||
```python
|
||||
def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
|
||||
# [Modified in EIP6110]
|
||||
# Disable former deposit mechanism once all prior deposits are processed
|
||||
eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_receipts_start_index)
|
||||
if state.eth1_deposit_index < eth1_deposit_index_limit:
|
||||
assert len(body.deposits) == min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
|
||||
else:
|
||||
assert len(body.deposits) == 0
|
||||
|
||||
def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
|
||||
for operation in operations:
|
||||
fn(state, operation)
|
||||
|
||||
for_ops(body.proposer_slashings, process_proposer_slashing)
|
||||
for_ops(body.attester_slashings, process_attester_slashing)
|
||||
for_ops(body.attestations, process_attestation)
|
||||
for_ops(body.deposits, process_deposit)
|
||||
for_ops(body.voluntary_exits, process_voluntary_exit)
|
||||
for_ops(body.bls_to_execution_changes, process_bls_to_execution_change)
|
||||
|
||||
# [New in EIP6110]
|
||||
if is_execution_enabled(state, body):
|
||||
for_ops(body.execution_payload.deposit_receipts, process_deposit_receipt)
|
||||
```
|
||||
|
||||
#### New `process_deposit_receipt`
|
||||
|
||||
```python
|
||||
def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) -> None:
|
||||
# Set deposit receipt start index
|
||||
if state.deposit_receipts_start_index == UNSET_DEPOSIT_RECEIPTS_START_INDEX:
|
||||
state.deposit_receipts_start_index = deposit_receipt.index
|
||||
|
||||
apply_deposit(
|
||||
state=state,
|
||||
pubkey=deposit_receipt.pubkey,
|
||||
withdrawal_credentials=deposit_receipt.withdrawal_credentials,
|
||||
amount=deposit_receipt.amount,
|
||||
signature=deposit_receipt.signature,
|
||||
)
|
||||
```
|
||||
|
||||
#### Modified `process_execution_payload`
|
||||
|
||||
*Note*: The function `process_execution_payload` is modified to use the new `ExecutionPayloadHeader` type.
|
||||
|
||||
```python
|
||||
def process_execution_payload(state: BeaconState, payload: ExecutionPayload, execution_engine: ExecutionEngine) -> None:
|
||||
# Verify consistency of the parent hash with respect to the previous execution payload header
|
||||
if is_merge_transition_complete(state):
|
||||
assert payload.parent_hash == state.latest_execution_payload_header.block_hash
|
||||
# Verify prev_randao
|
||||
assert payload.prev_randao == get_randao_mix(state, get_current_epoch(state))
|
||||
# Verify timestamp
|
||||
assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)
|
||||
# Verify the execution payload is valid
|
||||
assert execution_engine.notify_new_payload(payload)
|
||||
# Cache execution payload header
|
||||
state.latest_execution_payload_header = ExecutionPayloadHeader(
|
||||
parent_hash=payload.parent_hash,
|
||||
fee_recipient=payload.fee_recipient,
|
||||
state_root=payload.state_root,
|
||||
receipts_root=payload.receipts_root,
|
||||
logs_bloom=payload.logs_bloom,
|
||||
prev_randao=payload.prev_randao,
|
||||
block_number=payload.block_number,
|
||||
gas_limit=payload.gas_limit,
|
||||
gas_used=payload.gas_used,
|
||||
timestamp=payload.timestamp,
|
||||
extra_data=payload.extra_data,
|
||||
base_fee_per_gas=payload.base_fee_per_gas,
|
||||
block_hash=payload.block_hash,
|
||||
transactions_root=hash_tree_root(payload.transactions),
|
||||
withdrawals_root=hash_tree_root(payload.withdrawals),
|
||||
excess_data_gas=payload.excess_data_gas,
|
||||
deposit_receipts_root=hash_tree_root(payload.deposit_receipts), # [New in EIP6110]
|
||||
)
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure EIP-6110 testing only.
|
||||
Modifications include:
|
||||
1. Use `EIP6110_FORK_VERSION` as the previous and current fork version.
|
||||
2. Utilize the EIP-6110 `BeaconBlockBody` when constructing the initial `latest_block_header`.
|
||||
3. Add `deposit_receipts_start_index` variable to the genesis state initialization.
|
||||
|
||||
```python
|
||||
def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
|
||||
eth1_timestamp: uint64,
|
||||
deposits: Sequence[Deposit],
|
||||
execution_payload_header: ExecutionPayloadHeader=ExecutionPayloadHeader()
|
||||
) -> BeaconState:
|
||||
fork = Fork(
|
||||
previous_version=EIP6110_FORK_VERSION, # [Modified in EIP6110] for testing only
|
||||
current_version=EIP6110_FORK_VERSION, # [Modified in EIP6110]
|
||||
epoch=GENESIS_EPOCH,
|
||||
)
|
||||
state = BeaconState(
|
||||
genesis_time=eth1_timestamp + GENESIS_DELAY,
|
||||
fork=fork,
|
||||
eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))),
|
||||
latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
|
||||
randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy
|
||||
deposit_receipts_start_index=UNSET_DEPOSIT_RECEIPTS_START_INDEX, # [New in EIP6110]
|
||||
)
|
||||
|
||||
# Process deposits
|
||||
leaves = list(map(lambda deposit: deposit.data, deposits))
|
||||
for index, deposit in enumerate(deposits):
|
||||
deposit_data_list = List[DepositData, 2**DEPOSIT_CONTRACT_TREE_DEPTH](*leaves[:index + 1])
|
||||
state.eth1_data.deposit_root = hash_tree_root(deposit_data_list)
|
||||
process_deposit(state, deposit)
|
||||
|
||||
# Process activations
|
||||
for index, validator in enumerate(state.validators):
|
||||
balance = state.balances[index]
|
||||
validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
|
||||
if validator.effective_balance == MAX_EFFECTIVE_BALANCE:
|
||||
validator.activation_eligibility_epoch = GENESIS_EPOCH
|
||||
validator.activation_epoch = GENESIS_EPOCH
|
||||
|
||||
# Set genesis validators root for domain separation and chain versioning
|
||||
state.genesis_validators_root = hash_tree_root(state.validators)
|
||||
|
||||
# Fill in sync committees
|
||||
# Note: A duplicate committee is assigned for the current and next committee at genesis
|
||||
state.current_sync_committee = get_next_sync_committee(state)
|
||||
state.next_sync_committee = get_next_sync_committee(state)
|
||||
|
||||
# Initialize the execution payload header
|
||||
state.latest_execution_payload_header = execution_payload_header
|
||||
|
||||
return state
|
||||
```
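For illustration only, here is a minimal sketch of how a pure EIP-6110 genesis state might be produced in a pyspec-style test harness; the Eth1 block hash, timestamp, and `deposits` list below are hypothetical placeholders rather than part of the specification.

```python
# Hypothetical test sketch: `deposits` is assumed to be a pre-built list with
# valid Merkle proofs and signatures (as a pyspec test helper would produce).
genesis_state = initialize_beacon_state_from_eth1(
    eth1_block_hash=Hash32(b'\x42' * 32),  # placeholder Eth1 block hash
    eth1_timestamp=uint64(1700000000),     # placeholder timestamp
    deposits=deposits,
)
# A pure EIP-6110 genesis starts with the deposit receipts start index unset
# and the EIP-6110 fork version active from the beginning.
assert genesis_state.deposit_receipts_start_index == UNSET_DEPOSIT_RECEIPTS_START_INDEX
assert genesis_state.fork.current_version == EIP6110_FORK_VERSION
```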
specs/_features/eip6110/fork.md (new file)
@@ -0,0 +1,145 @@

# EIP-6110 -- Fork Logic
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Configuration](#configuration)
|
||||
- [Helper functions](#helper-functions)
|
||||
- [Misc](#misc)
|
||||
- [Modified `compute_fork_version`](#modified-compute_fork_version)
|
||||
- [Fork to EIP-6110](#fork-to-eip-6110)
|
||||
- [Fork trigger](#fork-trigger)
|
||||
- [Upgrading the state](#upgrading-the-state)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This document describes the process of the EIP-6110 upgrade.
|
||||
|
||||
## Configuration
|
||||
|
||||
Warning: this configuration is not definitive.
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `EIP6110_FORK_VERSION` | `Version('0x05000000')` |
|
||||
| `EIP6110_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** |
|
||||
|
||||
## Helper functions
|
||||
|
||||
### Misc
|
||||
|
||||
#### Modified `compute_fork_version`
|
||||
|
||||
```python
|
||||
def compute_fork_version(epoch: Epoch) -> Version:
|
||||
"""
|
||||
Return the fork version at the given ``epoch``.
|
||||
"""
|
||||
if epoch >= EIP6110_FORK_EPOCH:
|
||||
return EIP6110_FORK_VERSION
|
||||
if epoch >= DENEB_FORK_EPOCH:
|
||||
return DENEB_FORK_VERSION
|
||||
if epoch >= CAPELLA_FORK_EPOCH:
|
||||
return CAPELLA_FORK_VERSION
|
||||
if epoch >= BELLATRIX_FORK_EPOCH:
|
||||
return BELLATRIX_FORK_VERSION
|
||||
if epoch >= ALTAIR_FORK_EPOCH:
|
||||
return ALTAIR_FORK_VERSION
|
||||
return GENESIS_FORK_VERSION
|
||||
```
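As a non-normative usage sketch, the returned version feeds the usual fork-digest and signing-domain computations around the transition; `genesis_validators_root` below is assumed to be known from the target network.

```python
# Sketch: select the fork version for an epoch and derive the fork digest
# that gossip topics would use once EIP-6110 is active.
fork_version = compute_fork_version(EIP6110_FORK_EPOCH)  # == EIP6110_FORK_VERSION
fork_digest = compute_fork_digest(fork_version, genesis_validators_root)  # assumed known root
```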
|
||||
|
||||
## Fork to EIP-6110
|
||||
|
||||
### Fork trigger
|
||||
|
||||
TBD. This fork is defined for testing purposes; the EIP may be combined with another consensus-layer upgrade.
For now, we assume the condition will be triggered at epoch `EIP6110_FORK_EPOCH`.

Note that for pure EIP-6110 networks, we don't apply `upgrade_to_eip6110` since they start with the EIP-6110 version logic.
|
||||
|
||||
### Upgrading the state
|
||||
|
||||
If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == EIP6110_FORK_EPOCH`,
|
||||
an irregular state change is made to upgrade to EIP-6110.
|
||||
|
||||
```python
|
||||
def upgrade_to_eip6110(pre: deneb.BeaconState) -> BeaconState:
|
||||
epoch = deneb.get_current_epoch(pre)
|
||||
latest_execution_payload_header = ExecutionPayloadHeader(
|
||||
parent_hash=pre.latest_execution_payload_header.parent_hash,
|
||||
fee_recipient=pre.latest_execution_payload_header.fee_recipient,
|
||||
state_root=pre.latest_execution_payload_header.state_root,
|
||||
receipts_root=pre.latest_execution_payload_header.receipts_root,
|
||||
logs_bloom=pre.latest_execution_payload_header.logs_bloom,
|
||||
prev_randao=pre.latest_execution_payload_header.prev_randao,
|
||||
block_number=pre.latest_execution_payload_header.block_number,
|
||||
gas_limit=pre.latest_execution_payload_header.gas_limit,
|
||||
gas_used=pre.latest_execution_payload_header.gas_used,
|
||||
timestamp=pre.latest_execution_payload_header.timestamp,
|
||||
extra_data=pre.latest_execution_payload_header.extra_data,
|
||||
base_fee_per_gas=pre.latest_execution_payload_header.base_fee_per_gas,
|
||||
block_hash=pre.latest_execution_payload_header.block_hash,
|
||||
transactions_root=pre.latest_execution_payload_header.transactions_root,
|
||||
withdrawals_root=pre.latest_execution_payload_header.withdrawals_root,
|
||||
excess_data_gas=uint256(0),
|
||||
deposit_receipts_root=Root(), # [New in EIP-6110]
|
||||
)
|
||||
post = BeaconState(
|
||||
# Versioning
|
||||
genesis_time=pre.genesis_time,
|
||||
genesis_validators_root=pre.genesis_validators_root,
|
||||
slot=pre.slot,
|
||||
fork=Fork(
|
||||
previous_version=pre.fork.current_version,
|
||||
current_version=EIP6110_FORK_VERSION, # [Modified in EIP-6110]
|
||||
epoch=epoch,
|
||||
),
|
||||
# History
|
||||
latest_block_header=pre.latest_block_header,
|
||||
block_roots=pre.block_roots,
|
||||
state_roots=pre.state_roots,
|
||||
historical_roots=pre.historical_roots,
|
||||
# Eth1
|
||||
eth1_data=pre.eth1_data,
|
||||
eth1_data_votes=pre.eth1_data_votes,
|
||||
eth1_deposit_index=pre.eth1_deposit_index,
|
||||
# Registry
|
||||
validators=pre.validators,
|
||||
balances=pre.balances,
|
||||
# Randomness
|
||||
randao_mixes=pre.randao_mixes,
|
||||
# Slashings
|
||||
slashings=pre.slashings,
|
||||
# Participation
|
||||
previous_epoch_participation=pre.previous_epoch_participation,
|
||||
current_epoch_participation=pre.current_epoch_participation,
|
||||
# Finality
|
||||
justification_bits=pre.justification_bits,
|
||||
previous_justified_checkpoint=pre.previous_justified_checkpoint,
|
||||
current_justified_checkpoint=pre.current_justified_checkpoint,
|
||||
finalized_checkpoint=pre.finalized_checkpoint,
|
||||
# Inactivity
|
||||
inactivity_scores=pre.inactivity_scores,
|
||||
# Sync
|
||||
current_sync_committee=pre.current_sync_committee,
|
||||
next_sync_committee=pre.next_sync_committee,
|
||||
# Execution-layer
|
||||
latest_execution_payload_header=latest_execution_payload_header, # [Modified in EIP-6110]
|
||||
# Withdrawals
|
||||
next_withdrawal_index=pre.next_withdrawal_index,
|
||||
next_withdrawal_validator_index=pre.next_withdrawal_validator_index,
|
||||
# Deep history valid from Capella onwards
|
||||
historical_summaries=pre.historical_summaries,
|
||||
# EIP-6110
|
||||
deposit_receipts_start_index=UNSET_DEPOSIT_RECEIPTS_START_INDEX, # [New in EIP-6110]
|
||||
)
|
||||
|
||||
return post
|
||||
```
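A non-normative sketch of when a client would invoke the upgrade, mirroring the trigger condition stated above.

```python
# Client-side pseudologic: apply the irregular state change exactly at the fork boundary.
if state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == EIP6110_FORK_EPOCH:
    state = upgrade_to_eip6110(state)
```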
specs/_features/eip6110/light-client/fork.md (new file)
@@ -0,0 +1,112 @@

# EIP-6110 Light Client -- Fork Logic
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Upgrading light client data](#upgrading-light-client-data)
|
||||
- [Upgrading the store](#upgrading-the-store)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This document describes how to upgrade existing light client objects based on the [Deneb specification](../../deneb/light-client/sync-protocol.md) to EIP-6110. This is necessary when processing pre-EIP-6110 data with a post-EIP-6110 `LightClientStore`. Note that the data being exchanged over the network protocols uses the original format.
|
||||
|
||||
### Upgrading light client data
|
||||
|
||||
An EIP-6110 `LightClientStore` can still process earlier light client data. In order to do so, that pre-EIP-6110 data needs to be locally upgraded to EIP-6110 before processing.
|
||||
|
||||
```python
|
||||
def upgrade_lc_header_to_eip6110(pre: deneb.LightClientHeader) -> LightClientHeader:
|
||||
return LightClientHeader(
|
||||
beacon=pre.beacon,
|
||||
execution=ExecutionPayloadHeader(
|
||||
parent_hash=pre.execution.parent_hash,
|
||||
fee_recipient=pre.execution.fee_recipient,
|
||||
state_root=pre.execution.state_root,
|
||||
receipts_root=pre.execution.receipts_root,
|
||||
logs_bloom=pre.execution.logs_bloom,
|
||||
prev_randao=pre.execution.prev_randao,
|
||||
block_number=pre.execution.block_number,
|
||||
gas_limit=pre.execution.gas_limit,
|
||||
gas_used=pre.execution.gas_used,
|
||||
timestamp=pre.execution.timestamp,
|
||||
extra_data=pre.execution.extra_data,
|
||||
base_fee_per_gas=pre.execution.base_fee_per_gas,
|
||||
block_hash=pre.execution.block_hash,
|
||||
transactions_root=pre.execution.transactions_root,
|
||||
withdrawals_root=pre.execution.withdrawals_root,
|
||||
excess_data_gas=pre.execution.excess_data_gas,
|
||||
deposit_receipts_root=Root(), # [New in EIP6110]
|
||||
),
|
||||
execution_branch=pre.execution_branch,
|
||||
)
|
||||
```
|
||||
|
||||
```python
|
||||
def upgrade_lc_bootstrap_to_eip6110(pre: deneb.LightClientBootstrap) -> LightClientBootstrap:
|
||||
return LightClientBootstrap(
|
||||
header=upgrade_lc_header_to_eip6110(pre.header),
|
||||
current_sync_committee=pre.current_sync_committee,
|
||||
current_sync_committee_branch=pre.current_sync_committee_branch,
|
||||
)
|
||||
```
|
||||
|
||||
```python
|
||||
def upgrade_lc_update_to_eip6110(pre: deneb.LightClientUpdate) -> LightClientUpdate:
|
||||
return LightClientUpdate(
|
||||
attested_header=upgrade_lc_header_to_eip6110(pre.attested_header),
|
||||
next_sync_committee=pre.next_sync_committee,
|
||||
next_sync_committee_branch=pre.next_sync_committee_branch,
|
||||
finalized_header=upgrade_lc_header_to_eip6110(pre.finalized_header),
|
||||
finality_branch=pre.finality_branch,
|
||||
sync_aggregate=pre.sync_aggregate,
|
||||
signature_slot=pre.signature_slot,
|
||||
)
|
||||
```
|
||||
|
||||
```python
|
||||
def upgrade_lc_finality_update_to_eip6110(pre: deneb.LightClientFinalityUpdate) -> LightClientFinalityUpdate:
|
||||
return LightClientFinalityUpdate(
|
||||
attested_header=upgrade_lc_header_to_eip6110(pre.attested_header),
|
||||
finalized_header=upgrade_lc_header_to_eip6110(pre.finalized_header),
|
||||
finality_branch=pre.finality_branch,
|
||||
sync_aggregate=pre.sync_aggregate,
|
||||
signature_slot=pre.signature_slot,
|
||||
)
|
||||
```
|
||||
|
||||
```python
|
||||
def upgrade_lc_optimistic_update_to_eip6110(pre: deneb.LightClientOptimisticUpdate) -> LightClientOptimisticUpdate:
|
||||
return LightClientOptimisticUpdate(
|
||||
attested_header=upgrade_lc_header_to_eip6110(pre.attested_header),
|
||||
sync_aggregate=pre.sync_aggregate,
|
||||
signature_slot=pre.signature_slot,
|
||||
)
|
||||
```
|
||||
|
||||
### Upgrading the store
|
||||
|
||||
Existing `LightClientStore` objects based on Deneb MUST be upgraded to EIP-6110 before EIP-6110-based light client data can be processed. The `LightClientStore` upgrade MAY be performed before `EIP6110_FORK_EPOCH`.
|
||||
|
||||
```python
|
||||
def upgrade_lc_store_to_eip6110(pre: deneb.LightClientStore) -> LightClientStore:
|
||||
if pre.best_valid_update is None:
|
||||
best_valid_update = None
|
||||
else:
|
||||
best_valid_update = upgrade_lc_update_to_eip6110(pre.best_valid_update)
|
||||
return LightClientStore(
|
||||
finalized_header=upgrade_lc_header_to_eip6110(pre.finalized_header),
|
||||
current_sync_committee=pre.current_sync_committee,
|
||||
next_sync_committee=pre.next_sync_committee,
|
||||
best_valid_update=best_valid_update,
|
||||
optimistic_header=upgrade_lc_header_to_eip6110(pre.optimistic_header),
|
||||
previous_max_active_participants=pre.previous_max_active_participants,
|
||||
current_max_active_participants=pre.current_max_active_participants,
|
||||
)
|
||||
```
specs/_features/eip6110/light-client/full-node.md (new file)
@@ -0,0 +1,77 @@

# EIP-6110 Light Client -- Full Node
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Helper functions](#helper-functions)
|
||||
- [Modified `block_to_light_client_header`](#modified-block_to_light_client_header)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This upgrade adds information about the execution payload to light client data as part of the EIP-6110 upgrade.
|
||||
|
||||
## Helper functions
|
||||
|
||||
### Modified `block_to_light_client_header`
|
||||
|
||||
```python
|
||||
def block_to_light_client_header(block: SignedBeaconBlock) -> LightClientHeader:
|
||||
epoch = compute_epoch_at_slot(block.message.slot)
|
||||
|
||||
if epoch >= CAPELLA_FORK_EPOCH:
|
||||
payload = block.message.body.execution_payload
|
||||
execution_header = ExecutionPayloadHeader(
|
||||
parent_hash=payload.parent_hash,
|
||||
fee_recipient=payload.fee_recipient,
|
||||
state_root=payload.state_root,
|
||||
receipts_root=payload.receipts_root,
|
||||
logs_bloom=payload.logs_bloom,
|
||||
prev_randao=payload.prev_randao,
|
||||
block_number=payload.block_number,
|
||||
gas_limit=payload.gas_limit,
|
||||
gas_used=payload.gas_used,
|
||||
timestamp=payload.timestamp,
|
||||
extra_data=payload.extra_data,
|
||||
base_fee_per_gas=payload.base_fee_per_gas,
|
||||
block_hash=payload.block_hash,
|
||||
transactions_root=hash_tree_root(payload.transactions),
|
||||
withdrawals_root=hash_tree_root(payload.withdrawals),
|
||||
)
|
||||
|
||||
if epoch >= DENEB_FORK_EPOCH:
|
||||
execution_header.excess_data_gas = payload.excess_data_gas
|
||||
|
||||
# [New in EIP6110]
|
||||
if epoch >= EIP6110_FORK_EPOCH:
|
||||
execution_header.deposit_receipts_root = hash_tree_root(payload.deposit_receipts)
|
||||
|
||||
execution_branch = compute_merkle_proof_for_block_body(block.message.body, EXECUTION_PAYLOAD_INDEX)
|
||||
else:
|
||||
# Note that during fork transitions, `finalized_header` may still point to earlier forks.
|
||||
# While Bellatrix blocks also contain an `ExecutionPayload` (minus `withdrawals_root`),
|
||||
# it was not included in the corresponding light client data. To ensure compatibility
|
||||
# with legacy data going through `upgrade_lc_header_to_capella`, leave out execution data.
|
||||
execution_header = ExecutionPayloadHeader()
|
||||
execution_branch = [Bytes32() for _ in range(floorlog2(EXECUTION_PAYLOAD_INDEX))]
|
||||
|
||||
return LightClientHeader(
|
||||
beacon=BeaconBlockHeader(
|
||||
slot=block.message.slot,
|
||||
proposer_index=block.message.proposer_index,
|
||||
parent_root=block.message.parent_root,
|
||||
state_root=block.message.state_root,
|
||||
body_root=hash_tree_root(block.message.body),
|
||||
),
|
||||
execution=execution_header,
|
||||
execution_branch=execution_branch,
|
||||
)
|
||||
```
specs/_features/eip6110/light-client/p2p-interface.md (new file)
@@ -0,0 +1,111 @@

# EIP-6110 Light Client -- Networking
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Networking](#networking)
|
||||
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
|
||||
- [Topics and messages](#topics-and-messages)
|
||||
- [Global topics](#global-topics)
|
||||
- [`light_client_finality_update`](#light_client_finality_update)
|
||||
- [`light_client_optimistic_update`](#light_client_optimistic_update)
|
||||
- [The Req/Resp domain](#the-reqresp-domain)
|
||||
- [Messages](#messages)
|
||||
- [GetLightClientBootstrap](#getlightclientbootstrap)
|
||||
- [LightClientUpdatesByRange](#lightclientupdatesbyrange)
|
||||
- [GetLightClientFinalityUpdate](#getlightclientfinalityupdate)
|
||||
- [GetLightClientOptimisticUpdate](#getlightclientoptimisticupdate)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Networking
|
||||
|
||||
The [Deneb light client networking specification](../../deneb/light-client/p2p-interface.md) is extended to exchange [EIP-6110 light client data](./sync-protocol.md).
|
||||
|
||||
### The gossip domain: gossipsub
|
||||
|
||||
#### Topics and messages
|
||||
|
||||
##### Global topics
|
||||
|
||||
###### `light_client_finality_update`
|
||||
|
||||
[0]: # (eth2spec: skip)
|
||||
|
||||
| `fork_version` | Message SSZ type |
|
||||
|--------------------------------------------------------|-------------------------------------|
|
||||
| `GENESIS_FORK_VERSION` | n/a |
|
||||
| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientFinalityUpdate` |
|
||||
| `CAPELLA_FORK_VERSION` | `capella.LightClientFinalityUpdate` |
|
||||
| `DENEB_FORK_VERSION` | `deneb.LightClientFinalityUpdate` |
|
||||
| `EIP6110_FORK_VERSION` and later | `eip6110.LightClientFinalityUpdate` |
|
||||
|
||||
###### `light_client_optimistic_update`
|
||||
|
||||
[0]: # (eth2spec: skip)
|
||||
|
||||
| `fork_version` | Message SSZ type |
|
||||
|--------------------------------------------------------|---------------------------------------|
|
||||
| `GENESIS_FORK_VERSION` | n/a |
|
||||
| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientOptimisticUpdate` |
|
||||
| `CAPELLA_FORK_VERSION` | `capella.LightClientOptimisticUpdate` |
|
||||
| `DENEB_FORK_VERSION` | `deneb.LightClientOptimisticUpdate` |
|
||||
| `EIP6110_FORK_VERSION` and later | `eip6110.LightClientOptimisticUpdate` |
|
||||
|
||||
### The Req/Resp domain
|
||||
|
||||
#### Messages
|
||||
|
||||
##### GetLightClientBootstrap
|
||||
|
||||
[0]: # (eth2spec: skip)
|
||||
|
||||
| `fork_version` | Response SSZ type |
|
||||
|--------------------------------------------------------|------------------------------------|
|
||||
| `GENESIS_FORK_VERSION` | n/a |
|
||||
| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientBootstrap` |
|
||||
| `CAPELLA_FORK_VERSION` | `capella.LightClientBootstrap` |
|
||||
| `DENEB_FORK_VERSION` | `deneb.LightClientBootstrap` |
|
||||
| `EIP6110_FORK_VERSION` and later | `eip6110.LightClientBootstrap` |
|
||||
|
||||
##### LightClientUpdatesByRange
|
||||
|
||||
[0]: # (eth2spec: skip)
|
||||
|
||||
| `fork_version` | Response chunk SSZ type |
|
||||
|--------------------------------------------------------|----------------------------------|
|
||||
| `GENESIS_FORK_VERSION` | n/a |
|
||||
| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientUpdate` |
|
||||
| `CAPELLA_FORK_VERSION` | `capella.LightClientUpdate` |
|
||||
| `DENEB_FORK_VERSION` | `deneb.LightClientUpdate` |
|
||||
| `EIP6110_FORK_VERSION` and later | `eip6110.LightClientUpdate` |
|
||||
|
||||
##### GetLightClientFinalityUpdate
|
||||
|
||||
[0]: # (eth2spec: skip)
|
||||
|
||||
| `fork_version` | Response SSZ type |
|
||||
|--------------------------------------------------------|-------------------------------------|
|
||||
| `GENESIS_FORK_VERSION` | n/a |
|
||||
| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientFinalityUpdate` |
|
||||
| `CAPELLA_FORK_VERSION` | `capella.LightClientFinalityUpdate` |
|
||||
| `DENEB_FORK_VERSION` | `deneb.LightClientFinalityUpdate` |
|
||||
| `EIP6110_FORK_VERSION` and later | `eip6110.LightClientFinalityUpdate` |
|
||||
|
||||
##### GetLightClientOptimisticUpdate
|
||||
|
||||
[0]: # (eth2spec: skip)
|
||||
|
||||
| `fork_version` | Response SSZ type |
|
||||
|--------------------------------------------------------|---------------------------------------|
|
||||
| `GENESIS_FORK_VERSION` | n/a |
|
||||
| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientOptimisticUpdate` |
|
||||
| `CAPELLA_FORK_VERSION` | `capella.LightClientOptimisticUpdate` |
|
||||
| `DENEB_FORK_VERSION` | `deneb.LightClientOptimisticUpdate` |
|
||||
| `EIP6110_FORK_VERSION` and later | `eip6110.LightClientOptimisticUpdate` |
specs/_features/eip6110/light-client/sync-protocol.md (new file)
@@ -0,0 +1,89 @@

# EIP-6110 Light Client -- Sync Protocol
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Helper functions](#helper-functions)
|
||||
- [Modified `get_lc_execution_root`](#modified-get_lc_execution_root)
|
||||
- [Modified `is_valid_light_client_header`](#modified-is_valid_light_client_header)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This upgrade updates light client data to include the EIP-6110 changes to the [`ExecutionPayload`](../beacon-chain.md) structure. It extends the [Deneb Light Client specifications](../../deneb/light-client/sync-protocol.md). The [fork document](./fork.md) explains how to upgrade existing Deneb-based deployments to EIP-6110.
|
||||
|
||||
Additional documents describe the impact of the upgrade on certain roles:
|
||||
- [Full node](./full-node.md)
|
||||
- [Networking](./p2p-interface.md)
|
||||
|
||||
## Helper functions
|
||||
|
||||
### Modified `get_lc_execution_root`
|
||||
|
||||
```python
|
||||
def get_lc_execution_root(header: LightClientHeader) -> Root:
|
||||
epoch = compute_epoch_at_slot(header.beacon.slot)
|
||||
|
||||
if epoch >= DENEB_FORK_EPOCH:
|
||||
return hash_tree_root(header.execution)
|
||||
|
||||
if epoch >= CAPELLA_FORK_EPOCH:
|
||||
execution_header = capella.ExecutionPayloadHeader(
|
||||
parent_hash=header.execution.parent_hash,
|
||||
fee_recipient=header.execution.fee_recipient,
|
||||
state_root=header.execution.state_root,
|
||||
receipts_root=header.execution.receipts_root,
|
||||
logs_bloom=header.execution.logs_bloom,
|
||||
prev_randao=header.execution.prev_randao,
|
||||
block_number=header.execution.block_number,
|
||||
gas_limit=header.execution.gas_limit,
|
||||
gas_used=header.execution.gas_used,
|
||||
timestamp=header.execution.timestamp,
|
||||
extra_data=header.execution.extra_data,
|
||||
base_fee_per_gas=header.execution.base_fee_per_gas,
|
||||
block_hash=header.execution.block_hash,
|
||||
transactions_root=header.execution.transactions_root,
|
||||
withdrawals_root=header.execution.withdrawals_root,
|
||||
)
|
||||
return hash_tree_root(execution_header)
|
||||
|
||||
return Root()
|
||||
```
|
||||
|
||||
### Modified `is_valid_light_client_header`
|
||||
|
||||
```python
|
||||
def is_valid_light_client_header(header: LightClientHeader) -> bool:
|
||||
epoch = compute_epoch_at_slot(header.beacon.slot)
|
||||
|
||||
# [New in EIP-6110]
|
||||
if epoch < EIP6110_FORK_EPOCH:
|
||||
if header.execution.deposit_receipts_root != Root():
|
||||
return False
|
||||
|
||||
if epoch < DENEB_FORK_EPOCH:
|
||||
if header.execution.excess_data_gas != uint256(0):
|
||||
return False
|
||||
|
||||
if epoch < CAPELLA_FORK_EPOCH:
|
||||
return (
|
||||
header.execution == ExecutionPayloadHeader()
|
||||
and header.execution_branch == [Bytes32() for _ in range(floorlog2(EXECUTION_PAYLOAD_INDEX))]
|
||||
)
|
||||
|
||||
return is_valid_merkle_branch(
|
||||
leaf=get_lc_execution_root(header),
|
||||
branch=header.execution_branch,
|
||||
depth=floorlog2(EXECUTION_PAYLOAD_INDEX),
|
||||
index=get_subtree_index(EXECUTION_PAYLOAD_INDEX),
|
||||
root=header.beacon.body_root,
|
||||
)
|
||||
```
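To illustrate the new check (a hypothetical example, not normative): a header whose beacon slot predates `EIP6110_FORK_EPOCH` must not carry a non-default `deposit_receipts_root`.

```python
# Sketch: a pre-EIP-6110 header claiming a deposit receipts root fails validation.
header = LightClientHeader(
    beacon=BeaconBlockHeader(slot=compute_start_slot_at_epoch(CAPELLA_FORK_EPOCH)),
    execution=ExecutionPayloadHeader(deposit_receipts_root=Root(b'\x01' * 32)),
)
assert not is_valid_light_client_header(header)
```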
specs/_features/eip6110/validator.md (new file)
@@ -0,0 +1,42 @@

# EIP-6110 -- Honest Validator
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Block proposal](#block-proposal)
|
||||
- [Deposits](#deposits)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This document represents the changes to be made in the code of an "honest validator" to implement EIP-6110.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
This document is an extension of the [Deneb -- Honest Validator](../../deneb/validator.md) guide.
|
||||
All behaviors and definitions defined in this document, and documents it extends, carry over unless explicitly noted or overridden.
|
||||
|
||||
All terminology, constants, functions, and protocol mechanics defined in the updated Beacon Chain doc of [EIP-6110](./beacon-chain.md) are requisite for this document and used throughout.
|
||||
Please see the related Beacon Chain doc before continuing and use it as a reference throughout.
|
||||
|
||||
## Block proposal
|
||||
|
||||
### Deposits
|
||||
|
||||
The expected number of deposits MUST be changed from `min(MAX_DEPOSITS, eth1_data.deposit_count - state.eth1_deposit_index)` to the result of the following function:
|
||||
|
||||
```python
|
||||
def get_eth1_deposit_count(state: BeaconState) -> uint64:
|
||||
eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_receipts_start_index)
|
||||
if state.eth1_deposit_index < eth1_deposit_index_limit:
|
||||
return min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
|
||||
else:
|
||||
return uint64(0)
|
||||
```
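As a usage sketch of the block-proposal flow (pyspec-style, not normative): the proposer caps the number of Eth1-bridge deposits packed into the block body with this function instead of the old `MAX_DEPOSITS` formula. `get_deposits_from_eth1_data` below is a hypothetical stand-in for the client's deposit cache lookup.

```python
# Sketch: select Eth1 deposits for a proposal. The helper below is hypothetical.
expected_count = get_eth1_deposit_count(state)
deposits = get_deposits_from_eth1_data(state.eth1_data, state.eth1_deposit_index, expected_count)
assert len(deposits) == expected_count  # the block is invalid otherwise
```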
specs/_features/eip6914/beacon-chain.md (new file)
@@ -0,0 +1,65 @@

# EIP-6914 -- The Beacon Chain
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Preset](#preset)
|
||||
- [Time parameters](#time-parameters)
|
||||
- [Helper functions](#helper-functions)
|
||||
- [Predicates](#predicates)
|
||||
- [`is_reusable_validator`](#is_reusable_validator)
|
||||
- [Beacon chain state transition function](#beacon-chain-state-transition-function)
|
||||
- [Block processing](#block-processing)
|
||||
- [Modified `get_index_for_new_validator`](#modified-get_index_for_new_validator)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This is the beacon chain specification for assigning new deposits to existing validator records. It refers to [EIP-6914](https://github.com/ethereum/EIPs/pull/6914).
|
||||
|
||||
*Note:* This specification is built upon [Capella](../../capella/beacon_chain.md) and is under active development.
|
||||
|
||||
## Preset
|
||||
|
||||
### Time parameters
|
||||
|
||||
| Name | Value | Unit | Duration |
|
||||
| - | - | - | - |
|
||||
| `SAFE_EPOCHS_TO_REUSE_INDEX` | `uint64(2**16)` (= 65,536) | epochs | ~0.8 year |
|
||||
|
||||
## Helper functions
|
||||
|
||||
### Predicates
|
||||
|
||||
#### `is_reusable_validator`
|
||||
|
||||
```python
|
||||
def is_reusable_validator(validator: Validator, balance: Gwei, epoch: Epoch) -> bool:
|
||||
"""
|
||||
Check if ``validator`` index can be re-assigned to a new deposit.
|
||||
"""
|
||||
return (
|
||||
epoch > validator.withdrawable_epoch + SAFE_EPOCHS_TO_REUSE_INDEX
|
||||
and balance == 0
|
||||
)
|
||||
```
|
||||
|
||||
## Beacon chain state transition function
|
||||
|
||||
### Block processing
|
||||
|
||||
#### Modified `get_index_for_new_validator`
|
||||
|
||||
```python
|
||||
def get_index_for_new_validator(state: BeaconState) -> ValidatorIndex:
|
||||
for index, validator in enumerate(state.validators):
|
||||
if is_reusable_validator(validator, state.balances[index], get_current_epoch(state)):
|
||||
return ValidatorIndex(index)
|
||||
return ValidatorIndex(len(state.validators))
|
||||
```
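A brief, non-normative sketch of what the modification implies: the returned index either points at a reusable validator record or equals the registry length, so `apply_deposit` fills an existing slot via `set_or_append_list` instead of always appending.

```python
# Sketch: the reuse invariant of the modified index selection.
index = get_index_for_new_validator(state)
assert index <= len(state.validators)
if index < len(state.validators):
    # An existing, long-withdrawn and fully-drained record is being reused.
    assert is_reusable_validator(state.validators[index], state.balances[index], get_current_epoch(state))
```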
|
||||
@@ -47,7 +47,7 @@ Following the same scheme as the [Phase0 gossip topics](../../phase0/p2p-interfa
|
||||
| `shard_column_{subnet_id}` | `SignedShardSample` |
|
||||
| `builder_block_bid` | `BuilderBlockBid` |
|
||||
|
||||
The [DAS network specification](./das-p2p.md) defines additional topics.
|
||||
The [DAS network specification](../das/das-core.md) defines additional topics.
|
||||
|
||||
#### Builder block bid
|
||||
|
||||
|
||||
@@ -30,6 +30,8 @@
|
||||
- [Misc](#misc-1)
|
||||
- [`add_flag`](#add_flag)
|
||||
- [`has_flag`](#has_flag)
|
||||
- [`get_index_for_new_validator`](#get_index_for_new_validator)
|
||||
- [`set_or_append_list`](#set_or_append_list)
|
||||
- [Beacon state accessors](#beacon-state-accessors)
|
||||
- [`get_next_sync_committee_indices`](#get_next_sync_committee_indices)
|
||||
- [`get_next_sync_committee`](#get_next_sync_committee)
|
||||
@@ -43,7 +45,7 @@
|
||||
- [Modified `slash_validator`](#modified-slash_validator)
|
||||
- [Block processing](#block-processing)
|
||||
- [Modified `process_attestation`](#modified-process_attestation)
|
||||
- [Modified `process_deposit`](#modified-process_deposit)
|
||||
- [Modified `apply_deposit`](#modified-apply_deposit)
|
||||
- [Sync aggregate processing](#sync-aggregate-processing)
|
||||
- [Epoch processing](#epoch-processing)
|
||||
- [Justification and finalization](#justification-and-finalization)
|
||||
@@ -248,6 +250,23 @@ def has_flag(flags: ParticipationFlags, flag_index: int) -> bool:
|
||||
return flags & flag == flag
|
||||
```
|
||||
|
||||
#### `get_index_for_new_validator`
|
||||
|
||||
```python
|
||||
def get_index_for_new_validator(state: BeaconState) -> ValidatorIndex:
|
||||
return ValidatorIndex(len(state.validators))
|
||||
```
|
||||
|
||||
#### `set_or_append_list`
|
||||
|
||||
```python
|
||||
def set_or_append_list(list: List, index: ValidatorIndex, value: Any) -> None:
|
||||
if index == len(list):
|
||||
list.append(value)
|
||||
else:
|
||||
list[index] = value
|
||||
```
|
||||
|
||||
### Beacon state accessors
|
||||
|
||||
#### `get_next_sync_committee_indices`
|
||||
@@ -489,44 +508,36 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
|
||||
increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
|
||||
```
|
||||
|
||||
#### Modified `process_deposit`
|
||||
#### Modified `apply_deposit`
|
||||
|
||||
*Note*: The function `process_deposit` is modified to initialize `inactivity_scores`, `previous_epoch_participation`, and `current_epoch_participation`.
|
||||
*Note*: The function `apply_deposit` is modified to initialize `inactivity_scores`, `previous_epoch_participation`, and `current_epoch_participation`.
|
||||
|
||||
```python
|
||||
def process_deposit(state: BeaconState, deposit: Deposit) -> None:
|
||||
# Verify the Merkle branch
|
||||
assert is_valid_merkle_branch(
|
||||
leaf=hash_tree_root(deposit.data),
|
||||
branch=deposit.proof,
|
||||
depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, # Add 1 for the List length mix-in
|
||||
index=state.eth1_deposit_index,
|
||||
root=state.eth1_data.deposit_root,
|
||||
)
|
||||
|
||||
# Deposits must be processed in order
|
||||
state.eth1_deposit_index += 1
|
||||
|
||||
pubkey = deposit.data.pubkey
|
||||
amount = deposit.data.amount
|
||||
def apply_deposit(state: BeaconState,
|
||||
pubkey: BLSPubkey,
|
||||
withdrawal_credentials: Bytes32,
|
||||
amount: uint64,
|
||||
signature: BLSSignature) -> None:
|
||||
validator_pubkeys = [validator.pubkey for validator in state.validators]
|
||||
if pubkey not in validator_pubkeys:
|
||||
# Verify the deposit signature (proof of possession) which is not checked by the deposit contract
|
||||
deposit_message = DepositMessage(
|
||||
pubkey=deposit.data.pubkey,
|
||||
withdrawal_credentials=deposit.data.withdrawal_credentials,
|
||||
amount=deposit.data.amount,
|
||||
pubkey=pubkey,
|
||||
withdrawal_credentials=withdrawal_credentials,
|
||||
amount=amount,
|
||||
)
|
||||
domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks
|
||||
signing_root = compute_signing_root(deposit_message, domain)
|
||||
# Initialize validator if the deposit signature is valid
|
||||
if bls.Verify(pubkey, signing_root, deposit.data.signature):
|
||||
state.validators.append(get_validator_from_deposit(deposit))
|
||||
state.balances.append(amount)
|
||||
if bls.Verify(pubkey, signing_root, signature):
|
||||
index = get_index_for_new_validator(state)
|
||||
validator = get_validator_from_deposit(pubkey, withdrawal_credentials, amount)
|
||||
set_or_append_list(state.validators, index, validator)
|
||||
set_or_append_list(state.balances, index, amount)
|
||||
# [New in Altair]
|
||||
state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000))
|
||||
state.current_epoch_participation.append(ParticipationFlags(0b0000_0000))
|
||||
state.inactivity_scores.append(uint64(0))
|
||||
set_or_append_list(state.previous_epoch_participation, index, ParticipationFlags(0b0000_0000))
|
||||
set_or_append_list(state.current_epoch_participation, index, ParticipationFlags(0b0000_0000))
|
||||
set_or_append_list(state.inactivity_scores, index, uint64(0))
|
||||
else:
|
||||
# Increase balance by deposit amount
|
||||
index = ValidatorIndex(validator_pubkeys.index(pubkey))
|
||||
|
||||
@@ -387,7 +387,8 @@ def validate_light_client_update(store: LightClientStore,
|
||||
pubkey for (bit, pubkey) in zip(sync_aggregate.sync_committee_bits, sync_committee.pubkeys)
|
||||
if bit
|
||||
]
|
||||
fork_version = compute_fork_version(compute_epoch_at_slot(update.signature_slot))
|
||||
fork_version_slot = max(update.signature_slot, Slot(1)) - Slot(1)
|
||||
fork_version = compute_fork_version(compute_epoch_at_slot(fork_version_slot))
|
||||
domain = compute_domain(DOMAIN_SYNC_COMMITTEE, fork_version, genesis_validators_root)
|
||||
signing_root = compute_signing_root(update.attested_header.beacon, domain)
|
||||
assert bls.FastAggregateVerify(participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature)
|
||||
|
||||
@@ -13,7 +13,7 @@ Altair adds new messages, topics and data to the Req-Resp, Gossip and Discovery
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Warning](#warning)
|
||||
- [Warning](#warning)
|
||||
- [Modifications in Altair](#modifications-in-altair)
|
||||
- [MetaData](#metadata)
|
||||
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
|
||||
@@ -43,9 +43,9 @@ Altair adds new messages, topics and data to the Req-Resp, Gossip and Discovery
|
||||
This document is currently illustrative for early Altair testnets and some parts are subject to change.
|
||||
Refer to the note in the [validator guide](./validator.md) for further details.
|
||||
|
||||
# Modifications in Altair
|
||||
## Modifications in Altair
|
||||
|
||||
## MetaData
|
||||
### MetaData
|
||||
|
||||
The `MetaData` stored locally by clients is updated with an additional field to communicate the sync committee subnet subscriptions:
|
||||
|
||||
@@ -62,12 +62,12 @@ Where
|
||||
- `seq_number` and `attnets` have the same meaning defined in the Phase 0 document.
|
||||
- `syncnets` is a `Bitvector` representing the node's sync committee subnet subscriptions. This field should mirror the data in the node's ENR as outlined in the [validator guide](./validator.md#sync-committee-subnet-stability).
|
||||
|
||||
## The gossip domain: gossipsub
|
||||
### The gossip domain: gossipsub
|
||||
|
||||
Gossip meshes are added in Altair to support the consensus activities of the sync committees.
|
||||
Validators use an aggregation scheme to balance the processing and networking load across all of the relevant actors.
|
||||
|
||||
### Topics and messages
|
||||
#### Topics and messages
|
||||
|
||||
Topics follow the same specification as in the Phase 0 document.
|
||||
New topics are added in Altair to support the sync committees and the beacon block topic is updated with the modified type.
|
||||
@@ -103,11 +103,11 @@ Definitions of these new types can be found in the [Altair validator guide](./va
|
||||
|
||||
Note that the `ForkDigestValue` path segment of the topic separates the old and the new `beacon_block` topics.
|
||||
|
||||
#### Global topics
|
||||
##### Global topics
|
||||
|
||||
Altair changes the type of the global beacon block topic and adds one global topic to propagate partially aggregated sync committee messages to all potential proposers of beacon blocks.
|
||||
|
||||
##### `beacon_block`
|
||||
###### `beacon_block`
|
||||
|
||||
The existing specification for this topic does not change from the Phase 0 document,
|
||||
but the type of the payload does change to the (modified) `SignedBeaconBlock`.
|
||||
@@ -115,7 +115,7 @@ This type changes due to the inclusion of the inner `BeaconBlockBody` that is mo
|
||||
|
||||
See the [state transition document](./beacon-chain.md#beaconblockbody) for Altair for further details.
|
||||
|
||||
##### `sync_committee_contribution_and_proof`
|
||||
###### `sync_committee_contribution_and_proof`
|
||||
|
||||
This topic is used to propagate partially aggregated sync committee messages to be included in future blocks.
|
||||
|
||||
@@ -152,11 +152,11 @@ def get_sync_subcommittee_pubkeys(state: BeaconState, subcommittee_index: uint64
|
||||
- _[REJECT]_ The aggregator signature, `signed_contribution_and_proof.signature`, is valid.
|
||||
- _[REJECT]_ The aggregate signature is valid for the message `beacon_block_root` and aggregate pubkey derived from the participation info in `aggregation_bits` for the subcommittee specified by the `contribution.subcommittee_index`.
|
||||
|
||||
#### Sync committee subnets
|
||||
##### Sync committee subnets
|
||||
|
||||
Sync committee subnets are used to propagate unaggregated sync committee messages to subsections of the network.
|
||||
|
||||
##### `sync_committee_{subnet_id}`
|
||||
###### `sync_committee_{subnet_id}`
|
||||
|
||||
The `sync_committee_{subnet_id}` topics are used to propagate unaggregated sync committee messages to the subnet `subnet_id` to be aggregated before being gossiped to the global `sync_committee_contribution_and_proof` topic.
|
||||
|
||||
@@ -170,7 +170,7 @@ The following validations MUST pass before forwarding the `sync_committee_messag
|
||||
Note this validation is _per topic_ so that for a given `slot`, multiple messages could be forwarded with the same `validator_index` as long as the `subnet_id`s are distinct.
|
||||
- _[REJECT]_ The `signature` is valid for the message `beacon_block_root` for the validator referenced by `validator_index`.
|
||||
|
||||
#### Sync committees and aggregation
|
||||
##### Sync committees and aggregation
|
||||
|
||||
The aggregation scheme closely follows the design of the attestation aggregation scheme.
|
||||
Sync committee messages are broadcast into "subnets" defined by a topic.
|
||||
@@ -182,7 +182,7 @@ Unaggregated messages (along with metadata) are sent as `SyncCommitteeMessage`s
|
||||
|
||||
Aggregated sync committee messages are packaged into (signed) `SyncCommitteeContribution` along with proofs and gossiped to the `sync_committee_contribution_and_proof` topic.
|
||||
|
||||
### Transitioning the gossip
|
||||
#### Transitioning the gossip
|
||||
|
||||
With any fork, the fork version, and thus the `ForkDigestValue`, change.
|
||||
Message types are unique per topic, and so for a smooth transition a node must temporarily subscribe to both the old and new topics.
|
||||
@@ -205,9 +205,9 @@ Post-fork:
|
||||
E.g. an attestation on both the old and new topic is ignored like any duplicate.
|
||||
- Two epochs after the fork, pre-fork topics SHOULD be unsubscribed from. This is well after the configured `seen_ttl`.
|
||||
|
||||
## The Req/Resp domain
|
||||
### The Req/Resp domain
|
||||
|
||||
### Req-Resp interaction
|
||||
#### Req-Resp interaction
|
||||
|
||||
An additional `<context-bytes>` field is introduced to the `response_chunk` as defined in the Phase 0 document:
|
||||
|
||||
@@ -221,7 +221,7 @@ On a non-zero `<result>` with `ErrorMessage` payload, the `<context-bytes>` is a
|
||||
In Altair and later forks, `<context-bytes>` functions as a short meta-data,
|
||||
defined per req-resp method, and can parametrize the payload decoder.
|
||||
|
||||
#### `ForkDigest`-context
|
||||
##### `ForkDigest`-context
|
||||
|
||||
Starting with Altair, and in future forks, SSZ type definitions may change.
|
||||
For this common case, we define the `ForkDigest`-context:
|
||||
@@ -229,9 +229,9 @@ For this common case, we define the `ForkDigest`-context:
|
||||
A fixed-width 4 byte `<context-bytes>`, set to the `ForkDigest` matching the chunk:
|
||||
`compute_fork_digest(fork_version, genesis_validators_root)`.
|
||||
|
||||
### Messages
|
||||
#### Messages
|
||||
|
||||
#### BeaconBlocksByRange v2
|
||||
##### BeaconBlocksByRange v2
|
||||
|
||||
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/`
|
||||
|
||||
@@ -246,7 +246,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||
| `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` |
|
||||
| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
|
||||
|
||||
#### BeaconBlocksByRoot v2
|
||||
##### BeaconBlocksByRoot v2
|
||||
|
||||
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/`
|
||||
|
||||
@@ -261,7 +261,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||
| `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` |
|
||||
| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
|
||||
|
||||
#### GetMetaData v2
|
||||
##### GetMetaData v2
|
||||
|
||||
**Protocol ID:** `/eth2/beacon_chain/req/metadata/2/`
|
||||
|
||||
@@ -279,7 +279,7 @@ Requests the MetaData of a peer, using the new `MetaData` definition given above
|
||||
that is extended from phase 0 in Altair. Other conditions for the `GetMetaData`
|
||||
protocol are unchanged from the phase 0 p2p networking document.
|
||||
|
||||
### Transitioning from v1 to v2
|
||||
#### Transitioning from v1 to v2
|
||||
|
||||
In advance of the fork, implementations can opt in to both run the v1 and v2 for a smooth transition.
|
||||
This is non-breaking, and is recommended as soon as the fork specification is stable.
|
||||
@@ -291,7 +291,7 @@ The v1 method MAY be unregistered at the fork boundary.
|
||||
In the event of a request on v1 for an Altair specific payload,
|
||||
the responder MUST return the **InvalidRequest** response code.
|
||||
|
||||
## The discovery domain: discv5
|
||||
### The discovery domain: discv5
|
||||
|
||||
The `attnets` key of the ENR is used as defined in the Phase 0 document.
|
||||
|
||||
|
||||
@@ -170,10 +170,16 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
|
||||
finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
|
||||
assert block.slot > finalized_slot
|
||||
# Check block is a descendant of the finalized block at the checkpoint finalized slot
|
||||
assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root
|
||||
finalized_checkpoint_block = get_checkpoint_block(
|
||||
store,
|
||||
block.parent_root,
|
||||
store.finalized_checkpoint.epoch,
|
||||
)
|
||||
assert store.finalized_checkpoint.root == finalized_checkpoint_block
|
||||
|
||||
# Check the block is valid and compute the post-state
|
||||
state = pre_state.copy()
|
||||
block_root = hash_tree_root(block)
|
||||
state_transition(state, signed_block, True)
|
||||
|
||||
# [New in Bellatrix]
|
||||
@@ -181,9 +187,9 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
|
||||
validate_merge_block(block)
|
||||
|
||||
# Add new block to the store
|
||||
store.blocks[hash_tree_root(block)] = block
|
||||
store.blocks[block_root] = block
|
||||
# Add new state for this block to the store
|
||||
store.block_states[hash_tree_root(block)] = state
|
||||
store.block_states[block_root] = state
|
||||
|
||||
# Add proposer score boost if the block is timely
|
||||
time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
|
||||
@@ -191,15 +197,9 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
|
||||
if get_current_slot(store) == block.slot and is_before_attesting_interval:
|
||||
store.proposer_boost_root = hash_tree_root(block)
|
||||
|
||||
# Update justified checkpoint
|
||||
if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
|
||||
if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
|
||||
store.best_justified_checkpoint = state.current_justified_checkpoint
|
||||
if should_update_justified_checkpoint(store, state.current_justified_checkpoint):
|
||||
store.justified_checkpoint = state.current_justified_checkpoint
|
||||
# Update checkpoints in store if necessary
|
||||
update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
|
||||
|
||||
# Update finalized checkpoint
|
||||
if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
|
||||
store.finalized_checkpoint = state.finalized_checkpoint
|
||||
store.justified_checkpoint = state.current_justified_checkpoint
|
||||
# Eagerly compute unrealized justification and finality.
|
||||
compute_pulled_up_tip(store, block_root)
|
||||
```
|
||||
|
||||
@@ -13,23 +13,23 @@ Readers should understand the Phase 0 and Altair documents and use them as a bas
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Warning](#warning)
|
||||
- [Modifications in Bellatrix](#modifications-in-bellatrix)
|
||||
- [Configuration](#configuration)
|
||||
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
|
||||
- [Topics and messages](#topics-and-messages)
|
||||
- [Global topics](#global-topics)
|
||||
- [`beacon_block`](#beacon_block)
|
||||
- [Transitioning the gossip](#transitioning-the-gossip)
|
||||
- [The Req/Resp domain](#the-reqresp-domain)
|
||||
- [Messages](#messages)
|
||||
- [BeaconBlocksByRange v2](#beaconblocksbyrange-v2)
|
||||
- [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2)
|
||||
- [Modifications in Bellatrix](#modifications-in-bellatrix)
|
||||
- [Configuration](#configuration)
|
||||
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
|
||||
- [Topics and messages](#topics-and-messages)
|
||||
- [Global topics](#global-topics)
|
||||
- [`beacon_block`](#beacon_block)
|
||||
- [Transitioning the gossip](#transitioning-the-gossip)
|
||||
- [The Req/Resp domain](#the-reqresp-domain)
|
||||
- [Messages](#messages)
|
||||
- [BeaconBlocksByRange v2](#beaconblocksbyrange-v2)
|
||||
- [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2)
|
||||
- [Design decision rationale](#design-decision-rationale)
|
||||
- [Gossipsub](#gossipsub)
|
||||
- [Why was the max gossip message size increased at Bellatrix?](#why-was-the-max-gossip-message-size-increased-at-bellatrix)
|
||||
- [Req/Resp](#reqresp)
|
||||
- [Why was the max chunk response size increased at Bellatrix?](#why-was-the-max-chunk-response-size-increased-at-bellatrix)
|
||||
- [Why allow invalid payloads on the P2P network?](#why-allow-invalid-payloads-on-the-p2p-network)
|
||||
- [Gossipsub](#gossipsub)
|
||||
- [Why was the max gossip message size increased at Bellatrix?](#why-was-the-max-gossip-message-size-increased-at-bellatrix)
|
||||
- [Req/Resp](#reqresp)
|
||||
- [Why was the max chunk response size increased at Bellatrix?](#why-was-the-max-chunk-response-size-increased-at-bellatrix)
|
||||
- [Why allow invalid payloads on the P2P network?](#why-allow-invalid-payloads-on-the-p2p-network)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
@@ -39,9 +39,9 @@ Readers should understand the Phase 0 and Altair documents and use them as a bas
|
||||
This document is currently illustrative for early Bellatrix testnets and some parts are subject to change.
|
||||
Refer to the note in the [validator guide](./validator.md) for further details.
|
||||
|
||||
# Modifications in Bellatrix
|
||||
## Modifications in Bellatrix
|
||||
|
||||
## Configuration
|
||||
### Configuration
|
||||
|
||||
This section outlines modified constants that are used in this spec.
|
||||
|
||||
@@ -50,11 +50,11 @@ This section outlines modifications constants that are used in this spec.
|
||||
| `GOSSIP_MAX_SIZE_BELLATRIX` | `10 * 2**20` (= 10,485,760, 10 MiB) | The maximum allowed size of uncompressed gossip messages starting at Bellatrix upgrade. |
|
||||
| `MAX_CHUNK_SIZE_BELLATRIX` | `10 * 2**20` (= 10,485,760, 10 MiB) | The maximum allowed size of uncompressed req/resp chunked responses starting at Bellatrix upgrade. |
|
||||
|
||||
## The gossip domain: gossipsub
|
||||
### The gossip domain: gossipsub
|
||||
|
||||
Some gossip meshes are upgraded in Bellatrix to support upgraded types.
|
||||
|
||||
### Topics and messages
|
||||
#### Topics and messages
|
||||
|
||||
Topics follow the same specification as in prior upgrades.
|
||||
All topics remain stable except the beacon block topic which is updated with the modified type.
|
||||
@@ -76,11 +76,11 @@ The new topics along with the type of the `data` field of a gossipsub message ar
|
||||
|
||||
Note that the `ForkDigestValue` path segment of the topic separates the old and the new `beacon_block` topics.
|
||||
|
||||
#### Global topics
|
||||
##### Global topics
|
||||
|
||||
Bellatrix changes the type of the global beacon block topic.
|
||||
|
||||
##### `beacon_block`
|
||||
###### `beacon_block`
|
||||
|
||||
The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in Bellatrix.
|
||||
Specifically, this type changes with the addition of `execution_payload` to the inner `BeaconBlockBody`.
|
||||
@@ -107,12 +107,12 @@ Alias `block = signed_beacon_block.message`, `execution_payload = block.body.exe
|
||||
The following gossip validation from prior specifications MUST NOT be applied if the execution is enabled for the block -- i.e. `is_execution_enabled(state, block.body)`:
|
||||
- [REJECT] The block's parent (defined by `block.parent_root`) passes validation.
|
||||
|
||||
### Transitioning the gossip
|
||||
#### Transitioning the gossip
|
||||
|
||||
See gossip transition details found in the [Altair document](../altair/p2p-interface.md#transitioning-the-gossip) for
|
||||
details on how to handle transitioning gossip topics.
|
||||
|
||||
## The Req/Resp domain
|
||||
### The Req/Resp domain
|
||||
|
||||
Non-faulty, [optimistic](/sync/optimistic.md) nodes may send blocks which
|
||||
result in an INVALID response from an execution engine. To prevent network
|
||||
@@ -122,9 +122,9 @@ down-scored or disconnected. Transmission of a block which is invalid due to
|
||||
any consensus layer rules (i.e., *not* execution layer rules) MAY result in
|
||||
down-scoring or disconnection.
|
||||
|
||||
### Messages
|
||||
#### Messages
|
||||
|
||||
#### BeaconBlocksByRange v2
|
||||
##### BeaconBlocksByRange v2
|
||||
|
||||
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/`
|
||||
|
||||
@@ -146,7 +146,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||
| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
|
||||
| `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` |
|
||||
|
||||
#### BeaconBlocksByRoot v2
|
||||
##### BeaconBlocksByRoot v2
|
||||
|
||||
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/`
|
||||
|
||||
@@ -165,9 +165,9 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||
|
||||
# Design decision rationale
|
||||
|
||||
## Gossipsub
|
||||
### Gossipsub
|
||||
|
||||
### Why was the max gossip message size increased at Bellatrix?
|
||||
#### Why was the max gossip message size increased at Bellatrix?
|
||||
|
||||
With the addition of `ExecutionPayload` to `BeaconBlock`s, there is a dynamic
|
||||
field -- `transactions` -- which can validly exceed the `GOSSIP_MAX_SIZE` limit (1 MiB) put in
|
||||
@@ -190,9 +190,9 @@ order of 128 KiB in the worst case and the current gas limit (~30M) bounds max b
|
||||
than 2 MiB today, this marginal difference in theoretical bounds will have zero
|
||||
impact on network functionality and security.
|
||||
|
||||
## Req/Resp
|
||||
### Req/Resp
|
||||
|
||||
### Why was the max chunk response size increased at Bellatrix?
|
||||
#### Why was the max chunk response size increased at Bellatrix?
|
||||
|
||||
Similar to the discussion about the maximum gossip size increase, the
|
||||
`ExecutionPayload` type can cause `BeaconBlock`s to exceed the 1 MiB bounds put
|
||||
@@ -204,7 +204,7 @@ valid block sizes in the range of gas limits expected in the medium term.
|
||||
As with both gossip and req/rsp maximum values, type-specific limits should
|
||||
always be simultaneously respected.
|
||||
|
||||
### Why allow invalid payloads on the P2P network?
|
||||
#### Why allow invalid payloads on the P2P network?
|
||||
|
||||
The specification allows blocks with invalid execution payloads to propagate across
|
||||
gossip and via RPC calls. The reasoning for this is as follows:
|
||||
|
||||
@@ -9,6 +9,7 @@
|
||||
- [Introduction](#introduction)
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Helpers](#helpers)
|
||||
- [`GetPayloadResponse`](#getpayloadresponse)
|
||||
- [`get_pow_block_at_terminal_total_difficulty`](#get_pow_block_at_terminal_total_difficulty)
|
||||
- [`get_terminal_pow_block`](#get_terminal_pow_block)
|
||||
- [Protocols](#protocols)
|
||||
@@ -36,6 +37,14 @@ Please see related Beacon Chain doc before continuing and use them as a referenc
|
||||
|
||||
## Helpers
|
||||
|
||||
### `GetPayloadResponse`
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class GetPayloadResponse(object):
|
||||
execution_payload: ExecutionPayload
|
||||
```
|
||||
|
||||
### `get_pow_block_at_terminal_total_difficulty`
|
||||
|
||||
```python
|
||||
@@ -83,13 +92,13 @@ The Engine API may be used to implement it with an external execution engine.
|
||||
|
||||
#### `get_payload`
|
||||
|
||||
Given the `payload_id`, `get_payload` returns the most recent version of the execution payload that
|
||||
has been built since the corresponding call to `notify_forkchoice_updated` method.
|
||||
Given the `payload_id`, `get_payload` returns `GetPayloadResponse` with the most recent version of
|
||||
the execution payload that has been built since the corresponding call to the `notify_forkchoice_updated` method.
|
||||
|
||||
```python
|
||||
def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> ExecutionPayload:
|
||||
def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> GetPayloadResponse:
|
||||
"""
|
||||
Return ``execution_payload`` object.
|
||||
Return ``GetPayloadResponse`` object.
|
||||
"""
|
||||
...
|
||||
```
|
||||
@@ -162,7 +171,7 @@ def get_execution_payload(payload_id: Optional[PayloadId], execution_engine: Exe
|
||||
# Pre-merge, empty payload
|
||||
return ExecutionPayload()
|
||||
else:
|
||||
return execution_engine.get_payload(payload_id)
|
||||
return execution_engine.get_payload(payload_id).execution_payload
|
||||
```
|
||||
|
||||
*Note*: It is recommended for a validator to call `prepare_execution_payload` as soon as input parameters become known,
|
||||
|
||||
@@ -27,7 +27,7 @@ Warning: this configuration is not definitive.
| Name | Value |
| - | - |
| `CAPELLA_FORK_VERSION` | `Version('0x03000000')` |
| `CAPELLA_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** |
| `CAPELLA_FORK_EPOCH` | `Epoch(194048)` (April 12, 2023, 10:27:35pm UTC) |

## Helper functions

@@ -4,7 +4,7 @@ This document contains the networking specification for Capella.

The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite.

## Table of contents
### Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
@@ -26,13 +26,13 @@ The specification of these changes continues in the same format as the network s
<!-- /TOC -->

# Modifications in Capella
## Modifications in Capella

## The gossip domain: gossipsub
### The gossip domain: gossipsub

A new topic is added to support the gossip of withdrawal credential change messages. And an existing topic is upgraded for updated types in Capella.

### Topics and messages
#### Topics and messages

Topics follow the same specification as in prior upgrades. All existing topics remain stable except the beacon block topic which is updated with the modified type.

@@ -45,17 +45,17 @@ The new topics along with the type of the `data` field of a gossipsub message ar

Note that the `ForkDigestValue` path segment of the topic separates the old and the new `beacon_block` topics.

#### Global topics
##### Global topics

Capella changes the type of the global beacon block topic and adds one global topic to propagate withdrawal credential change messages to all potential proposers of beacon blocks.

##### `beacon_block`
###### `beacon_block`

The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in Capella.
Specifically, this type changes with the addition of `bls_to_execution_changes` to the inner `BeaconBlockBody`.
See Capella [state transition document](./beacon-chain.md#beaconblockbody) for further details.

##### `bls_to_execution_change`
###### `bls_to_execution_change`

This topic is used to propagate signed bls to execution change messages to be included in future blocks.

@@ -67,16 +67,16 @@ The following validations MUST pass before forwarding the `signed_bls_to_executi
  for the validator with index `signed_bls_to_execution_change.message.validator_index`.
- _[REJECT]_ All of the conditions within `process_bls_to_execution_change` pass validation.

### Transitioning the gossip
#### Transitioning the gossip

See gossip transition details found in the [Altair document](../altair/p2p-interface.md#transitioning-the-gossip) for
details on how to handle transitioning gossip topics for Capella.

## The Req/Resp domain
### The Req/Resp domain

### Messages
#### Messages

#### BeaconBlocksByRange v2
##### BeaconBlocksByRange v2

**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/`

@@ -93,7 +93,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
| `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` |
| `CAPELLA_FORK_VERSION` | `capella.SignedBeaconBlock` |

#### BeaconBlocksByRoot v2
##### BeaconBlocksByRoot v2

**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/`

@@ -11,9 +11,10 @@
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Helpers](#helpers)
- [Modified `GetPayloadResponse`](#modified-getpayloadresponse)
- [Protocols](#protocols)
- [`ExecutionEngine`](#executionengine)
- [`get_payload`](#get_payload)
- [Modified `get_payload`](#modified-get_payload)
- [Beacon chain responsibilities](#beacon-chain-responsibilities)
- [Block proposal](#block-proposal)
- [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
@@ -39,11 +40,20 @@ Please see related Beacon Chain doc before continuing and use them as a referenc

## Helpers

### Modified `GetPayloadResponse`

```python
@dataclass
class GetPayloadResponse(object):
    execution_payload: ExecutionPayload
    block_value: uint256
```

## Protocols

### `ExecutionEngine`

#### `get_payload`
#### Modified `get_payload`

`get_payload` returns the upgraded Capella `ExecutionPayload` type.

@@ -59,7 +59,7 @@ This upgrade adds blobs to the beacon chain as part of Deneb. This is an extensi

| Name | Value |
| - | - |
| `BLOB_TX_TYPE` | `uint8(0x05)` |
| `BLOB_TX_TYPE` | `uint8(0x03)` |
| `VERSIONED_HASH_VERSION_KZG` | `Bytes1('0x01')` |

## Preset
@@ -173,6 +173,8 @@ def tx_peek_blob_versioned_hashes(opaque_tx: Transaction) -> Sequence[VersionedH
        message_offset
        + uint32.decode_bytes(opaque_tx[(message_offset + 188):(message_offset + 192)])
    )
    # `VersionedHash` is a 32-byte object
    assert (len(opaque_tx) - blob_versioned_hashes_offset) % 32 == 0
    return [
        VersionedHash(opaque_tx[x:(x + 32)])
        for x in range(blob_versioned_hashes_offset, len(opaque_tx), 32)
@@ -205,7 +207,7 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:
    process_eth1_data(state, block.body)
    process_operations(state, block.body)  # [Modified in Deneb]
    process_sync_aggregate(state, block.body.sync_aggregate)
    process_blob_kzg_commitments(state, block.body)  # [New in Deneb]
    process_blob_kzg_commitments(block.body)  # [New in Deneb]
```

#### Execution payload
@@ -290,8 +292,7 @@ def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVolu
#### Blob KZG commitments

```python
def process_blob_kzg_commitments(state: BeaconState, body: BeaconBlockBody) -> None:
    # pylint: disable=unused-argument
def process_blob_kzg_commitments(body: BeaconBlockBody) -> None:
    assert verify_kzg_commitments_against_transactions(body.execution_payload.transactions, body.blob_kzg_commitments)
```

@@ -30,9 +30,8 @@ This is the modification of the fork choice accompanying the Deneb upgrade.
def validate_blobs(expected_kzg_commitments: Sequence[KZGCommitment],
                   blobs: Sequence[Blob],
                   proofs: Sequence[KZGProof]) -> None:
    assert len(expected_kzg_commitments) == len(blobs)
    assert len(blobs) == len(proofs)

    assert len(expected_kzg_commitments) == len(blobs) == len(proofs)

    assert verify_blob_kzg_proof_batch(blobs, expected_kzg_commitments, proofs)
```

@@ -82,7 +81,12 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
    finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
    assert block.slot > finalized_slot
    # Check block is a descendant of the finalized block at the checkpoint finalized slot
    assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root
    finalized_checkpoint_block = get_checkpoint_block(
        store,
        block.parent_root,
        store.finalized_checkpoint.epoch,
    )
    assert store.finalized_checkpoint.root == finalized_checkpoint_block

    # [New in Deneb]
    # Check if blob data is available
@@ -91,6 +95,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:

    # Check the block is valid and compute the post-state
    state = pre_state.copy()
    block_root = hash_tree_root(block)
    state_transition(state, signed_block, True)

    # Check the merge transition
@@ -98,9 +103,9 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
        validate_merge_block(block)

    # Add new block to the store
    store.blocks[hash_tree_root(block)] = block
    store.blocks[block_root] = block
    # Add new state for this block to the store
    store.block_states[hash_tree_root(block)] = state
    store.block_states[block_root] = state

    # Add proposer score boost if the block is timely
    time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
@@ -108,15 +113,9 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
    if get_current_slot(store) == block.slot and is_before_attesting_interval:
        store.proposer_boost_root = hash_tree_root(block)

    # Update justified checkpoint
    if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
        if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
            store.best_justified_checkpoint = state.current_justified_checkpoint
        if should_update_justified_checkpoint(store, state.current_justified_checkpoint):
            store.justified_checkpoint = state.current_justified_checkpoint
    # Update checkpoints in store if necessary
    update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)

    # Update finalized checkpoint
    if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
        store.finalized_checkpoint = state.finalized_checkpoint
        store.justified_checkpoint = state.current_justified_checkpoint
    # Eagerly compute unrealized justification and finality.
    compute_pulled_up_tip(store, block_root)
```

@@ -41,6 +41,7 @@ def upgrade_lc_header_to_deneb(pre: capella.LightClientHeader) -> LightClientHea
            block_hash=pre.execution.block_hash,
            transactions_root=pre.execution.transactions_root,
            withdrawals_root=pre.execution.withdrawals_root,
            excess_data_gas=uint256(0),  # [New in Deneb]
        ),
        execution_branch=pre.execution_branch,
    )

@@ -18,7 +18,7 @@

## Introduction

This upgrade updates light client data to include the Denbeb changes to the [`ExecutionPayload`](../beacon-chain.md) structure. It extends the [Capella Light Client specifications](../../capella/light-client/sync-protocol.md). The [fork document](./fork.md) explains how to upgrade existing Capella based deployments to Deneb.
This upgrade updates light client data to include the Deneb changes to the [`ExecutionPayload`](../beacon-chain.md) structure. It extends the [Capella Light Client specifications](../../capella/light-client/sync-protocol.md). The [fork document](./fork.md) explains how to upgrade existing Capella based deployments to Deneb.

Additional documents describes the impact of the upgrade on certain roles:
- [Full node](./full-node.md)

@@ -10,30 +10,35 @@ The specification of these changes continues in the same format as the network s
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Configuration](#configuration)
- [Containers](#containers)
- [`BlobSidecar`](#blobsidecar)
- [`SignedBlobSidecar`](#signedblobsidecar)
- [`BlobIdentifier`](#blobidentifier)
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
- [Topics and messages](#topics-and-messages)
- [Global topics](#global-topics)
- [`beacon_block`](#beacon_block)
- [`blob_sidecar_{index}`](#blob_sidecar_index)
- [Transitioning the gossip](#transitioning-the-gossip)
- [The Req/Resp domain](#the-reqresp-domain)
- [Messages](#messages)
- [BeaconBlocksByRange v2](#beaconblocksbyrange-v2)
- [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2)
- [BlobSidecarsByRoot v1](#blobsidecarsbyroot-v1)
- [BlobSidecarsByRange v1](#blobsidecarsbyrange-v1)
- [Modifications in Deneb](#modifications-in-deneb)
- [Configuration](#configuration)
- [Containers](#containers)
- [`BlobSidecar`](#blobsidecar)
- [`SignedBlobSidecar`](#signedblobsidecar)
- [`BlobIdentifier`](#blobidentifier)
- [Helpers](#helpers)
- [`verify_blob_sidecar_signature`](#verify_blob_sidecar_signature)
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
- [Topics and messages](#topics-and-messages)
- [Global topics](#global-topics)
- [`beacon_block`](#beacon_block)
- [`blob_sidecar_{subnet_id}`](#blob_sidecar_subnet_id)
- [Transitioning the gossip](#transitioning-the-gossip)
- [The Req/Resp domain](#the-reqresp-domain)
- [Messages](#messages)
- [BeaconBlocksByRange v2](#beaconblocksbyrange-v2)
- [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2)
- [BlobSidecarsByRoot v1](#blobsidecarsbyroot-v1)
- [BlobSidecarsByRange v1](#blobsidecarsbyrange-v1)
- [Design decision rationale](#design-decision-rationale)
- [Why are blobs relayed as a sidecar, separate from beacon blocks?](#why-are-blobs-relayed-as-a-sidecar-separate-from-beacon-blocks)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Configuration
## Modifications in Deneb

### Configuration

| Name | Value | Description |
|------------------------------------------|-----------------------------------|---------------------------------------------------------------------|
@@ -41,9 +46,9 @@ The specification of these changes continues in the same format as the network s
| `MAX_REQUEST_BLOB_SIDECARS` | `MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK` | Maximum number of blob sidecars in a single request |
| `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` | `2**12` (= 4096 epochs, ~18 days) | The minimum epoch range over which a node must serve blob sidecars |

## Containers
### Containers

### `BlobSidecar`
#### `BlobSidecar`

```python
class BlobSidecar(Container):
@@ -57,7 +62,7 @@ class BlobSidecar(Container):
    kzg_proof: KZGProof  # Allows for quick verification of kzg_commitment
```

### `SignedBlobSidecar`
#### `SignedBlobSidecar`

```python
class SignedBlobSidecar(Container):
@@ -65,7 +70,7 @@ class SignedBlobSidecar(Container):
    signature: BLSSignature
```

### `BlobIdentifier`
#### `BlobIdentifier`

```python
class BlobIdentifier(Container):
@@ -73,11 +78,22 @@ class BlobIdentifier(Container):
    index: BlobIndex
```

## The gossip domain: gossipsub
#### Helpers

##### `verify_blob_sidecar_signature`

```python
def verify_blob_sidecar_signature(state: BeaconState, signed_blob_sidecar: SignedBlobSidecar) -> bool:
    proposer = state.validators[signed_blob_sidecar.message.proposer_index]
    signing_root = compute_signing_root(signed_blob_sidecar.message, get_domain(state, DOMAIN_BLOB_SIDECAR))
    return bls.Verify(proposer.pubkey, signing_root, signed_blob_sidecar.signature)
```

### The gossip domain: gossipsub

Some gossip meshes are upgraded in the fork of Deneb to support upgraded types.

### Topics and messages
#### Topics and messages

Topics follow the same specification as in prior upgrades.

@@ -91,43 +107,44 @@ The new topics along with the type of the `data` field of a gossipsub message ar

| Name | Message Type |
| - | - |
| `blob_sidecar_{index}` | `SignedBlobSidecar` (new) |
| `blob_sidecar_{subnet_id}` | `SignedBlobSidecar` (new) |

#### Global topics
##### Global topics

Deneb introduces new global topics for blob sidecars.

##### `beacon_block`
###### `beacon_block`

The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in deneb.

##### `blob_sidecar_{index}`
###### `blob_sidecar_{subnet_id}`

This topic is used to propagate signed blob sidecars, one for each sidecar index. The number of indices is defined by `MAX_BLOBS_PER_BLOCK`.
This topic is used to propagate signed blob sidecars, where each blob index maps to some `subnet_id`.

The following validations MUST pass before forwarding the `sidecar` on the network, assuming the alias `sidecar = signed_blob_sidecar.message`:
The following validations MUST pass before forwarding the `signed_blob_sidecar` on the network, assuming the alias `sidecar = signed_blob_sidecar.message`:

- _[REJECT]_ The sidecar is for the correct topic -- i.e. `sidecar.index` matches the topic `{index}`.
- _[REJECT]_ The sidecar is for the correct subnet -- i.e. `compute_subnet_for_blob_sidecar(sidecar.index) == subnet_id`.
- _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `sidecar.slot <= current_slot` (a client MAY queue future sidecars for processing at the appropriate slot).
- _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that `sidecar.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)`
- _[IGNORE]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved).
- _[REJECT]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) passes validation.
- _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid with respect to the `sidecar.proposer_index` pubkey.
- _[REJECT]_ The sidecar is from a higher slot than the sidecar's block's parent (defined by `sidecar.block_parent_root`).
- _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid as verified by `verify_blob_sidecar_signature`.
- _[IGNORE]_ The sidecar is the only sidecar with valid signature received for the tuple `(sidecar.block_root, sidecar.index)`.
- _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `block_parent_root`/`slot`).
  If the `proposer_index` cannot immediately be verified against the expected shuffling, the sidecar MAY be queued for later processing while proposers for the block's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message.

### Transitioning the gossip
#### Transitioning the gossip

See gossip transition details found in the [Altair document](../altair/p2p-interface.md#transitioning-the-gossip) for
details on how to handle transitioning gossip topics for this upgrade.

## The Req/Resp domain
### The Req/Resp domain

### Messages
#### Messages

#### BeaconBlocksByRange v2
##### BeaconBlocksByRange v2

**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/`

@@ -147,7 +164,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:

No more than `MAX_REQUEST_BLOCKS_DENEB` may be requested at a time.

#### BeaconBlocksByRoot v2
##### BeaconBlocksByRoot v2

**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/`

@@ -165,7 +182,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:

No more than `MAX_REQUEST_BLOCKS_DENEB` may be requested at a time.

#### BlobSidecarsByRoot v1
##### BlobSidecarsByRoot v1

**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_root/1/`

@@ -209,12 +226,12 @@ No more than `MAX_REQUEST_BLOB_SIDECARS` may be requested at a time.
The response MUST consist of zero or more `response_chunk`.
Each _successful_ `response_chunk` MUST contain a single `BlobSidecar` payload.

Clients MUST support requesting sidecars since `minimum_request_epoch`, where `minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH)`. If any root in the request content references a block earlier than `minimum_request_epoch`, peers MAY respond with error code `3: ResourceUnavailable` or not include the blob in the response.
Clients MUST support requesting sidecars since `minimum_request_epoch`, where `minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH)`. If any root in the request content references a block earlier than `minimum_request_epoch`, peers MAY respond with error code `3: ResourceUnavailable` or not include the blob sidecar in the response.

Clients MUST respond with at least one sidecar, if they have it.
Clients MAY limit the number of blocks and sidecars in the response.

#### BlobSidecarsByRange v1
##### BlobSidecarsByRange v1

**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_range/1/`

@@ -65,7 +65,7 @@ Public functions MUST accept raw bytes as input and perform the required cryptog
| `KZGCommitment` | `Bytes48` | Validation: Perform [BLS standard's](https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-04#section-2.5) "KeyValidate" check but do allow the identity point |
| `KZGProof` | `Bytes48` | Same as for `KZGCommitment` |
| `Polynomial` | `Vector[BLSFieldElement, FIELD_ELEMENTS_PER_BLOB]` | A polynomial in evaluation form |
| `Blob` | `ByteVector[BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB]` | A basic blob data |
| `Blob` | `ByteVector[BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB]` | A basic data blob |

## Constants

@@ -105,7 +105,7 @@ but reusing the `mainnet` settings in public networks is a critical security req
| `KZG_SETUP_G2_LENGTH` | `65` |
| `KZG_SETUP_G1` | `Vector[G1Point, FIELD_ELEMENTS_PER_BLOB]`, contents TBD |
| `KZG_SETUP_G2` | `Vector[G2Point, KZG_SETUP_G2_LENGTH]`, contents TBD |
| `KZG_SETUP_LAGRANGE` | `Vector[KZGCommitment, FIELD_ELEMENTS_PER_BLOB]`, contents TBD |
| `KZG_SETUP_LAGRANGE` | `Vector[G1Point, FIELD_ELEMENTS_PER_BLOB]`, contents TBD |

## Helper functions

@@ -252,10 +252,11 @@ def compute_challenge(blob: Blob,

```python
def bls_modular_inverse(x: BLSFieldElement) -> BLSFieldElement:
    """
    Compute the modular inverse of x
    i.e. return y such that x * y % BLS_MODULUS == 1 and return 0 for x == 0
    Compute the modular inverse of x (for x != 0)
    i.e. return y such that x * y % BLS_MODULUS == 1
    """
    return BLSFieldElement(pow(x, -1, BLS_MODULUS)) if x != 0 else BLSFieldElement(0)
    assert (int(x) % BLS_MODULUS) != 0
    return BLSFieldElement(pow(x, -1, BLS_MODULUS))
```
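
As a quick, non-normative illustration of the identity this change relies on (a minimal sketch using Python's built-in modular inverse; the small prime is illustrative only, not `BLS_MODULUS`):

```python
# Minimal sketch, not spec code: checks x * inverse(x) == 1 (mod p) and that zero
# has no inverse, which is why the new version asserts x != 0 instead of returning 0.
ILLUSTRATIVE_MODULUS = 101  # small prime stand-in for BLS_MODULUS

def modular_inverse(x: int, modulus: int) -> int:
    assert x % modulus != 0  # mirrors the added precondition
    return pow(x, -1, modulus)

for value in range(1, ILLUSTRATIVE_MODULUS):
    assert value * modular_inverse(value, ILLUSTRATIVE_MODULUS) % ILLUSTRATIVE_MODULUS == 1
```
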

#### `div`
@@ -565,7 +566,7 @@ def verify_blob_kzg_proof_batch(blobs: Sequence[Blob],
                                proofs_bytes: Sequence[Bytes48]) -> bool:
    """
    Given a list of blobs and blob KZG proofs, verify that they correspond to the provided commitments.

    Will return True if there are zero blobs/commitments/proofs.
    Public method.
    """

@@ -10,8 +10,14 @@

- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Constants](#constants)
- [Misc](#misc)
- [Helpers](#helpers)
- [`get_blobs_and_kzg_commitments`](#get_blobs_and_kzg_commitments)
- [`BlobsBundle`](#blobsbundle)
- [Modified `GetPayloadResponse`](#modified-getpayloadresponse)
- [Protocol](#protocol)
- [`ExecutionEngine`](#executionengine)
- [Modified `get_payload`](#modified-get_payload)
- [Beacon chain responsibilities](#beacon-chain-responsibilities)
- [Block and sidecar proposal](#block-and-sidecar-proposal)
- [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
@@ -34,17 +40,50 @@ All behaviors and definitions defined in this document, and documents it extends
All terminology, constants, functions, and protocol mechanics defined in the updated [Beacon Chain doc of Deneb](./beacon-chain.md) are requisite for this document and used throughout.
Please see related Beacon Chain doc before continuing and use them as a reference throughout.

## Constants

### Misc

| Name | Value | Unit |
| - | - | :-: |
| `BLOB_SIDECAR_SUBNET_COUNT` | `4` | The number of blob sidecar subnets used in the gossipsub protocol. |

## Helpers

### `get_blobs_and_kzg_commitments`

The interface to retrieve blobs and corresponding kzg commitments.

Note: This API is *unstable*. `get_blobs_and_kzg_commitments` and `get_payload` may be unified.
Implementers may also retrieve blobs individually per transaction.
### `BlobsBundle`

```python
def get_blobs_and_kzg_commitments(payload_id: PayloadId) -> Tuple[Sequence[BLSFieldElement], Sequence[KZGCommitment]]:
@dataclass
class BlobsBundle(object):
    commitments: Sequence[KZGCommitment]
    proofs: Sequence[KZGProof]
    blobs: Sequence[Blob]
```

### Modified `GetPayloadResponse`

```python
@dataclass
class GetPayloadResponse(object):
    execution_payload: ExecutionPayload
    block_value: uint256
    blobs_bundle: BlobsBundle
```
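
As a hedged, non-normative sketch of how a caller might consume the three fields the Deneb response now carries (the dataclasses and `unpack` helper below are local stand-ins, not the spec's types):

```python
# Hypothetical, self-contained sketch: unpack a Deneb-style GetPayloadResponse.
from dataclasses import dataclass, field
from typing import List, Tuple

@dataclass
class BlobsBundleSketch:
    commitments: List[bytes] = field(default_factory=list)
    proofs: List[bytes] = field(default_factory=list)
    blobs: List[bytes] = field(default_factory=list)

@dataclass
class GetPayloadResponseSketch:
    execution_payload: dict
    block_value: int
    blobs_bundle: BlobsBundleSketch

def unpack(response: GetPayloadResponseSketch) -> Tuple[dict, int, BlobsBundleSketch]:
    # A proposer would place the payload into the block body, may compare block_value
    # across builds, and turns the bundle into sidecars (see the validator steps below).
    return response.execution_payload, response.block_value, response.blobs_bundle

payload, value, bundle = unpack(GetPayloadResponseSketch({}, 0, BlobsBundleSketch()))
assert value == 0 and bundle.blobs == []
```
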

## Protocol

### `ExecutionEngine`

#### Modified `get_payload`

Given the `payload_id`, `get_payload` returns the most recent version of the execution payload that
has been built since the corresponding call to `notify_forkchoice_updated` method.

```python
def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> GetPayloadResponse:
    """
    Return ExecutionPayload, uint256, BlobsBundle objects.
    """
    # pylint: disable=unused-argument
    ...
```
@@ -60,19 +99,21 @@ All validator responsibilities remain unchanged other than those noted below.
##### Blob KZG commitments

1. After retrieving the execution payload from the execution engine as specified in Capella,
   use the `payload_id` to retrieve `blobs` and `blob_kzg_commitments` via `get_blobs_and_kzg_commitments(payload_id)`.
   use the `payload_id` to retrieve `blobs`, `blob_kzg_commitments`, and `blob_kzg_proofs`
   via `get_payload(payload_id).blobs_bundle`.
2. Validate `blobs` and `blob_kzg_commitments`:

```python
def validate_blobs_and_kzg_commitments(execution_payload: ExecutionPayload,
                                       blobs: Sequence[Blob],
                                       blob_kzg_commitments: Sequence[KZGCommitment]) -> None:
                                       blob_kzg_commitments: Sequence[KZGCommitment],
                                       blob_kzg_proofs: Sequence[KZGProof]) -> None:
    # Optionally sanity-check that the KZG commitments match the versioned hashes in the transactions
    assert verify_kzg_commitments_against_transactions(execution_payload.transactions, blob_kzg_commitments)

    # Optionally sanity-check that the KZG commitments match the blobs (as produced by the execution engine)
    assert len(blob_kzg_commitments) == len(blobs)
    assert [blob_to_kzg_commitment(blob) == commitment for blob, commitment in zip(blobs, blob_kzg_commitments)]
    assert len(blob_kzg_commitments) == len(blobs) == len(blob_kzg_proofs)
    assert verify_blob_kzg_proof_batch(blobs, blob_kzg_commitments, blob_kzg_proofs)
```

3. If valid, set `block.body.blob_kzg_commitments = blob_kzg_commitments`.
@@ -87,7 +128,9 @@ Blobs associated with a block are packaged into sidecar objects for distribution

Each `sidecar` is obtained from:
```python
def get_blob_sidecars(block: BeaconBlock, blobs: Sequence[Blob]) -> Sequence[BlobSidecar]:
def get_blob_sidecars(block: BeaconBlock,
                      blobs: Sequence[Blob],
                      blob_kzg_proofs: Sequence[KZGProof]) -> Sequence[BlobSidecar]:
    return [
        BlobSidecar(
            block_root=hash_tree_root(block),
@@ -96,14 +139,14 @@ def get_blob_sidecars(block: BeaconBlock, blobs: Sequence[Blob]) -> Sequence[Blo
            block_parent_root=block.parent_root,
            blob=blob,
            kzg_commitment=block.body.blob_kzg_commitments[index],
            kzg_proof=compute_blob_kzg_proof(blob, block.body.blob_kzg_commitments[index]),
            kzg_proof=blob_kzg_proofs[index],
        )
        for index, blob in enumerate(blobs)
    ]

```

Then for each sidecar, `signed_sidecar = SignedBlobSidecar(message=sidecar, signature=signature)` is constructed and published to the `blob_sidecar_{index}` topics according to its index.
Then for each sidecar, `signed_sidecar = SignedBlobSidecar(message=sidecar, signature=signature)` is constructed and published to the associated sidecar topic, the `blob_sidecar_{subnet_id}` pubsub topic.

`signature` is obtained from:

@@ -116,6 +159,15 @@ def get_blob_sidecar_signature(state: BeaconState,
    return bls.Sign(privkey, signing_root)
```

The `subnet_id` for the `signed_sidecar` is calculated with:
- Let `blob_index = signed_sidecar.message.index`.
- Let `subnet_id = compute_subnet_for_blob_sidecar(blob_index)`.

```python
def compute_subnet_for_blob_sidecar(blob_index: BlobIndex) -> SubnetID:
    return SubnetID(blob_index % BLOB_SIDECAR_SUBNET_COUNT)
```
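
A short, non-normative illustration of the mapping (assuming `BLOB_SIDECAR_SUBNET_COUNT == 4` as in the table above): blob indices simply wrap onto subnets modulo the subnet count.

```python
# Minimal sketch, not spec code: blob index -> subnet id under a 4-subnet configuration.
BLOB_SIDECAR_SUBNET_COUNT = 4

def compute_subnet_for_blob_sidecar(blob_index: int) -> int:
    return blob_index % BLOB_SIDECAR_SUBNET_COUNT

assert [compute_subnet_for_blob_sidecar(i) for i in range(6)] == [0, 1, 2, 3, 0, 1]
```
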

After publishing the peers on the network may request the sidecar through sync-requests, or a local user may be interested.

The validator MUST hold on to sidecars for `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` epochs and serve when capable,

@@ -269,7 +269,7 @@ Additional preset configurations can be found in the [`configs`](../../configs)

- The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**13` epochs (about 36 days) is the time it takes the inactivity penalty to reduce the balance of non-participating validators to about `1/sqrt(e) ~= 60.6%`. Indeed, the balance retained by offline validators after `n` epochs is about `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)`; so after `INVERSE_SQRT_E_DROP_TIME` epochs, it is roughly `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`. Note this value will be upgraded to `2**24` after Phase 0 mainnet stabilizes to provide a faster recovery in the event of an inactivity leak.

- The `PROPORTIONAL_SLASHING_MULTIPLIER` is set to `1` at initial mainnet launch, resulting in one-third of the minimum accountable safety margin in the event of a finality attack. After Phase 0 mainnet stablizes, this value will be upgraded to `3` to provide the maximal minimum accountable safety margin.
- The `PROPORTIONAL_SLASHING_MULTIPLIER` is set to `1` at initial mainnet launch, resulting in one-third of the minimum accountable safety margin in the event of a finality attack. After Phase 0 mainnet stabilizes, this value will be upgraded to `3` to provide the maximal minimum accountable safety margin.

### Max operations per block

@@ -1036,7 +1036,7 @@ def get_total_balance(state: BeaconState, indices: Set[ValidatorIndex]) -> Gwei:
    """
    Return the combined effective balance of the ``indices``.
    ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero.
    Math safe up to ~10B ETH, afterwhich this overflows uint64.
    Math safe up to ~10B ETH, after which this overflows uint64.
    """
    return Gwei(max(EFFECTIVE_BALANCE_INCREMENT, sum([state.validators[index].effective_balance for index in indices])))
```
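
A small, non-normative arithmetic sketch of the `EFFECTIVE_BALANCE_INCREMENT` floor mentioned in the docstring (values in Gwei; 1 ETH = 10**9 Gwei is the mainnet increment):

```python
# Minimal sketch, not spec code: the floor guarantees a nonzero result even with no indices.
EFFECTIVE_BALANCE_INCREMENT = 10**9  # 1 ETH in Gwei

def total_balance(effective_balances: list) -> int:
    return max(EFFECTIVE_BALANCE_INCREMENT, sum(effective_balances))

assert total_balance([]) == EFFECTIVE_BALANCE_INCREMENT        # floor avoids division by zero downstream
assert total_balance([32 * 10**9, 31 * 10**9]) == 63 * 10**9   # otherwise a plain sum
```
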
@@ -1835,13 +1835,12 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
##### Deposits

```python
def get_validator_from_deposit(deposit: Deposit) -> Validator:
    amount = deposit.data.amount
def get_validator_from_deposit(pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64) -> Validator:
    effective_balance = min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)

    return Validator(
        pubkey=deposit.data.pubkey,
        withdrawal_credentials=deposit.data.withdrawal_credentials,
        pubkey=pubkey,
        withdrawal_credentials=withdrawal_credentials,
        activation_eligibility_epoch=FAR_FUTURE_EPOCH,
        activation_epoch=FAR_FUTURE_EPOCH,
        exit_epoch=FAR_FUTURE_EPOCH,
@@ -1850,6 +1849,34 @@ def get_validator_from_deposit(deposit: Deposit) -> Validator:
    )
```
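
A worked, non-normative example of the effective-balance line above, using the mainnet constants (amounts in Gwei; 1 ETH increment, 32 ETH cap):

```python
# Minimal sketch, not spec code: round down to the nearest increment, then cap.
EFFECTIVE_BALANCE_INCREMENT = 10**9   # 1 ETH
MAX_EFFECTIVE_BALANCE = 32 * 10**9    # 32 ETH

def effective_balance_at_deposit(amount: int) -> int:
    return min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)

assert effective_balance_at_deposit(33_500_000_000) == 32 * 10**9  # capped at 32 ETH
assert effective_balance_at_deposit(31_700_000_000) == 31 * 10**9  # rounded down to 1 ETH increments
```
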

```python
def apply_deposit(state: BeaconState,
                  pubkey: BLSPubkey,
                  withdrawal_credentials: Bytes32,
                  amount: uint64,
                  signature: BLSSignature) -> None:
    validator_pubkeys = [v.pubkey for v in state.validators]
    if pubkey not in validator_pubkeys:
        # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
        deposit_message = DepositMessage(
            pubkey=pubkey,
            withdrawal_credentials=withdrawal_credentials,
            amount=amount,
        )
        domain = compute_domain(DOMAIN_DEPOSIT)  # Fork-agnostic domain since deposits are valid across forks
        signing_root = compute_signing_root(deposit_message, domain)
        if not bls.Verify(pubkey, signing_root, signature):
            return

        # Add validator and balance entries
        state.validators.append(get_validator_from_deposit(pubkey, withdrawal_credentials, amount))
        state.balances.append(amount)
    else:
        # Increase balance by deposit amount
        index = ValidatorIndex(validator_pubkeys.index(pubkey))
        increase_balance(state, index, amount)
```

```python
def process_deposit(state: BeaconState, deposit: Deposit) -> None:
    # Verify the Merkle branch
@@ -1864,28 +1891,13 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None:
    # Deposits must be processed in order
    state.eth1_deposit_index += 1

    pubkey = deposit.data.pubkey
    amount = deposit.data.amount
    validator_pubkeys = [v.pubkey for v in state.validators]
    if pubkey not in validator_pubkeys:
        # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
        deposit_message = DepositMessage(
            pubkey=deposit.data.pubkey,
            withdrawal_credentials=deposit.data.withdrawal_credentials,
            amount=deposit.data.amount,
        )
        domain = compute_domain(DOMAIN_DEPOSIT)  # Fork-agnostic domain since deposits are valid across forks
        signing_root = compute_signing_root(deposit_message, domain)
        if not bls.Verify(pubkey, signing_root, deposit.data.signature):
            return

        # Add validator and balance entries
        state.validators.append(get_validator_from_deposit(deposit))
        state.balances.append(amount)
    else:
        # Increase balance by deposit amount
        index = ValidatorIndex(validator_pubkeys.index(pubkey))
        increase_balance(state, index, amount)
    apply_deposit(
        state=state,
        pubkey=deposit.data.pubkey,
        withdrawal_credentials=deposit.data.withdrawal_credentials,
        amount=deposit.data.amount,
        signature=deposit.data.signature,
    )
```

##### Voluntary exits

@@ -8,21 +8,28 @@
- [Introduction](#introduction)
- [Fork choice](#fork-choice)
- [Constant](#constant)
- [Preset](#preset)
- [Configuration](#configuration)
- [Helpers](#helpers)
- [`LatestMessage`](#latestmessage)
- [`Store`](#store)
- [`is_previous_epoch_justified`](#is_previous_epoch_justified)
- [`get_forkchoice_store`](#get_forkchoice_store)
- [`get_slots_since_genesis`](#get_slots_since_genesis)
- [`get_current_slot`](#get_current_slot)
- [`compute_slots_since_epoch_start`](#compute_slots_since_epoch_start)
- [`get_ancestor`](#get_ancestor)
- [`get_checkpoint_block`](#get_checkpoint_block)
- [`get_weight`](#get_weight)
- [`get_voting_source`](#get_voting_source)
- [`filter_block_tree`](#filter_block_tree)
- [`get_filtered_block_tree`](#get_filtered_block_tree)
- [`get_head`](#get_head)
- [`should_update_justified_checkpoint`](#should_update_justified_checkpoint)
- [`update_checkpoints`](#update_checkpoints)
- [`update_unrealized_checkpoints`](#update_unrealized_checkpoints)
- [Pull-up tip helpers](#pull-up-tip-helpers)
- [`compute_pulled_up_tip`](#compute_pulled_up_tip)
- [`on_tick` helpers](#on_tick-helpers)
- [`on_tick_per_slot`](#on_tick_per_slot)
- [`on_attestation` helpers](#on_attestation-helpers)
- [`validate_target_epoch_against_current_time`](#validate_target_epoch_against_current_time)
- [`validate_on_attestation`](#validate_on_attestation)
@@ -67,12 +74,6 @@ Any of the above handlers that trigger an unhandled exception (e.g. a failed ass
| -------------------- | ----------- |
| `INTERVALS_PER_SLOT` | `uint64(3)` |

### Preset

| Name | Value | Unit | Duration |
| -------------------------------- | ------------ | :---: | :--------: |
| `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` | `2**3` (= 8) | slots | 96 seconds |

### Configuration

| Name | Value |
@@ -94,6 +95,13 @@ class LatestMessage(object):

#### `Store`

The `Store` is responsible for tracking information required for the fork choice algorithm. The important fields being tracked are described below:

- `justified_checkpoint`: the justified checkpoint used as the starting point for the LMD GHOST fork choice algorithm.
- `finalized_checkpoint`: the highest known finalized checkpoint. The fork choice only considers blocks that are not conflicting with this checkpoint.
- `unrealized_justified_checkpoint` & `unrealized_finalized_checkpoint`: these track the highest justified & finalized checkpoints resp., without regard to whether on-chain ***realization*** has occurred, i.e. FFG processing of new attestations within the state transition function. This is an important distinction from `justified_checkpoint` & `finalized_checkpoint`, because they will only track the checkpoints that are realized on-chain. Note that on-chain processing of FFG information only happens at epoch boundaries.
- `unrealized_justifications`: stores a map of block root to the unrealized justified checkpoint observed in that block.

```python
@dataclass
class Store(object):
@@ -101,13 +109,24 @@ class Store(object):
    genesis_time: uint64
    justified_checkpoint: Checkpoint
    finalized_checkpoint: Checkpoint
    best_justified_checkpoint: Checkpoint
    unrealized_justified_checkpoint: Checkpoint
    unrealized_finalized_checkpoint: Checkpoint
    proposer_boost_root: Root
    equivocating_indices: Set[ValidatorIndex]
    blocks: Dict[Root, BeaconBlock] = field(default_factory=dict)
    block_states: Dict[Root, BeaconState] = field(default_factory=dict)
    checkpoint_states: Dict[Checkpoint, BeaconState] = field(default_factory=dict)
    latest_messages: Dict[ValidatorIndex, LatestMessage] = field(default_factory=dict)
    unrealized_justifications: Dict[Root, Checkpoint] = field(default_factory=dict)
```

#### `is_previous_epoch_justified`

```python
def is_previous_epoch_justified(store: Store) -> bool:
    current_slot = get_current_slot(store)
    current_epoch = compute_epoch_at_slot(current_slot)
    return store.justified_checkpoint.epoch + 1 == current_epoch
```

#### `get_forkchoice_store`
@@ -130,12 +149,14 @@ def get_forkchoice_store(anchor_state: BeaconState, anchor_block: BeaconBlock) -
        genesis_time=anchor_state.genesis_time,
        justified_checkpoint=justified_checkpoint,
        finalized_checkpoint=finalized_checkpoint,
        best_justified_checkpoint=justified_checkpoint,
        unrealized_justified_checkpoint=justified_checkpoint,
        unrealized_finalized_checkpoint=finalized_checkpoint,
        proposer_boost_root=proposer_boost_root,
        equivocating_indices=set(),
        blocks={anchor_root: copy(anchor_block)},
        block_states={anchor_root: copy(anchor_state)},
        checkpoint_states={justified_checkpoint: copy(anchor_state)},
        unrealized_justifications={anchor_root: justified_checkpoint}
    )
```

@@ -167,11 +188,18 @@ def get_ancestor(store: Store, root: Root, slot: Slot) -> Root:
    block = store.blocks[root]
    if block.slot > slot:
        return get_ancestor(store, block.parent_root, slot)
    elif block.slot == slot:
        return root
    else:
        # root is older than queried slot, thus a skip slot. Return most recent root prior to slot
        return root
    return root
```

#### `get_checkpoint_block`

```python
def get_checkpoint_block(store: Store, root: Root, epoch: Epoch) -> Root:
    """
    Compute the checkpoint block for epoch ``epoch`` in the chain of block ``root``
    """
    epoch_first_slot = compute_start_slot_at_epoch(epoch)
    return get_ancestor(store, root, epoch_first_slot)
```

#### `get_weight`
@@ -179,9 +207,12 @@ def get_ancestor(store: Store, root: Root, slot: Slot) -> Root:
```python
def get_weight(store: Store, root: Root) -> Gwei:
    state = store.checkpoint_states[store.justified_checkpoint]
    active_indices = get_active_validator_indices(state, get_current_epoch(state))
    unslashed_and_active_indices = [
        i for i in get_active_validator_indices(state, get_current_epoch(state))
        if not state.validators[i].slashed
    ]
    attestation_score = Gwei(sum(
        state.validators[i].effective_balance for i in active_indices
        state.validators[i].effective_balance for i in unslashed_and_active_indices
        if (i in store.latest_messages
            and i not in store.equivocating_indices
            and get_ancestor(store, store.latest_messages[i].root, store.blocks[root].slot) == root)
@@ -199,8 +230,30 @@ def get_weight(store: Store, root: Root) -> Gwei:
    return attestation_score + proposer_score
```

#### `get_voting_source`

```python
def get_voting_source(store: Store, block_root: Root) -> Checkpoint:
    """
    Compute the voting source checkpoint in event that block with root ``block_root`` is the head block
    """
    block = store.blocks[block_root]
    current_epoch = compute_epoch_at_slot(get_current_slot(store))
    block_epoch = compute_epoch_at_slot(block.slot)
    if current_epoch > block_epoch:
        # The block is from a prior epoch, the voting source will be pulled-up
        return store.unrealized_justifications[block_root]
    else:
        # The block is not from a prior epoch, therefore the voting source is not pulled up
        head_state = store.block_states[block_root]
        return head_state.current_justified_checkpoint

```
#### `filter_block_tree`

*Note*: External calls to `filter_block_tree` (i.e., any calls that are not made by the recursive logic in this function) MUST set `block_root` to `store.justified_checkpoint`.

```python
def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconBlock]) -> bool:
    block = store.blocks[block_root]
@@ -218,17 +271,34 @@ def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconB
            return True
        return False

    # If leaf block, check finalized/justified checkpoints as matching latest.
    head_state = store.block_states[block_root]
    current_epoch = compute_epoch_at_slot(get_current_slot(store))
    voting_source = get_voting_source(store, block_root)

    # The voting source should be at the same height as the store's justified checkpoint
    correct_justified = (
        store.justified_checkpoint.epoch == GENESIS_EPOCH
        or head_state.current_justified_checkpoint == store.justified_checkpoint
        or voting_source.epoch == store.justified_checkpoint.epoch
    )

    # If the previous epoch is justified, the block should be pulled-up. In this case, check that unrealized
    # justification is higher than the store and that the voting source is not more than two epochs ago
    if not correct_justified and is_previous_epoch_justified(store):
        correct_justified = (
            store.unrealized_justifications[block_root].epoch >= store.justified_checkpoint.epoch and
            voting_source.epoch + 2 >= current_epoch
        )

    finalized_checkpoint_block = get_checkpoint_block(
        store,
        block_root,
        store.finalized_checkpoint.epoch,
    )

    correct_finalized = (
        store.finalized_checkpoint.epoch == GENESIS_EPOCH
        or head_state.finalized_checkpoint == store.finalized_checkpoint
        or store.finalized_checkpoint.root == finalized_checkpoint_block
    )

    # If expected finalized/justified, add to viable block-tree and signal viability to parent.
    if correct_justified and correct_finalized:
        blocks[block_root] = block
@@ -272,25 +342,80 @@ def get_head(store: Store) -> Root:
        head = max(children, key=lambda root: (get_weight(store, root), root))
```

#### `should_update_justified_checkpoint`
#### `update_checkpoints`

```python
def should_update_justified_checkpoint(store: Store, new_justified_checkpoint: Checkpoint) -> bool:
def update_checkpoints(store: Store, justified_checkpoint: Checkpoint, finalized_checkpoint: Checkpoint) -> None:
    """
    To address the bouncing attack, only update conflicting justified
    checkpoints in the fork choice if in the early slots of the epoch.
    Otherwise, delay incorporation of new justified checkpoint until next epoch boundary.

    See https://ethresear.ch/t/prevention-of-bouncing-attack-on-ffg/6114 for more detailed analysis and discussion.
    Update checkpoints in store if necessary
    """
    if compute_slots_since_epoch_start(get_current_slot(store)) < SAFE_SLOTS_TO_UPDATE_JUSTIFIED:
        return True
    # Update justified checkpoint
    if justified_checkpoint.epoch > store.justified_checkpoint.epoch:
        store.justified_checkpoint = justified_checkpoint

    justified_slot = compute_start_slot_at_epoch(store.justified_checkpoint.epoch)
    if not get_ancestor(store, new_justified_checkpoint.root, justified_slot) == store.justified_checkpoint.root:
        return False
    # Update finalized checkpoint
    if finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
        store.finalized_checkpoint = finalized_checkpoint
```

    return True
#### `update_unrealized_checkpoints`

```python
def update_unrealized_checkpoints(store: Store, unrealized_justified_checkpoint: Checkpoint,
                                  unrealized_finalized_checkpoint: Checkpoint) -> None:
    """
    Update unrealized checkpoints in store if necessary
    """
    # Update unrealized justified checkpoint
    if unrealized_justified_checkpoint.epoch > store.unrealized_justified_checkpoint.epoch:
        store.unrealized_justified_checkpoint = unrealized_justified_checkpoint

    # Update unrealized finalized checkpoint
    if unrealized_finalized_checkpoint.epoch > store.unrealized_finalized_checkpoint.epoch:
        store.unrealized_finalized_checkpoint = unrealized_finalized_checkpoint
```

#### Pull-up tip helpers

##### `compute_pulled_up_tip`

```python
def compute_pulled_up_tip(store: Store, block_root: Root) -> None:
    state = store.block_states[block_root].copy()
    # Pull up the post-state of the block to the next epoch boundary
    process_justification_and_finalization(state)

    store.unrealized_justifications[block_root] = state.current_justified_checkpoint
    update_unrealized_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)

    # If the block is from a prior epoch, apply the realized values
    block_epoch = compute_epoch_at_slot(store.blocks[block_root].slot)
    current_epoch = compute_epoch_at_slot(get_current_slot(store))
    if block_epoch < current_epoch:
        update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
```

#### `on_tick` helpers

##### `on_tick_per_slot`

```python
def on_tick_per_slot(store: Store, time: uint64) -> None:
    previous_slot = get_current_slot(store)

    # Update store time
    store.time = time

    current_slot = get_current_slot(store)

    # If this is a new slot, reset store.proposer_boost_root
    if current_slot > previous_slot:
        store.proposer_boost_root = Root()

    # If a new epoch, pull-up justification and finalization from previous epoch
    if current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0:
        update_checkpoints(store, store.unrealized_justified_checkpoint, store.unrealized_finalized_checkpoint)
```

#### `on_attestation` helpers
@@ -323,7 +448,7 @@ def validate_on_attestation(store: Store, attestation: Attestation, is_from_bloc
    # Check that the epoch number and slot number are matching
    assert target.epoch == compute_epoch_at_slot(attestation.data.slot)

    # Attestations target be for a known block. If target block is unknown, delay consideration until the block is found
    # Attestation target must be for a known block. If target block is unknown, delay consideration until block is found
    assert target.root in store.blocks

    # Attestations must be for a known block. If block is unknown, delay consideration until the block is found
@@ -332,8 +457,7 @@ def validate_on_attestation(store: Store, attestation: Attestation, is_from_bloc
    assert store.blocks[attestation.data.beacon_block_root].slot <= attestation.data.slot

    # LMD vote must be consistent with FFG vote target
    target_slot = compute_start_slot_at_epoch(target.epoch)
    assert target.root == get_ancestor(store, attestation.data.beacon_block_root, target_slot)
    assert target.root == get_checkpoint_block(store, attestation.data.beacon_block_root, target.epoch)

    # Attestations can only affect the fork choice of subsequent slots.
    # Delay consideration in the fork choice until their slot is in the past.
@@ -371,27 +495,13 @@ def update_latest_messages(store: Store, attesting_indices: Sequence[ValidatorIn

```python
def on_tick(store: Store, time: uint64) -> None:
    previous_slot = get_current_slot(store)

    # update store time
    store.time = time

    current_slot = get_current_slot(store)

    # Reset store.proposer_boost_root if this is a new slot
    if current_slot > previous_slot:
        store.proposer_boost_root = Root()

    # Not a new epoch, return
    if not (current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0):
        return

    # Update store.justified_checkpoint if a better checkpoint on the store.finalized_checkpoint chain
    if store.best_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
        finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
        ancestor_at_finalized_slot = get_ancestor(store, store.best_justified_checkpoint.root, finalized_slot)
        if ancestor_at_finalized_slot == store.finalized_checkpoint.root:
            store.justified_checkpoint = store.best_justified_checkpoint
    # If the ``store.time`` falls behind, while loop catches up slot by slot
    # to ensure that every previous slot is processed with ``on_tick_per_slot``
    tick_slot = (time - store.genesis_time) // SECONDS_PER_SLOT
    while get_current_slot(store) < tick_slot:
        previous_time = store.genesis_time + (get_current_slot(store) + 1) * SECONDS_PER_SLOT
        on_tick_per_slot(store, previous_time)
    on_tick_per_slot(store, time)
```
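
As a hedged, self-contained illustration of the catch-up behaviour the rewritten `on_tick` introduces (this is not the spec's `Store`; the constants and helper below are local stand-ins): if the store's clock lags several slots behind the new time, every intermediate slot boundary is still visited before the final tick.

```python
# Minimal sketch, not spec code: which slot boundaries the while loop would walk through.
SECONDS_PER_SLOT = 12
GENESIS_TIME = 0

def slots_caught_up(store_time: int, new_time: int) -> list:
    visited = []
    current_slot = (store_time - GENESIS_TIME) // SECONDS_PER_SLOT
    tick_slot = (new_time - GENESIS_TIME) // SECONDS_PER_SLOT
    while current_slot < tick_slot:
        current_slot += 1
        visited.append(current_slot)  # one on_tick_per_slot call per boundary
    return visited

# store.time sits in slot 2; the new time is in slot 5, so slots 3, 4 and 5 each get a tick.
assert slots_caught_up(store_time=25, new_time=61) == [3, 4, 5]
```
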
|
||||
|
||||
#### `on_block`

@@ -410,15 +520,21 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
    finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
    assert block.slot > finalized_slot
    # Check block is a descendant of the finalized block at the checkpoint finalized slot
    assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root
    finalized_checkpoint_block = get_checkpoint_block(
        store,
        block.parent_root,
        store.finalized_checkpoint.epoch,
    )
    assert store.finalized_checkpoint.root == finalized_checkpoint_block

    # Check the block is valid and compute the post-state
    state = pre_state.copy()
    block_root = hash_tree_root(block)
    state_transition(state, signed_block, True)
    # Add new block to the store
    store.blocks[hash_tree_root(block)] = block
    store.blocks[block_root] = block
    # Add new state for this block to the store
    store.block_states[hash_tree_root(block)] = state
    store.block_states[block_root] = state

    # Add proposer score boost if the block is timely
    time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
@@ -426,17 +542,11 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
    if get_current_slot(store) == block.slot and is_before_attesting_interval:
        store.proposer_boost_root = hash_tree_root(block)

    # Update justified checkpoint
    if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
        if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
            store.best_justified_checkpoint = state.current_justified_checkpoint
        if should_update_justified_checkpoint(store, state.current_justified_checkpoint):
            store.justified_checkpoint = state.current_justified_checkpoint
    # Update checkpoints in store if necessary
    update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)

    # Update finalized checkpoint
    if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
        store.finalized_checkpoint = state.finalized_checkpoint
        store.justified_checkpoint = state.current_justified_checkpoint
    # Eagerly compute unrealized justification and finality
    compute_pulled_up_tip(store, block_root)
```
|
||||
|
||||
#### `on_attestation`
|
||||
|
||||
@@ -111,11 +111,11 @@ It consists of four main sections:
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
# Network fundamentals
|
||||
## Network fundamentals
|
||||
|
||||
This section outlines the specification for the networking stack in Ethereum consensus-layer clients.
|
||||
|
||||
## Transport
|
||||
### Transport
|
||||
|
||||
Even though libp2p is a multi-transport stack (designed to listen on multiple simultaneous transports and endpoints transparently),
|
||||
we hereby define a profile for basic interoperability.
|
||||
@@ -133,14 +133,14 @@ All listening endpoints must be publicly dialable, and thus not rely on libp2p c
|
||||
Nodes operating behind a NAT, or otherwise undialable by default (e.g. container runtime, firewall, etc.),
|
||||
MUST have their infrastructure configured to enable inbound traffic on the announced public listening endpoint.
|
||||
|
||||
## Encryption and identification
|
||||
### Encryption and identification
|
||||
|
||||
The [Libp2p-noise](https://github.com/libp2p/specs/tree/master/noise) secure
|
||||
channel handshake with `secp256k1` identities will be used for encryption.
|
||||
|
||||
As specified in the libp2p specification, clients MUST support the `XX` handshake pattern.
|
||||
|
||||
## Protocol Negotiation
|
||||
### Protocol Negotiation
|
||||
|
||||
Clients MUST use exact equality when negotiating protocol versions to use and MAY use the version to give priority to higher version numbers.
|
||||
|
||||
@@ -148,7 +148,7 @@ Clients MUST support [multistream-select 1.0](https://github.com/multiformats/mu
|
||||
and MAY support [multiselect 2.0](https://github.com/libp2p/specs/pull/95) when the spec solidifies.
|
||||
Once all clients have implementations for multiselect 2.0, multistream-select 1.0 MAY be phased out.
|
||||
|
||||
## Multiplexing
|
||||
### Multiplexing
|
||||
|
||||
During connection bootstrapping, libp2p dynamically negotiates a mutually supported multiplexing method to conduct parallel conversations.
|
||||
This applies to transports that are natively incapable of multiplexing (e.g. TCP, WebSockets, WebRTC),
|
||||
@@ -163,9 +163,9 @@ and MAY support [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md).
|
||||
If both are supported by the client, yamux MUST take precedence during negotiation.
|
||||
See the [Rationale](#design-decision-rationale) section below for tradeoffs.
|
||||
|
||||
# Consensus-layer network interaction domains
|
||||
## Consensus-layer network interaction domains
|
||||
|
||||
## Configuration
|
||||
### Configuration
|
||||
|
||||
This section outlines constants that are used in this spec.
|
||||
|
||||
@@ -182,7 +182,7 @@ This section outlines constants that are used in this spec.
|
||||
| `MESSAGE_DOMAIN_INVALID_SNAPPY` | `0x00000000` | 4-byte domain for gossip message-id isolation of *invalid* snappy messages |
|
||||
| `MESSAGE_DOMAIN_VALID_SNAPPY` | `0x01000000` | 4-byte domain for gossip message-id isolation of *valid* snappy messages |
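
These two domains feed into the gossip `message-id` computation described in this section. A sketch of the Phase 0 rule (later forks additionally mix in the topic), assuming the `python-snappy` package:

```python
from hashlib import sha256

import snappy  # python-snappy

MESSAGE_DOMAIN_INVALID_SNAPPY = bytes.fromhex("00000000")
MESSAGE_DOMAIN_VALID_SNAPPY = bytes.fromhex("01000000")


def message_id(message_data: bytes) -> bytes:
    try:
        decompressed = snappy.decompress(message_data)
        return sha256(MESSAGE_DOMAIN_VALID_SNAPPY + decompressed).digest()[:20]
    except Exception:
        # Payloads that are not valid snappy are isolated under their own domain.
        return sha256(MESSAGE_DOMAIN_INVALID_SNAPPY + message_data).digest()[:20]
```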
|
||||
|
||||
## MetaData
|
||||
### MetaData
|
||||
|
||||
Clients MUST locally store the following `MetaData`:
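
For reference, the Phase 0 `MetaData` object (unchanged by this diff; Altair later extends it with `syncnets`) is:

```
(
  seq_number: uint64
  attnets: Bitvector[ATTESTATION_SUBNET_COUNT]
)
```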
|
||||
|
||||
@@ -203,7 +203,7 @@ Where
|
||||
is entirely independent of the ENR sequence number,
|
||||
and will in most cases be out of sync with the ENR sequence number.
|
||||
|
||||
## The gossip domain: gossipsub
|
||||
### The gossip domain: gossipsub
|
||||
|
||||
Clients MUST support the [gossipsub v1](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md) libp2p Protocol
|
||||
including the [gossipsub v1.1](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md) extension.
|
||||
@@ -229,7 +229,7 @@ The following gossipsub [parameters](https://github.com/libp2p/specs/blob/master
|
||||
for peer scoring and other attack mitigations.
|
||||
These are currently under investigation and will be spec'd and released to mainnet when they are ready.
|
||||
|
||||
### Topics and messages
|
||||
#### Topics and messages
|
||||
|
||||
Topics are plain UTF-8 strings and are encoded on the wire as determined by protobuf (gossipsub messages are enveloped in protobuf messages).
|
||||
Topic strings have form: `/eth2/ForkDigestValue/Name/Encoding`.
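
As an illustration (the fork digest below is a made-up value), a beacon block published under the `ssz_snappy` encoding defined later uses a topic such as:

```python
fork_digest_value = "b5303f2a"  # lowercase hex-encoded ForkDigest, no 0x prefix
topic = f"/eth2/{fork_digest_value}/beacon_block/ssz_snappy"
```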
|
||||
@@ -289,7 +289,7 @@ We utilize `ACCEPT`, `REJECT`, and `IGNORE`. For each gossipsub topic, there are
|
||||
If all validations pass, return `ACCEPT`.
|
||||
If one or more validations fail while processing the items in order, return either `REJECT` or `IGNORE` as specified in the prefix of the particular condition.
|
||||
|
||||
#### Global topics
|
||||
##### Global topics
|
||||
|
||||
There are two primary global topics used to propagate beacon blocks (`beacon_block`)
|
||||
and aggregate attestations (`beacon_aggregate_and_proof`) to all nodes on the network.
|
||||
@@ -297,7 +297,7 @@ and aggregate attestations (`beacon_aggregate_and_proof`) to all nodes on the ne
|
||||
There are three additional global topics that are used to propagate lower frequency validator messages
|
||||
(`voluntary_exit`, `proposer_slashing`, and `attester_slashing`).
|
||||
|
||||
##### `beacon_block`
|
||||
###### `beacon_block`
|
||||
|
||||
The `beacon_block` topic is used solely for propagating new signed beacon blocks to all nodes on the network.
|
||||
Signed blocks are sent in their entirety.
|
||||
@@ -317,7 +317,7 @@ The following validations MUST pass before forwarding the `signed_beacon_block`
|
||||
- _[REJECT]_ The block's parent (defined by `block.parent_root`) passes validation.
|
||||
- _[REJECT]_ The block is from a higher slot than its parent.
|
||||
- _[REJECT]_ The current `finalized_checkpoint` is an ancestor of `block` -- i.e.
|
||||
`get_ancestor(store, block.parent_root, compute_start_slot_at_epoch(store.finalized_checkpoint.epoch))
|
||||
`get_checkpoint_block(store, block.parent_root, store.finalized_checkpoint.epoch)
|
||||
== store.finalized_checkpoint.root`
|
||||
- _[REJECT]_ The block is proposed by the expected `proposer_index` for the block's slot
|
||||
in the context of the current shuffling (defined by `parent_root`/`slot`).
|
||||
@@ -325,7 +325,7 @@ The following validations MUST pass before forwarding the `signed_beacon_block`
|
||||
the block MAY be queued for later processing while proposers for the block's branch are calculated --
|
||||
in such a case _do not_ `REJECT`, instead `IGNORE` this message.
|
||||
|
||||
##### `beacon_aggregate_and_proof`
|
||||
###### `beacon_aggregate_and_proof`
|
||||
|
||||
The `beacon_aggregate_and_proof` topic is used to propagate aggregated attestations (as `SignedAggregateAndProof`s)
|
||||
to subscribing nodes (typically validators) to be included in future blocks.
|
||||
@@ -356,11 +356,11 @@ The following validations MUST pass before forwarding the `signed_aggregate_and_
|
||||
(a client MAY queue aggregates for processing once block is retrieved).
|
||||
- _[REJECT]_ The block being voted for (`aggregate.data.beacon_block_root`) passes validation.
|
||||
- _[IGNORE]_ The current `finalized_checkpoint` is an ancestor of the `block` defined by `aggregate.data.beacon_block_root` -- i.e.
|
||||
`get_ancestor(store, aggregate.data.beacon_block_root, compute_start_slot_at_epoch(store.finalized_checkpoint.epoch))
|
||||
`get_checkpoint_block(store, aggregate.data.beacon_block_root, finalized_checkpoint.epoch)
|
||||
== store.finalized_checkpoint.root`
|
||||
|
||||
|
||||
##### `voluntary_exit`
|
||||
###### `voluntary_exit`
|
||||
|
||||
The `voluntary_exit` topic is used solely for propagating signed voluntary validator exits to proposers on the network.
|
||||
Signed voluntary exits are sent in their entirety.
|
||||
@@ -370,7 +370,7 @@ The following validations MUST pass before forwarding the `signed_voluntary_exit
|
||||
for the validator with index `signed_voluntary_exit.message.validator_index`.
|
||||
- _[REJECT]_ All of the conditions within `process_voluntary_exit` pass validation.
|
||||
|
||||
##### `proposer_slashing`
|
||||
###### `proposer_slashing`
|
||||
|
||||
The `proposer_slashing` topic is used solely for propagating proposer slashings to proposers on the network.
|
||||
Proposer slashings are sent in their entirety.
|
||||
@@ -380,7 +380,7 @@ The following validations MUST pass before forwarding the `proposer_slashing` on
|
||||
for the proposer with index `proposer_slashing.signed_header_1.message.proposer_index`.
|
||||
- _[REJECT]_ All of the conditions within `process_proposer_slashing` pass validation.
|
||||
|
||||
##### `attester_slashing`
|
||||
###### `attester_slashing`
|
||||
|
||||
The `attester_slashing` topic is used solely for propagating attester slashings to proposers on the network.
|
||||
Attester slashings are sent in their entirety.
|
||||
@@ -392,11 +392,11 @@ Clients who receive an attester slashing on this topic MUST validate the conditi
|
||||
verify if `any(attester_slashed_indices.difference(prior_seen_attester_slashed_indices))`).
|
||||
- _[REJECT]_ All of the conditions within `process_attester_slashing` pass validation.
|
||||
|
||||
#### Attestation subnets
|
||||
##### Attestation subnets
|
||||
|
||||
Attestation subnets are used to propagate unaggregated attestations to subsections of the network.
|
||||
|
||||
##### `beacon_attestation_{subnet_id}`
|
||||
###### `beacon_attestation_{subnet_id}`
|
||||
|
||||
The `beacon_attestation_{subnet_id}` topics are used to propagate unaggregated attestations
|
||||
to the subnet `subnet_id` (typically beacon and persistent committees) to be aggregated before being gossiped to `beacon_aggregate_and_proof`.
|
||||
@@ -425,14 +425,14 @@ The following validations MUST pass before forwarding the `attestation` on the s
|
||||
(a client MAY queue attestations for processing once block is retrieved).
|
||||
- _[REJECT]_ The block being voted for (`attestation.data.beacon_block_root`) passes validation.
|
||||
- _[REJECT]_ The attestation's target block is an ancestor of the block named in the LMD vote -- i.e.
|
||||
`get_ancestor(store, attestation.data.beacon_block_root, compute_start_slot_at_epoch(attestation.data.target.epoch)) == attestation.data.target.root`
|
||||
`get_checkpoint_block(store, attestation.data.beacon_block_root, attestation.data.target.epoch) == attestation.data.target.root`
|
||||
- _[IGNORE]_ The current `finalized_checkpoint` is an ancestor of the `block` defined by `attestation.data.beacon_block_root` -- i.e.
|
||||
`get_ancestor(store, attestation.data.beacon_block_root, compute_start_slot_at_epoch(store.finalized_checkpoint.epoch))
|
||||
`get_checkpoint_block(store, attestation.data.beacon_block_root, store.finalized_checkpoint.epoch)
|
||||
== store.finalized_checkpoint.root`
|
||||
|
||||
|
||||
|
||||
#### Attestations and Aggregation
|
||||
##### Attestations and Aggregation
|
||||
|
||||
Attestation broadcasting is grouped into subnets defined by a topic.
|
||||
The number of subnets is defined via `ATTESTATION_SUBNET_COUNT`.
|
||||
@@ -445,7 +445,7 @@ Unaggregated attestations are sent as `Attestation`s to the subnet topic,
|
||||
|
||||
Aggregated attestations are sent to the `beacon_aggregate_and_proof` topic as `AggregateAndProof`s.
|
||||
|
||||
### Encodings
|
||||
#### Encodings
|
||||
|
||||
Topics are post-fixed with an encoding. Encodings define how the payload of a gossipsub message is encoded.
|
||||
|
||||
@@ -461,9 +461,9 @@ so [basic snappy block compression](https://github.com/google/snappy/blob/master
|
||||
Implementations MUST use a single encoding for gossip.
|
||||
Changing an encoding will require coordination between participating implementations.
|
||||
|
||||
## The Req/Resp domain
|
||||
### The Req/Resp domain
|
||||
|
||||
### Protocol identification
|
||||
#### Protocol identification
|
||||
|
||||
Each message type is segregated into its own libp2p protocol ID, which is a case-sensitive UTF-8 string of the form:
|
||||
|
||||
@@ -485,7 +485,7 @@ With:
|
||||
This protocol segregation allows libp2p `multistream-select 1.0` / `multiselect 2.0`
|
||||
to handle the request type, version, and encoding negotiation before establishing the underlying streams.
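
For instance, combining the `Status` message defined below with the `ssz_snappy` encoding strategy yields the following full protocol ID (illustrative composition only):

```python
protocol_prefix = "/eth2/beacon_chain/req"
message_name, schema_version, encoding = "status", "1", "ssz_snappy"
protocol_id = f"{protocol_prefix}/{message_name}/{schema_version}/{encoding}"
# -> "/eth2/beacon_chain/req/status/1/ssz_snappy"
```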
|
||||
|
||||
### Req/Resp interaction
|
||||
#### Req/Resp interaction
|
||||
|
||||
We use ONE stream PER request/response interaction.
|
||||
Streams are closed when the interaction finishes, whether in success or in error.
|
||||
@@ -515,7 +515,7 @@ Regardless of these type specific bounds, a global maximum uncompressed byte siz
|
||||
Clients MUST ensure that lengths are within these bounds; if not, they SHOULD reset the stream immediately.
|
||||
Clients tracking peer reputation MAY decrement the score of the misbehaving peer under this circumstance.
|
||||
|
||||
#### Requesting side
|
||||
##### Requesting side
|
||||
|
||||
Once a new stream with the protocol ID for the request type has been negotiated, the full request message SHOULD be sent immediately.
|
||||
The request MUST be encoded according to the encoding strategy.
|
||||
@@ -537,7 +537,7 @@ A requester SHOULD read from the stream until either:
|
||||
For requests consisting of a single valid `response_chunk`,
|
||||
the requester SHOULD read the chunk fully, as defined by the `encoding-dependent-header`, before closing the stream.
|
||||
|
||||
#### Responding side
|
||||
##### Responding side
|
||||
|
||||
Once a new stream with the protocol ID for the request type has been negotiated,
|
||||
the responder SHOULD process the incoming request and MUST validate it before processing it.
|
||||
@@ -588,7 +588,7 @@ The `ErrorMessage` schema is:
|
||||
*Note*: By convention, the `error_message` is a sequence of bytes that MAY be interpreted as a UTF-8 string (for debugging purposes).
|
||||
Clients MUST treat any byte sequence as valid.
|
||||
|
||||
### Encoding strategies
|
||||
#### Encoding strategies
|
||||
|
||||
The token of the negotiated protocol ID specifies the type of encoding to be used for the req/resp interaction.
|
||||
Only one value is possible at this time:
|
||||
@@ -599,7 +599,7 @@ Only one value is possible at this time:
|
||||
For example, the `BeaconBlocksByRoot` request is an SSZ-encoded list of `Root`'s.
|
||||
This encoding type MUST be supported by all clients.
|
||||
|
||||
#### SSZ-snappy encoding strategy
|
||||
##### SSZ-snappy encoding strategy
|
||||
|
||||
The [SimpleSerialize (SSZ) specification](../../ssz/simple-serialize.md) outlines how objects are SSZ-encoded.
|
||||
|
||||
@@ -646,9 +646,9 @@ constituents individually as `response_chunk`s. For example, the
|
||||
`List[SignedBeaconBlock, ...]` response type sends zero or more `response_chunk`s.
|
||||
Each _successful_ `response_chunk` contains a single `SignedBeaconBlock` payload.
|
||||
|
||||
### Messages
|
||||
#### Messages
|
||||
|
||||
#### Status
|
||||
##### Status
|
||||
|
||||
**Protocol ID:** ``/eth2/beacon_chain/req/status/1/``
|
||||
|
||||
@@ -694,7 +694,7 @@ SHOULD request beacon blocks from its counterparty via the `BeaconBlocksByRange`
|
||||
the client might need to send `Status` request again to learn if the peer has a higher head.
|
||||
Implementers are free to implement such behavior in their own way.
|
||||
|
||||
#### Goodbye
|
||||
##### Goodbye
|
||||
|
||||
**Protocol ID:** ``/eth2/beacon_chain/req/goodbye/1/``
|
||||
|
||||
@@ -718,7 +718,7 @@ The request/response MUST be encoded as a single SSZ-field.
|
||||
|
||||
The response MUST consist of a single `response_chunk`.
|
||||
|
||||
#### BeaconBlocksByRange
|
||||
##### BeaconBlocksByRange
|
||||
|
||||
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/1/`
|
||||
|
||||
@@ -795,7 +795,7 @@ In particular when `step == 1`, each `parent_root` MUST match the `hash_tree_roo
|
||||
After the initial block, clients MAY stop in the process of responding
|
||||
if their fork choice changes the view of the chain in the context of the request.
|
||||
|
||||
#### BeaconBlocksByRoot
|
||||
##### BeaconBlocksByRoot
|
||||
|
||||
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/1/`
|
||||
|
||||
@@ -835,7 +835,7 @@ Clients MAY limit the number of blocks in the response.
|
||||
|
||||
`/eth2/beacon_chain/req/beacon_blocks_by_root/1/` is deprecated. Clients MAY respond with an empty list during the deprecation transition period.
|
||||
|
||||
#### Ping
|
||||
##### Ping
|
||||
|
||||
**Protocol ID:** `/eth2/beacon_chain/req/ping/1/`
|
||||
|
||||
@@ -867,7 +867,7 @@ The request MUST be encoded as an SSZ-field.
|
||||
|
||||
The response MUST consist of a single `response_chunk`.
|
||||
|
||||
#### GetMetaData
|
||||
##### GetMetaData
|
||||
|
||||
**Protocol ID:** `/eth2/beacon_chain/req/metadata/1/`
|
||||
|
||||
@@ -890,14 +890,14 @@ The response MUST be encoded as an SSZ-container.
|
||||
|
||||
The response MUST consist of a single `response_chunk`.
|
||||
|
||||
## The discovery domain: discv5
|
||||
### The discovery domain: discv5
|
||||
|
||||
Discovery Version 5 ([discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md)) (Protocol version v5.1) is used for peer discovery.
|
||||
|
||||
`discv5` is a standalone protocol, running on UDP on a dedicated port, meant for peer discovery only.
|
||||
`discv5` supports self-certified, flexible peer records (ENRs) and topic-based advertisement, both of which are (or will be) requirements in this context.
|
||||
|
||||
### Integration into libp2p stacks
|
||||
#### Integration into libp2p stacks
|
||||
|
||||
`discv5` SHOULD be integrated into the client’s libp2p stack by implementing an adaptor
|
||||
to make it conform to the [service discovery](https://github.com/libp2p/go-libp2p-core/blob/master/discovery/discovery.go)
|
||||
@@ -908,7 +908,7 @@ and the outputs will be multiaddrs converted from the ENR records returned by th
|
||||
|
||||
This integration enables the libp2p stack to subsequently form connections and streams with discovered peers.
|
||||
|
||||
### ENR structure
|
||||
#### ENR structure
|
||||
|
||||
The Ethereum Node Record (ENR) for an Ethereum consensus client MUST contain the following entries
|
||||
(exclusive of the sequence number and signature, which MUST be present in an ENR):
|
||||
@@ -923,7 +923,7 @@ The ENR MAY contain the following entries:
|
||||
|
||||
Specifications of these parameters can be found in the [ENR Specification](http://eips.ethereum.org/EIPS/eip-778).
|
||||
|
||||
#### Attestation subnet bitfield
|
||||
##### Attestation subnet bitfield
|
||||
|
||||
The ENR `attnets` entry signifies the attestation subnet bitfield with the following form
|
||||
to more easily discover peers participating in particular attestation gossip subnets.
|
||||
@@ -936,7 +936,7 @@ If a node's `MetaData.attnets` has any non-zero bit, the ENR MUST include the `a
|
||||
|
||||
If a node's `MetaData.attnets` is composed of all zeros, the ENR MAY optionally include the `attnets` entry or leave it out entirely.
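
A sketch of reading such a bitfield (SSZ `Bitvector` bits are stored little-endian within each byte; the helper below is illustrative, not spec text):

```python
ATTESTATION_SUBNET_COUNT = 64


def advertised_subnets(attnets: bytes) -> list:
    # One bit per subnet: bit i is stored in byte i // 8 at position i % 8.
    assert len(attnets) == ATTESTATION_SUBNET_COUNT // 8
    return [i for i in range(ATTESTATION_SUBNET_COUNT) if (attnets[i // 8] >> (i % 8)) & 1]
```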
|
||||
|
||||
#### `eth2` field
|
||||
##### `eth2` field
|
||||
|
||||
ENRs MUST carry a generic `eth2` key with a 16-byte value of the node's current fork digest, next fork version,
|
||||
and next fork epoch to ensure connections are made with peers on the intended Ethereum network.
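
The 16 bytes are the SSZ encoding of the `ENRForkID` container (reproduced here for reference from the Phase 0 definition):

```python
class ENRForkID(Container):
    fork_digest: ForkDigest        # 4 bytes
    next_fork_version: Version     # 4 bytes
    next_fork_epoch: Epoch         # 8 bytes
```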
|
||||
@@ -979,11 +979,11 @@ Clients MAY connect to peers with the same `fork_digest` but a different `next_f
|
||||
Unless `ENRForkID` is manually updated to match prior to the earlier `next_fork_epoch` of the two clients,
|
||||
these connecting clients will be unable to successfully interact starting at the earlier `next_fork_epoch`.
|
||||
|
||||
# Design decision rationale
|
||||
## Design decision rationale
|
||||
|
||||
## Transport
|
||||
### Transport
|
||||
|
||||
### Why are we defining specific transports?
|
||||
#### Why are we defining specific transports?
|
||||
|
||||
libp2p peers can listen on multiple transports concurrently, and these can change over time.
|
||||
Multiaddrs encode not only the address but also the transport to be used to dial.
|
||||
@@ -992,7 +992,7 @@ Due to this dynamic nature, agreeing on specific transports like TCP, QUIC, or W
|
||||
|
||||
However, it is useful to define a minimum baseline for interoperability purposes.
|
||||
|
||||
### Can clients support other transports/handshakes than the ones mandated by the spec?
|
||||
#### Can clients support other transports/handshakes than the ones mandated by the spec?
|
||||
|
||||
Clients may support other transports such as libp2p QUIC, WebSockets, and WebRTC transports, if available in the language of choice.
|
||||
While interoperability shall not be harmed by lack of such support, the advantages are desirable:
|
||||
@@ -1007,7 +1007,7 @@ and the accompanying [QUIC-TLS document](https://tools.ietf.org/html/draft-ietf-
|
||||
The usage of one handshake procedure or the other shall be transparent to the application layer,
|
||||
once the libp2p Host/Node object has been configured appropriately.
|
||||
|
||||
### What are the advantages of using TCP/QUIC/Websockets?
|
||||
#### What are the advantages of using TCP/QUIC/Websockets?
|
||||
|
||||
TCP is a reliable, ordered, full-duplex, congestion-controlled network protocol that powers much of the Internet as we know it today.
|
||||
HTTP/1.1 and HTTP/2 run atop TCP.
|
||||
@@ -1027,7 +1027,7 @@ and we may only become subject to standard IP-based firewall filtering—somethi
|
||||
WebSockets and/or WebRTC transports are necessary for interaction with browsers,
|
||||
and will become increasingly important as we incorporate browser-based light clients to the Ethereum network.
|
||||
|
||||
### Why do we not just support a single transport?
|
||||
#### Why do we not just support a single transport?
|
||||
|
||||
Networks evolve.
|
||||
Hardcoding design decisions leads to ossification, preventing the evolution of networks alongside the state of the art.
|
||||
@@ -1039,7 +1039,7 @@ Clients can adopt new transports without breaking old ones, and the multi-transp
|
||||
(e.g. browsers, embedded devices) to interact with the network as first-class citizens via suitable/native transports (e.g. WSS),
|
||||
without the need for proxying or trust delegation to servers.
|
||||
|
||||
### Why are we not using QUIC from the start?
|
||||
#### Why are we not using QUIC from the start?
|
||||
|
||||
The QUIC standard is still not finalized (at working draft 22 at the time of writing),
|
||||
and not all mainstream runtimes/languages have mature, standard, and/or fully-interoperable [QUIC support](https://github.com/quicwg/base-drafts/wiki/Implementations).
|
||||
@@ -1052,9 +1052,9 @@ On the other hand, TLS 1.3 is the newest, simplified iteration of TLS.
|
||||
Old, insecure, obsolete ciphers and algorithms have been removed, adopting Ed25519 as the sole ECDH key agreement function.
|
||||
Handshakes are faster, 1-RTT data is supported, and session resumption is a reality, amongst other features.
|
||||
|
||||
## Multiplexing
|
||||
### Multiplexing
|
||||
|
||||
### Why are we using mplex/yamux?
|
||||
#### Why are we using mplex/yamux?
|
||||
|
||||
[Yamux](https://github.com/hashicorp/yamux/blob/master/spec.md) is a multiplexer invented by Hashicorp that supports stream-level congestion control.
|
||||
Implementations exist in a limited set of languages, and it’s not a trivial piece to develop.
|
||||
@@ -1066,9 +1066,9 @@ It does not support stream-level congestion control and is subject to head-of-li
|
||||
Overlay multiplexers are not necessary with QUIC since the protocol provides native multiplexing,
|
||||
but they need to be layered atop TCP, WebSockets, and other transports that lack such support.
|
||||
|
||||
## Protocol Negotiation
|
||||
### Protocol Negotiation
|
||||
|
||||
### When is multiselect 2.0 due and why do we plan to migrate to it?
|
||||
#### When is multiselect 2.0 due and why do we plan to migrate to it?
|
||||
|
||||
multiselect 2.0 is currently being conceptualized.
|
||||
The debate started [on this issue](https://github.com/libp2p/specs/pull/95),
|
||||
@@ -1084,7 +1084,7 @@ We plan to eventually migrate to multiselect 2.0 because it will:
|
||||
3. Leverage *push data* mechanisms of underlying protocols to expedite negotiation.
|
||||
4. Provide the building blocks for enhanced censorship resistance.
|
||||
|
||||
### What is the difference between connection-level and stream-level protocol negotiation?
|
||||
#### What is the difference between connection-level and stream-level protocol negotiation?
|
||||
|
||||
All libp2p connections must be authenticated, encrypted, and multiplexed.
|
||||
Connections using network transports unsupportive of native authentication/encryption and multiplexing (e.g. TCP) need to undergo protocol negotiation to agree on a mutually supported:
|
||||
@@ -1101,9 +1101,9 @@ When opening streams, peers pin a protocol to that stream, by conducting *stream
|
||||
At present, multistream-select 1.0 is used for both types of negotiation,
|
||||
but multiselect 2.0 will use dedicated mechanisms for connection bootstrapping process and stream protocol negotiation.
|
||||
|
||||
## Encryption
|
||||
### Encryption
|
||||
|
||||
### Why are we not supporting SecIO?
|
||||
#### Why are we not supporting SecIO?
|
||||
|
||||
SecIO has been the default encryption layer for libp2p for years.
|
||||
It is used in IPFS and Filecoin. And although it will be superseded shortly, it is proven to work at scale.
|
||||
@@ -1114,7 +1114,7 @@ a mechanism that multiselect 2.0 will leverage to reduce round trips during conn
|
||||
|
||||
SecIO is not considered secure for the purposes of this spec.
|
||||
|
||||
### Why are we using Noise?
|
||||
#### Why are we using Noise?
|
||||
|
||||
Copied from the Noise Protocol Framework [website](http://www.noiseprotocol.org):
|
||||
|
||||
@@ -1129,7 +1129,7 @@ and are used in major cryptographic-centric projects like WireGuard, I2P, and Li
|
||||
[Various](https://www.wireguard.com/papers/kobeissi-bhargavan-noise-explorer-2018.pdf) [studies](https://eprint.iacr.org/2019/436.pdf)
|
||||
have assessed the stated security goals of several Noise handshakes with positive results.
|
||||
|
||||
### Why are we using encryption at all?
|
||||
#### Why are we using encryption at all?
|
||||
|
||||
Transport level encryption secures message exchange and provides properties that are useful for privacy, safety, and censorship resistance.
|
||||
These properties are derived from the following security guarantees that apply to the entire communication between two peers:
|
||||
@@ -1146,9 +1146,9 @@ Note that transport-level encryption is not exclusive of application-level encry
|
||||
Transport-level encryption secures the communication itself,
|
||||
while application-level cryptography is necessary for the application’s use cases (e.g. signatures, randomness, etc.).
|
||||
|
||||
## Gossipsub
|
||||
### Gossipsub
|
||||
|
||||
### Why are we using a pub/sub algorithm for block and attestation propagation?
|
||||
#### Why are we using a pub/sub algorithm for block and attestation propagation?
|
||||
|
||||
Pubsub is a technique to broadcast/disseminate data across a network rapidly.
|
||||
Such data is packaged in fire-and-forget messages that do not require a response from every recipient.
|
||||
@@ -1156,18 +1156,18 @@ Peers subscribed to a topic participate in the propagation of messages in that t
|
||||
|
||||
The alternative is to maintain a fully connected mesh (all peers connected to each other 1:1), which scales poorly (O(n^2)).
|
||||
|
||||
### Why are we using topics to segregate encodings, yet only support one encoding?
|
||||
#### Why are we using topics to segregate encodings, yet only support one encoding?
|
||||
|
||||
For future extensibility with almost zero overhead now (besides the extra bytes in the topic name).
|
||||
|
||||
### How do we upgrade gossip channels (e.g. changes in encoding, compression)?
|
||||
#### How do we upgrade gossip channels (e.g. changes in encoding, compression)?
|
||||
|
||||
Changing gossipsub/broadcasts requires a coordinated upgrade where all clients start publishing to the new topic together, during a hard fork.
|
||||
|
||||
When a node is preparing for upcoming tasks (e.g. validator duty lookahead) on a gossipsub topic,
|
||||
the node should join the topic of the future epoch in which the task is to occur in addition to listening to the topics for the current epoch.
|
||||
|
||||
### Why must all clients use the same gossip topic instead of one negotiated between each peer pair?
|
||||
#### Why must all clients use the same gossip topic instead of one negotiated between each peer pair?
|
||||
|
||||
Supporting multiple topics/encodings would require the presence of relayers to translate between encodings
|
||||
and topics so as to avoid network fragmentation where participants have diverging views on the gossiped state,
|
||||
@@ -1182,7 +1182,7 @@ but the price here is pretty high in terms of overhead -- both computational and
|
||||
|
||||
It is permitted for clients to publish data on alternative topics as long as they also publish on the network-wide mandatory topic.
|
||||
|
||||
### Why are the topics strings and not hashes?
|
||||
#### Why are the topics strings and not hashes?
|
||||
|
||||
Topic names have a hierarchical structure.
|
||||
In the future, gossipsub may support wildcard subscriptions
|
||||
@@ -1195,14 +1195,14 @@ since the domain is finite anyway, and calculating a digest's preimage would be
|
||||
Furthermore, the topic names are shorter than their digest equivalents (assuming SHA-256 hash),
|
||||
so hashing topics would bloat messages unnecessarily.
|
||||
|
||||
### Why are we using the `StrictNoSign` signature policy?
|
||||
#### Why are we using the `StrictNoSign` signature policy?
|
||||
|
||||
The policy omits the `from` (1), `seqno` (3), `signature` (5) and `key` (6) fields. These fields would:
|
||||
- Expose origin of sender (`from`), type of sender (based on `seqno`)
|
||||
- Add extra unused data to the gossip, since message IDs are based on `data`, not on the `from` and `seqno`.
|
||||
- Introduce more message validation than necessary, e.g. no `signature`.
|
||||
|
||||
### Why are we overriding the default libp2p pubsub `message-id`?
|
||||
#### Why are we overriding the default libp2p pubsub `message-id`?
|
||||
|
||||
For our current purposes, there is no need to address messages based on source peer, or track a message `seqno`.
|
||||
By overriding the default `message-id` to use content-addressing we can filter unnecessary duplicates before hitting the application layer.
|
||||
@@ -1214,7 +1214,7 @@ Some examples of where messages could be duplicated:
|
||||
Partial aggregates could be duplicated
|
||||
* Clients re-publishing seen messages
|
||||
|
||||
### Why are these specific gossip parameters chosen?
|
||||
#### Why are these specific gossip parameters chosen?
|
||||
|
||||
- `D`, `D_low`, `D_high`, `D_lazy`: recommended defaults.
|
||||
- `heartbeat_interval`: 0.7 seconds, recommended for the beacon chain in the [GossipSub evaluation report by Protocol Labs](https://gateway.ipfs.io/ipfs/QmRAFP5DBnvNjdYSbWhEhVRJJDFCLpPyvew5GwCCB4VxM4).
|
||||
@@ -1233,7 +1233,7 @@ Some examples of where messages could be duplicated:
|
||||
Attestation gossip validity is bounded by an epoch, so this is the safe max bound.
|
||||
|
||||
|
||||
### Why is there `MAXIMUM_GOSSIP_CLOCK_DISPARITY` when validating slot ranges of messages in gossip subnets?
|
||||
#### Why is there `MAXIMUM_GOSSIP_CLOCK_DISPARITY` when validating slot ranges of messages in gossip subnets?
|
||||
|
||||
For some gossip channels (e.g. those for Attestations and BeaconBlocks),
|
||||
there are designated ranges of slots during which particular messages can be sent,
|
||||
@@ -1247,14 +1247,14 @@ For minimum and maximum allowable slot broadcast times,
|
||||
Although messages can at times be eagerly gossiped to the network,
|
||||
the node's fork choice prevents integration of these messages into the actual consensus until the _actual local start_ of the designated slot.
|
||||
|
||||
### Why are there `ATTESTATION_SUBNET_COUNT` attestation subnets?
|
||||
#### Why are there `ATTESTATION_SUBNET_COUNT` attestation subnets?
|
||||
|
||||
Depending on the number of validators, it may be more efficient to group shard subnets and might provide better stability for the gossipsub channel.
|
||||
The exact grouping will be dependent on more involved network tests.
|
||||
This constant allows for more flexibility in setting up the network topology for attestation aggregation (as aggregation should happen on each subnet).
|
||||
The value is currently set to be equal to `MAX_COMMITTEES_PER_SLOT` if/until network tests indicate otherwise.
|
||||
|
||||
### Why are attestations limited to be broadcast on gossip channels within `SLOTS_PER_EPOCH` slots?
|
||||
#### Why are attestations limited to be broadcast on gossip channels within `SLOTS_PER_EPOCH` slots?
|
||||
|
||||
Attestations can only be included on chain within an epoch's worth of slots so this is the natural cutoff.
|
||||
There is no utility to the chain to broadcast attestations older than one epoch,
|
||||
@@ -1265,7 +1265,7 @@ In addition to this, relaying attestations requires validating the attestation i
|
||||
Thus, validating arbitrarily old attestations would put additional requirements on which states need to be readily available to the node.
|
||||
This would result in a higher resource burden and could serve as a DoS vector.
|
||||
|
||||
### Why are aggregate attestations broadcast to the global topic as `AggregateAndProof`s rather than just as `Attestation`s?
|
||||
#### Why are aggregate attestations broadcast to the global topic as `AggregateAndProof`s rather than just as `Attestation`s?
|
||||
|
||||
The dominant strategy for an individual validator is to always broadcast an aggregate containing their own attestation
|
||||
to the global channel to ensure that proposers see their attestation for inclusion.
|
||||
@@ -1275,19 +1275,19 @@ the gossiped aggregate ensures that this dominant strategy will not flood the gl
|
||||
Also, an attacker can create any number of honest-looking aggregates and broadcast them to the global pubsub channel.
|
||||
Thus without some sort of proof of selection as an aggregator, the global channel can trivially be spammed.
|
||||
|
||||
### Why are we sending entire objects in the pubsub and not just hashes?
|
||||
#### Why are we sending entire objects in the pubsub and not just hashes?
|
||||
|
||||
Entire objects should be sent to get the greatest propagation speeds.
|
||||
If only hashes are sent, then block and attestation propagation is dependent on recursive requests from each peer.
|
||||
In a hash-only scenario, peers could receive hashes without knowing who to download the actual contents from.
|
||||
Sending entire objects ensures that they get propagated through the entire network.
|
||||
|
||||
### Should clients gossip blocks if they *cannot* validate the proposer signature due to not yet being synced, not knowing the head block, etc?
|
||||
#### Should clients gossip blocks if they *cannot* validate the proposer signature due to not yet being synced, not knowing the head block, etc?
|
||||
|
||||
The prohibition of unverified-block-gossiping extends to nodes that cannot verify a signature
|
||||
due to not being fully synced to ensure that such (amplified) DOS attacks are not possible.
|
||||
|
||||
### How are we going to discover peers in a gossipsub topic?
|
||||
#### How are we going to discover peers in a gossipsub topic?
|
||||
|
||||
In Phase 0, peers for attestation subnets will be found using the `attnets` entry in the ENR.
|
||||
|
||||
@@ -1295,7 +1295,7 @@ Although this method will be sufficient for early upgrade of the beacon chain, w
|
||||
ENRs should ultimately not be used for this purpose.
|
||||
They are best suited to store identity, location, and capability information, rather than more volatile advertisements.
|
||||
|
||||
### How should fork version be used in practice?
|
||||
#### How should fork version be used in practice?
|
||||
|
||||
Fork versions are to be manually updated (likely via incrementing) at each hard fork.
|
||||
This is to provide native domain separation for signatures as well as to aid in identifying peers (via ENRs)
|
||||
@@ -1308,9 +1308,9 @@ In these cases, extra care should be taken to isolate fork versions (e.g. flip a
|
||||
A node locally stores all previous and future planned fork versions along with each fork epoch.
|
||||
This allows for handling sync and processing messages starting from past forks/epochs.
|
||||
|
||||
## Req/Resp
|
||||
### Req/Resp
|
||||
|
||||
### Why segregate requests into dedicated protocol IDs?
|
||||
#### Why segregate requests into dedicated protocol IDs?
|
||||
|
||||
Requests are segregated by protocol ID to:
|
||||
|
||||
@@ -1343,7 +1343,7 @@ Multiselect 2.0 will eventually remove this overhead by memoizing previously sel
|
||||
Fortunately, this req/resp protocol is not the expected network bottleneck in the protocol
|
||||
so the additional overhead is not expected to significantly hinder this domain.
|
||||
|
||||
### Why are messages length-prefixed with a protobuf varint in the SSZ-encoding?
|
||||
#### Why are messages length-prefixed with a protobuf varint in the SSZ-encoding?
|
||||
|
||||
We are using single-use streams where each stream is closed at the end of the message.
|
||||
Thus, libp2p transparently handles message delimiting in the underlying stream.
|
||||
@@ -1361,7 +1361,7 @@ Nevertheless, in the case of `ssz_snappy`, messages are still length-prefixed wi
|
||||
[Protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints) is an efficient technique to encode variable-length (unsigned here) ints.
|
||||
Instead of reserving a fixed-size field of as many bytes as necessary to convey the maximum possible value, this field is elastic in exchange for 1-bit overhead per byte.
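
A minimal sketch of the encoding (7 payload bits per byte, with the most-significant bit used as a continuation flag):

```python
def encode_varint(n: int) -> bytes:
    out = bytearray()
    while True:
        byte = n & 0x7F
        n >>= 7
        out.append(byte | (0x80 if n else 0x00))
        if n == 0:
            return bytes(out)


assert encode_varint(300) == bytes([0xAC, 0x02])  # the classic protobuf example
```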
|
||||
|
||||
### Why do we version protocol strings with ordinals instead of semver?
|
||||
#### Why do we version protocol strings with ordinals instead of semver?
|
||||
|
||||
Using semver for network protocols is confusing.
|
||||
It is never clear what a change in a field, even if backwards compatible on deserialization, actually implies.
|
||||
@@ -1382,11 +1382,11 @@ because it's unclear if "backwards compatibility" and "breaking change" apply on
|
||||
|
||||
For this reason, we remove and replace semver with ordinals that require explicit agreement and do not mandate a specific policy for changes.
|
||||
|
||||
### Why is it called Req/Resp and not RPC?
|
||||
#### Why is it called Req/Resp and not RPC?
|
||||
|
||||
Req/Resp is used to avoid confusion with JSON-RPC and similar user-client interaction mechanisms.
|
||||
|
||||
### Why do we allow empty responses in block requests?
|
||||
#### Why do we allow empty responses in block requests?
|
||||
|
||||
When requesting blocks by range or root, it may happen that there are no blocks in the selected range or the responding node does not have the requested blocks.
|
||||
|
||||
@@ -1413,7 +1413,7 @@ Failing to provide blocks that nodes "should" have is reason to trust a peer les
|
||||
-- for example, if a particular peer gossips a block, it should have access to its parent.
|
||||
If a request for the parent fails, it's indicative of poor peer quality since peers should validate blocks before gossiping them.
|
||||
|
||||
### Why does `BeaconBlocksByRange` let the server choose which branch to send blocks from?
|
||||
#### Why does `BeaconBlocksByRange` let the server choose which branch to send blocks from?
|
||||
|
||||
When connecting, the `Status` message gives an idea about the sync status of a particular peer, but this changes over time.
|
||||
By the time a subsequent `BeaconBlockByRange` request is processed, the information may be stale,
|
||||
@@ -1423,7 +1423,7 @@ To avoid this race condition, we allow the responding side to choose which branc
|
||||
The requesting client then goes on to validate the blocks and incorporate them in their own database
|
||||
-- because they follow the same rules, they should at this point arrive at the same canonical chain.
|
||||
|
||||
### Why are `BlocksByRange` requests only required to be served for the latest `MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs?
|
||||
#### Why are `BlocksByRange` requests only required to be served for the latest `MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs?
|
||||
|
||||
Due to economic finality and weak subjectivity requirements of a proof-of-stake blockchain, for a new node to safely join the network
|
||||
the node must provide a recent checkpoint found out-of-band. This checkpoint can be in the form of a `root` & `epoch` or it can be the entire
|
||||
@@ -1447,7 +1447,7 @@ MIN_EPOCHS_FOR_BLOCK_REQUESTS = (
|
||||
|
||||
Where `MAX_SAFETY_DECAY = 100` and thus `MIN_EPOCHS_FOR_BLOCK_REQUESTS = 33024` (~5 months).
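
A quick sanity check of that figure, assuming mainnet values `MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256` and `CHURN_LIMIT_QUOTIENT = 2**16` (a sketch, not spec text):

```python
MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256
CHURN_LIMIT_QUOTIENT = 2**16
MAX_SAFETY_DECAY = 100

MIN_EPOCHS_FOR_BLOCK_REQUESTS = (
    MIN_VALIDATOR_WITHDRAWABILITY_DELAY
    + MAX_SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100)
)
assert MIN_EPOCHS_FOR_BLOCK_REQUESTS == 33024  # ~5 months of epochs
```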
|
||||
|
||||
### Why must the proposer signature be checked when backfilling blocks in the database?
|
||||
#### Why must the proposer signature be checked when backfilling blocks in the database?
|
||||
|
||||
When backfilling blocks in a database from a known safe block/state (e.g. when starting from a weak subjectivity state),
|
||||
the node not only must ensure the `BeaconBlock`s form a chain to the known safe block,
|
||||
@@ -1462,7 +1462,7 @@ Although in this particular use case this does not represent a decay in safety
|
||||
would represent invalid historic data and could be unwittingly transmitted to
|
||||
additional nodes.
|
||||
|
||||
### What's the effect of empty slots on the sync algorithm?
|
||||
#### What's the effect of empty slots on the sync algorithm?
|
||||
|
||||
When syncing one can only tell that a slot has been skipped on a particular branch
|
||||
by examining subsequent blocks and analyzing the graph formed by the parent root.
|
||||
@@ -1472,9 +1472,9 @@ For example, if a peer responds with blocks [2, 3] when asked for [2, 3, 4], cli
|
||||
-- it merely means that the responding peer did not send it (they may not have it yet or may maliciously be trying to hide it)
|
||||
and successive blocks will be needed to determine if there exists a block at slot 4 in this particular branch.
|
||||
|
||||
## Discovery
|
||||
### Discovery
|
||||
|
||||
### Why are we using discv5 and not libp2p Kademlia DHT?
|
||||
#### Why are we using discv5 and not libp2p Kademlia DHT?
|
||||
|
||||
discv5 is a standalone protocol, running on UDP on a dedicated port, meant for peer and service discovery only.
|
||||
discv5 supports self-certified, flexible peer records (ENRs) and topic-based advertisement, both of which are, or will be, requirements in this context.
|
||||
@@ -1490,7 +1490,7 @@ It should also help light clients of both networks find nodes with specific capa
|
||||
|
||||
discv5 is in the process of being audited.
|
||||
|
||||
### What is the difference between an ENR and a multiaddr, and why are we using ENRs?
|
||||
#### What is the difference between an ENR and a multiaddr, and why are we using ENRs?
|
||||
|
||||
Ethereum Node Records are self-certified node records.
|
||||
Nodes craft and disseminate ENRs for themselves, proving authorship via a cryptographic signature.
|
||||
@@ -1510,7 +1510,7 @@ discv5 uses ENRs and we will presumably need to:
|
||||
2. Define a bi-directional conversion function between multiaddrs and the corresponding denormalized fields in an ENR
|
||||
(ip, ip6, tcp, tcp6, etc.), for compatibility with nodes that do not support multiaddr natively (e.g. Ethereum execution-layer nodes).
|
||||
|
||||
### Why do we not form ENRs and find peers until genesis block/state is known?
|
||||
#### Why do we not form ENRs and find peers until genesis block/state is known?
|
||||
|
||||
Although client software might very well be running locally prior to the solidification of the beacon chain genesis state and block,
|
||||
clients cannot form valid ENRs prior to this point.
|
||||
@@ -1521,9 +1521,9 @@ Once genesis data is known, we can then form ENRs and safely find peers.
|
||||
When using a proof-of-work deposit contract for deposits, `fork_digest` will be known `GENESIS_DELAY` (7 days in mainnet configuration) before `genesis_time`,
|
||||
providing ample time to find peers and form initial connections and gossip subnets prior to genesis.
|
||||
|
||||
## Compression/Encoding
|
||||
### Compression/Encoding
|
||||
|
||||
### Why are we using SSZ for encoding?
|
||||
#### Why are we using SSZ for encoding?
|
||||
|
||||
SSZ is used at the consensus layer, and all implementations should have support for SSZ-encoding/decoding,
|
||||
requiring no further dependencies to be added to client implementations.
|
||||
@@ -1533,7 +1533,7 @@ The actual data in most protocols will be further compressed for efficiency.
|
||||
SSZ has well-defined schemas for consensus objects (typically sent across the wire) reducing any serialization schema data that needs to be sent.
|
||||
It also has defined all required types that are required for this network specification.
|
||||
|
||||
### Why are we compressing, and at which layers?
|
||||
#### Why are we compressing, and at which layers?
|
||||
|
||||
We compress on the wire to achieve smaller payloads per-message, which, in aggregate,
|
||||
result in higher efficiency, better utilization of available bandwidth, and overall reduction in network-wide traffic overhead.
|
||||
@@ -1563,13 +1563,13 @@ This looks different depending on the interaction layer:
|
||||
implementers are encouraged to encapsulate the encoding and compression logic behind
|
||||
MessageReader and MessageWriter components/strategies that can be layered on top of the raw byte streams.
|
||||
|
||||
### Why are we using Snappy for compression?
|
||||
#### Why are we using Snappy for compression?
|
||||
|
||||
Snappy is used in Ethereum 1.0. It is well maintained by Google, has good benchmarks,
|
||||
and can calculate the size of the uncompressed object without inflating it in memory.
|
||||
This prevents DOS vectors where large uncompressed data is sent.
|
||||
|
||||
### Can I get access to unencrypted bytes on the wire for debugging purposes?
|
||||
#### Can I get access to unencrypted bytes on the wire for debugging purposes?
|
||||
|
||||
Yes, you can add loggers in your libp2p protocol handlers to log incoming and outgoing messages.
|
||||
It is recommended to use programming design patterns to encapsulate the logging logic cleanly.
|
||||
@@ -1580,7 +1580,7 @@ you can use logging facilities in those frameworks/runtimes to enable message tr
|
||||
For specific ad-hoc testing scenarios, you can use the [plaintext/2.0.0 secure channel](https://github.com/libp2p/specs/blob/master/plaintext/README.md)
|
||||
(which is essentially no-op encryption or message authentication), in combination with tcpdump or Wireshark to inspect the wire.
|
||||
|
||||
### What are SSZ type size bounds?
|
||||
#### What are SSZ type size bounds?
|
||||
|
||||
The SSZ encoding outputs of each type have size bounds: each dynamic type, such as a list, has a "limit", which can be used to compute the maximum valid output size.
|
||||
Note that for some more complex dynamic-length objects, element offsets (4 bytes each) may need to be included.
|
||||
@@ -1589,7 +1589,7 @@ Other types are static, they have a fixed size: no dynamic-length content is inv
|
||||
For reference, the type bounds can be computed ahead of time, [as per this example](https://gist.github.com/protolambda/db75c7faa1e94f2464787a480e5d613e).
|
||||
It is advisable to derive these lengths from the SSZ type definitions in use, to ensure that version changes do not cause out-of-sync type bounds.
|
||||
|
||||
# libp2p implementations matrix
|
||||
## libp2p implementations matrix
|
||||
|
||||
This section will soon contain a matrix showing the maturity/state of the libp2p features required
|
||||
by this spec across the languages in which clients are being developed.
|
||||
|
||||
@@ -10,6 +10,7 @@ This is an accompanying document to [Phase 0 -- The Beacon Chain](./beacon-chain
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Custom types](#custom-types)
|
||||
- [Constants](#constants)
|
||||
- [Misc](#misc)
|
||||
- [Containers](#containers)
|
||||
@@ -82,16 +83,28 @@ A validator is an entity that participates in the consensus of the Ethereum proo

All terminology, constants, functions, and protocol mechanics defined in the [Phase 0 -- The Beacon Chain](./beacon-chain.md) and [Phase 0 -- Deposit Contract](./deposit-contract.md) doc are requisite for this document and used throughout. Please see the Phase 0 doc before continuing and use as a reference throughout.

## Custom types

We define the following Python custom types for type hinting and readability:

| Name | SSZ equivalent | Description |
| - | - | - |
| `NodeID` | `uint256` | node identifier |
| `SubnetID` | `uint64` | subnet identifier |

## Constants

### Misc

| Name | Value | Unit | Duration |
| - | - | :-: | :-: |
| `TARGET_AGGREGATORS_PER_COMMITTEE` | `2**4` (= 16) | validators | |
| `RANDOM_SUBNETS_PER_VALIDATOR` | `2**0` (= 1) | subnets | |
| `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` | `2**8` (= 256) | epochs | ~27 hours |
| `TARGET_AGGREGATORS_PER_COMMITTEE` | `2**4` (= 16) | validators | |
| `EPOCHS_PER_SUBNET_SUBSCRIPTION` | `2**8` (= 256) | epochs | ~27 hours |
| `ATTESTATION_SUBNET_COUNT` | `64` | The number of attestation subnets used in the gossipsub protocol. | |
| `ATTESTATION_SUBNET_EXTRA_BITS` | `0` | The number of extra bits of a NodeId to use when mapping to a subscribed subnet | |
| `SUBNETS_PER_NODE` | `2` | The number of long-lived subnets a beacon node should be subscribed to. | |
| `ATTESTATION_SUBNET_PREFIX_BITS` | `(ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS)` | | |
| `NODE_ID_BITS` | `256` | The bit length of uint256 is 256 | |

## Containers
|
||||
|
||||
@@ -162,7 +175,7 @@ The `withdrawal_credentials` field must be such that:
|
||||
* `withdrawal_credentials[1:12] == b'\x00' * 11`
|
||||
* `withdrawal_credentials[12:] == eth1_withdrawal_address`
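
Put together, such credentials can be assembled as in the sketch below (`ETH1_ADDRESS_WITHDRAWAL_PREFIX` is the `0x01` prefix constant defined in the beacon chain spec; the helper itself is illustrative, not spec text):

```python
ETH1_ADDRESS_WITHDRAWAL_PREFIX = bytes.fromhex("01")


def eth1_withdrawal_credentials(eth1_withdrawal_address: bytes) -> bytes:
    assert len(eth1_withdrawal_address) == 20
    return ETH1_ADDRESS_WITHDRAWAL_PREFIX + b"\x00" * 11 + eth1_withdrawal_address
```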
|
||||
|
||||
After the merge of the current Ethereum application layer into the Beacon Chain,
|
||||
After the merge of the current Ethereum execution layer into the Beacon Chain,
|
||||
withdrawals to `eth1_withdrawal_address` will simply be increases to the account's ETH balance that do **NOT** trigger any EVM execution.
|
||||
|
||||
### Submit deposit
|
||||
@@ -513,7 +526,9 @@ The `subnet_id` for the `attestation` is calculated with:
- Let `subnet_id = compute_subnet_for_attestation(committees_per_slot, attestation.data.slot, attestation.data.index)`.

```python
def compute_subnet_for_attestation(committees_per_slot: uint64, slot: Slot, committee_index: CommitteeIndex) -> uint64:
def compute_subnet_for_attestation(committees_per_slot: uint64,
                                   slot: Slot,
                                   committee_index: CommitteeIndex) -> SubnetID:
    """
    Compute the correct subnet for an attestation for Phase 0.
    Note, this mimics expected future behavior where attestations will be mapped to their shard subnet.
@@ -521,7 +536,7 @@ def compute_subnet_for_attestation(committees_per_slot: uint64, slot: Slot, comm
    slots_since_epoch_start = uint64(slot % SLOTS_PER_EPOCH)
    committees_since_epoch_start = committees_per_slot * slots_since_epoch_start

    return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
    return SubnetID((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
```
|
||||
|
||||
### Attestation aggregation
|
||||
@@ -606,15 +621,31 @@ def get_aggregate_and_proof_signature(state: BeaconState,
|
||||
|
||||
## Phase 0 attestation subnet stability

Because Phase 0 does not have shards and thus does not have Shard Committees, there is no stable backbone to the attestation subnets (`beacon_attestation_{subnet_id}`). To provide this stability, each validator must:
Because Phase 0 does not have shards and thus does not have Shard Committees, there is no stable backbone to the attestation subnets (`beacon_attestation_{subnet_id}`). To provide this stability, each beacon node should:

* Randomly select and remain subscribed to `RANDOM_SUBNETS_PER_VALIDATOR` attestation subnets
* Maintain advertisement of the randomly selected subnets in their node's ENR `attnets` entry by setting the randomly selected `subnet_id` bits to `True` (e.g. `ENR["attnets"][subnet_id] = True`) for all persistent attestation subnets
* Set the lifetime of each random subscription to a random number of epochs between `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` and `2 * EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION`. At the end of life for a subscription, select a new random subnet, update subnet subscriptions, and publish an updated ENR
* Remain subscribed to `SUBNETS_PER_NODE` for `EPOCHS_PER_SUBNET_SUBSCRIPTION` epochs.
* Maintain advertisement of the selected subnets in their node's ENR `attnets` entry by setting the selected `subnet_id` bits to `True` (e.g. `ENR["attnets"][subnet_id] = True`) for all persistent attestation subnets.
* Select these subnets based on their node-id as specified by the following `compute_subscribed_subnets(node_id, epoch)` function.

*Note*: Short-lived beacon committee assignments should not be added into the ENR `attnets` entry.

```python
def compute_subscribed_subnet(node_id: NodeID, epoch: Epoch, index: int) -> SubnetID:
    node_id_prefix = node_id >> (NODE_ID_BITS - int(ATTESTATION_SUBNET_PREFIX_BITS))
    node_offset = node_id % EPOCHS_PER_SUBNET_SUBSCRIPTION
    permutation_seed = hash(uint_to_bytes(uint64((epoch + node_offset) // EPOCHS_PER_SUBNET_SUBSCRIPTION)))
    permutated_prefix = compute_shuffled_index(
        node_id_prefix,
        1 << int(ATTESTATION_SUBNET_PREFIX_BITS),
        permutation_seed,
    )
    return SubnetID((permutated_prefix + index) % ATTESTATION_SUBNET_COUNT)
```

*Note*: When preparing for a hard fork, a validator must select and subscribe to random subnets of the future fork versioning at least `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` epochs in advance of the fork. These new subnets for the fork are maintained in addition to those for the current fork until the fork occurs. After the fork occurs, let the subnets from the previous fork reach the end of life with no replacements.
```python
def compute_subscribed_subnets(node_id: NodeID, epoch: Epoch) -> Sequence[SubnetID]:
    return [compute_subscribed_subnet(node_id, epoch, index) for index in range(SUBNETS_PER_NODE)]
```
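
Illustrative usage only (not spec text; `subscribe_to_topic` and `advertise_in_attnets` are hypothetical client helpers): at each epoch a node derives its long-lived subscriptions from its node ID and acts on them.

```python
for subnet_id in compute_subscribed_subnets(node_id, current_epoch):
    subscribe_to_topic(f"beacon_attestation_{subnet_id}")  # hypothetical helper
    advertise_in_attnets(subnet_id)                        # hypothetical helper
```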
|
||||
|
||||
*Note*: When preparing for a hard fork, a validator must select and subscribe to subnets of the future fork versioning at least `EPOCHS_PER_SUBNET_SUBSCRIPTION` epochs in advance of the fork. These new subnets for the fork are maintained in addition to those for the current fork until the fork occurs. After the fork occurs, let the subnets from the previous fork reach the end of life with no replacements.
|
||||
|
||||
## How to avoid slashing
|
||||
|
||||
|
||||
@@ -375,7 +375,7 @@ Given all of this, we can say two things:
|
||||
justify an honest chain.
|
||||
2. **BNs which are syncing can optimistically import transition blocks.** In
|
||||
this case a justified chain of blocks already exists. The poison block would be
|
||||
quickly reverted and would have no affect on liveness.
|
||||
quickly reverted and would have no effect on liveness.
|
||||
|
||||
Astute readers will notice that (2) contains a glaring assumption about network
|
||||
liveness. This is necessary because a node cannot feasibly ascertain that the
|
||||
@@ -408,13 +408,13 @@ Such a scenario requires manual intervention.
|
||||
|
||||
An alternative to optimistic sync is to run a light client inside/alongside
|
||||
beacon nodes that mitigates the need for optimistic sync by providing
|
||||
tip-of-chain blocks to the execution engine. However, light clients comes with
|
||||
tip-of-chain blocks to the execution engine. However, light clients come with
|
||||
their own set of complexities. Relying on light clients may also restrict nodes
|
||||
from syncing from genesis, if they so desire.
|
||||
|
||||
A notable thing about optimistic sync is that it's *optional*. Should an
|
||||
implementation decide to go the light-client route, then they can just ignore
|
||||
optimistic sync all together.
|
||||
optimistic sync altogether.
|
||||
|
||||
### What if `TERMINAL_BLOCK_HASH` is used?
|
||||
|
||||
|
||||
@@ -1 +1 @@
|
||||
1.3.0-rc.3
|
||||
1.3.0
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
from eth_utils import encode_hex
|
||||
from dataclasses import (
|
||||
dataclass,
|
||||
field,
|
||||
)
|
||||
import os
|
||||
import time
|
||||
import shutil
|
||||
@@ -8,24 +11,80 @@ import sys
|
||||
import json
|
||||
from typing import Iterable, AnyStr, Any, Callable
|
||||
import traceback
|
||||
from collections import namedtuple
|
||||
|
||||
from ruamel.yaml import (
|
||||
YAML,
|
||||
)
|
||||
|
||||
from filelock import FileLock
|
||||
from snappy import compress
|
||||
from pathos.multiprocessing import ProcessingPool as Pool
|
||||
|
||||
from eth_utils import encode_hex
|
||||
|
||||
from eth2spec.test import context
|
||||
from eth2spec.test.exceptions import SkippedTest
|
||||
|
||||
from .gen_typing import TestProvider
|
||||
from .settings import (
|
||||
GENERATOR_MODE,
|
||||
MODE_MULTIPROCESSING,
|
||||
MODE_SINGLE_PROCESS,
|
||||
NUM_PROCESS,
|
||||
TIME_THRESHOLD_TO_PRINT,
|
||||
)
|
||||
|
||||
|
||||
# Flag that the runner does NOT run test via pytest
|
||||
context.is_pytest = False
|
||||
|
||||
|
||||
TIME_THRESHOLD_TO_PRINT = 1.0 # seconds
|
||||
@dataclass
|
||||
class Diagnostics(object):
|
||||
collected_test_count: int = 0
|
||||
generated_test_count: int = 0
|
||||
skipped_test_count: int = 0
|
||||
test_identifiers: list = field(default_factory=list)
|
||||
|
||||
|
||||
TestCaseParams = namedtuple(
|
||||
'TestCaseParams', [
|
||||
'test_case', 'case_dir', 'log_file', 'file_mode',
|
||||
])
|
||||
|
||||
|
||||
def worker_function(item):
|
||||
return generate_test_vector(*item)
|
||||
|
||||
|
||||
def get_default_yaml():
|
||||
yaml = YAML(pure=True)
|
||||
yaml.default_flow_style = None
|
||||
|
||||
def _represent_none(self, _):
|
||||
return self.represent_scalar('tag:yaml.org,2002:null', 'null')
|
||||
|
||||
yaml.representer.add_representer(type(None), _represent_none)
|
||||
|
||||
return yaml
|
||||
|
||||
|
||||
def get_cfg_yaml():
|
||||
# Spec config is using a YAML subset
|
||||
cfg_yaml = YAML(pure=True)
|
||||
cfg_yaml.default_flow_style = False # Emit separate line for each key
|
||||
|
||||
def cfg_represent_bytes(self, data):
|
||||
return self.represent_int(encode_hex(data))
|
||||
|
||||
cfg_yaml.representer.add_representer(bytes, cfg_represent_bytes)
|
||||
|
||||
def cfg_represent_quoted_str(self, data):
|
||||
return self.represent_scalar(u'tag:yaml.org,2002:str', data, style="'")
|
||||
|
||||
cfg_yaml.representer.add_representer(context.quoted_str, cfg_represent_quoted_str)
|
||||
return cfg_yaml
|
||||
|
||||
|
||||
def validate_output_dir(path_str):
|
||||
@@ -40,6 +99,47 @@ def validate_output_dir(path_str):
|
||||
return path
|
||||
|
||||
|
||||
def get_test_case_dir(test_case, output_dir):
|
||||
return (
|
||||
Path(output_dir) / Path(test_case.preset_name) / Path(test_case.fork_name)
|
||||
/ Path(test_case.runner_name) / Path(test_case.handler_name)
|
||||
/ Path(test_case.suite_name) / Path(test_case.case_name)
|
||||
)
|
||||
|
||||
|
||||
def get_test_identifier(test_case):
|
||||
return "::".join([
|
||||
test_case.preset_name,
|
||||
test_case.fork_name,
|
||||
test_case.runner_name,
|
||||
test_case.handler_name,
|
||||
test_case.suite_name,
|
||||
test_case.case_name
|
||||
])
|
||||
|
||||
|
||||
def get_incomplete_tag_file(case_dir):
|
||||
return case_dir / "INCOMPLETE"
|
||||
|
||||
|
||||
def should_skip_case_dir(case_dir, is_force, diagnostics_obj):
|
||||
is_skip = False
|
||||
incomplete_tag_file = get_incomplete_tag_file(case_dir)
|
||||
|
||||
if case_dir.exists():
|
||||
if not is_force and not incomplete_tag_file.exists():
|
||||
diagnostics_obj.skipped_test_count += 1
|
||||
print(f'Skipping already existing test: {case_dir}')
|
||||
is_skip = True
|
||||
else:
|
||||
print(f'Warning, output directory {case_dir} already exists,'
|
||||
' old files will be deleted and it will generate test vector files with the latest version')
|
||||
# Clear the existing case_dir folder
|
||||
shutil.rmtree(case_dir)
|
||||
|
||||
return is_skip, diagnostics_obj
|
||||
|
||||
|
||||
def run_generator(generator_name, test_providers: Iterable[TestProvider]):
|
||||
"""
|
||||
Implementation for a general test generator.
|
||||
@@ -94,28 +194,6 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
|
||||
else:
|
||||
file_mode = "w"
|
||||
|
||||
yaml = YAML(pure=True)
|
||||
yaml.default_flow_style = None
|
||||
|
||||
def _represent_none(self, _):
|
||||
return self.represent_scalar('tag:yaml.org,2002:null', 'null')
|
||||
|
||||
yaml.representer.add_representer(type(None), _represent_none)
|
||||
|
||||
# Spec config is using a YAML subset
|
||||
cfg_yaml = YAML(pure=True)
|
||||
cfg_yaml.default_flow_style = False # Emit separate line for each key
|
||||
|
||||
def cfg_represent_bytes(self, data):
|
||||
return self.represent_int(encode_hex(data))
|
||||
|
||||
cfg_yaml.representer.add_representer(bytes, cfg_represent_bytes)
|
||||
|
||||
def cfg_represent_quoted_str(self, data):
|
||||
return self.represent_scalar(u'tag:yaml.org,2002:str', data, style="'")
|
||||
|
||||
cfg_yaml.representer.add_representer(context.quoted_str, cfg_represent_quoted_str)
|
||||
|
||||
log_file = Path(output_dir) / 'testgen_error_log.txt'
|
||||
|
||||
print(f"Generating tests into {output_dir}")
|
||||
@@ -129,12 +207,13 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
|
||||
print(f"Filtering test-generator runs to only include presets: {', '.join(presets)}")
|
||||
|
||||
collect_only = args.collect_only
|
||||
collected_test_count = 0
|
||||
generated_test_count = 0
|
||||
skipped_test_count = 0
|
||||
test_identifiers = []
|
||||
|
||||
diagnostics_obj = Diagnostics()
|
||||
provider_start = time.time()
|
||||
|
||||
if GENERATOR_MODE == MODE_MULTIPROCESSING:
|
||||
all_test_case_params = []
|
||||
|
||||
for tprov in test_providers:
|
||||
if not collect_only:
|
||||
# runs anything that we don't want to repeat for every test case.
|
||||
@@ -145,146 +224,133 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
|
||||
if len(presets) != 0 and test_case.preset_name not in presets:
|
||||
continue
|
||||
|
||||
case_dir = (
|
||||
Path(output_dir) / Path(test_case.preset_name) / Path(test_case.fork_name)
|
||||
/ Path(test_case.runner_name) / Path(test_case.handler_name)
|
||||
/ Path(test_case.suite_name) / Path(test_case.case_name)
|
||||
)
|
||||
collected_test_count += 1
|
||||
case_dir = get_test_case_dir(test_case, output_dir)
|
||||
print(f"Collected test at: {case_dir}")
|
||||
diagnostics_obj.collected_test_count += 1
|
||||
|
||||
incomplete_tag_file = case_dir / "INCOMPLETE"
|
||||
is_skip, diagnostics_obj = should_skip_case_dir(case_dir, args.force, diagnostics_obj)
|
||||
if is_skip:
|
||||
continue
|
||||
|
||||
if case_dir.exists():
|
||||
if not args.force and not incomplete_tag_file.exists():
|
||||
skipped_test_count += 1
|
||||
print(f'Skipping already existing test: {case_dir}')
|
||||
continue
|
||||
else:
|
||||
print(f'Warning, output directory {case_dir} already exist,'
|
||||
f' old files will be deleted and it will generate test vector files with the latest version')
|
||||
# Clear the existing case_dir folder
|
||||
shutil.rmtree(case_dir)
|
||||
if GENERATOR_MODE == MODE_SINGLE_PROCESS:
|
||||
result = generate_test_vector(test_case, case_dir, log_file, file_mode)
|
||||
write_result_into_diagnostics_obj(result, diagnostics_obj)
|
||||
elif GENERATOR_MODE == MODE_MULTIPROCESSING:
|
||||
item = TestCaseParams(test_case, case_dir, log_file, file_mode)
|
||||
all_test_case_params.append(item)
|
||||
|
||||
print(f'Generating test: {case_dir}')
|
||||
test_start = time.time()
|
||||
if GENERATOR_MODE == MODE_MULTIPROCESSING:
|
||||
with Pool(processes=NUM_PROCESS) as pool:
|
||||
results = pool.map(worker_function, iter(all_test_case_params))
|
||||
|
||||
written_part = False
|
||||
|
||||
# Add `INCOMPLETE` tag file to indicate that the test generation has not completed.
|
||||
case_dir.mkdir(parents=True, exist_ok=True)
|
||||
with incomplete_tag_file.open("w") as f:
|
||||
f.write("\n")
|
||||
|
||||
try:
|
||||
def output_part(out_kind: str, name: str, fn: Callable[[Path, ], None]):
|
||||
# make sure the test case directory is created before any test part is written.
|
||||
case_dir.mkdir(parents=True, exist_ok=True)
|
||||
try:
|
||||
fn(case_dir)
|
||||
except IOError as e:
|
||||
error_message = (
|
||||
f'[Error] error when dumping test "{case_dir}", part "{name}", kind "{out_kind}": {e}'
|
||||
)
|
||||
# Write to error log file
|
||||
with log_file.open("a+") as f:
|
||||
f.write(error_message)
|
||||
traceback.print_exc(file=f)
|
||||
f.write('\n')
|
||||
|
||||
sys.exit(error_message)
|
||||
|
||||
meta = dict()
|
||||
|
||||
try:
|
||||
for (name, out_kind, data) in test_case.case_fn():
|
||||
written_part = True
|
||||
if out_kind == "meta":
|
||||
meta[name] = data
|
||||
elif out_kind == "cfg":
|
||||
output_part(out_kind, name, dump_yaml_fn(data, name, file_mode, cfg_yaml))
|
||||
elif out_kind == "data":
|
||||
output_part(out_kind, name, dump_yaml_fn(data, name, file_mode, yaml))
|
||||
elif out_kind == "ssz":
|
||||
output_part(out_kind, name, dump_ssz_fn(data, name, file_mode))
|
||||
else:
|
||||
assert False # Unknown kind
|
||||
except SkippedTest as e:
|
||||
print(e)
|
||||
skipped_test_count += 1
|
||||
shutil.rmtree(case_dir)
|
||||
continue
|
||||
|
||||
# Once all meta data is collected (if any), write it to a meta data file.
|
||||
if len(meta) != 0:
|
||||
written_part = True
|
||||
output_part("data", "meta", dump_yaml_fn(meta, "meta", file_mode, yaml))
|
||||
|
||||
if not written_part:
|
||||
print(f"test case {case_dir} did not produce any test case parts")
|
||||
except Exception as e:
|
||||
error_message = f"[ERROR] failed to generate vector(s) for test {case_dir}: {e}"
|
||||
# Write to error log file
|
||||
with log_file.open("a+") as f:
|
||||
f.write(error_message)
|
||||
traceback.print_exc(file=f)
|
||||
f.write('\n')
|
||||
traceback.print_exc()
|
||||
else:
|
||||
# If no written_part, the only file was incomplete_tag_file. Clear the existing case_dir folder.
|
||||
if not written_part:
|
||||
shutil.rmtree(case_dir)
|
||||
else:
|
||||
generated_test_count += 1
|
||||
test_identifier = "::".join([
|
||||
test_case.preset_name,
|
||||
test_case.fork_name,
|
||||
test_case.runner_name,
|
||||
test_case.handler_name,
|
||||
test_case.suite_name,
|
||||
test_case.case_name
|
||||
])
|
||||
test_identifiers.append(test_identifier)
|
||||
# Only remove `INCOMPLETE` tag file
|
||||
os.remove(incomplete_tag_file)
|
||||
test_end = time.time()
|
||||
span = round(test_end - test_start, 2)
|
||||
if span > TIME_THRESHOLD_TO_PRINT:
|
||||
print(f' - generated in {span} seconds')
|
||||
for result in results:
|
||||
write_result_into_diagnostics_obj(result, diagnostics_obj)
|
||||
|
||||
provider_end = time.time()
|
||||
span = round(provider_end - provider_start, 2)
|
||||
|
||||
if collect_only:
|
||||
print(f"Collected {collected_test_count} tests in total")
|
||||
print(f"Collected {diagnostics_obj.collected_test_count} tests in total")
|
||||
else:
|
||||
summary_message = f"completed generation of {generator_name} with {generated_test_count} tests"
|
||||
summary_message += f" ({skipped_test_count} skipped tests)"
|
||||
summary_message = f"completed generation of {generator_name} with {diagnostics_obj.generated_test_count} tests"
|
||||
summary_message += f" ({diagnostics_obj.skipped_test_count} skipped tests)"
|
||||
if span > TIME_THRESHOLD_TO_PRINT:
|
||||
summary_message += f" in {span} seconds"
|
||||
print(summary_message)
|
||||
diagnostics = {
|
||||
"collected_test_count": collected_test_count,
|
||||
"generated_test_count": generated_test_count,
|
||||
"skipped_test_count": skipped_test_count,
|
||||
"test_identifiers": test_identifiers,
|
||||
|
||||
diagnostics_output = {
|
||||
"collected_test_count": diagnostics_obj.collected_test_count,
|
||||
"generated_test_count": diagnostics_obj.generated_test_count,
|
||||
"skipped_test_count": diagnostics_obj.skipped_test_count,
|
||||
"test_identifiers": diagnostics_obj.test_identifiers,
|
||||
"durations": [f"{span} seconds"],
|
||||
}
|
||||
diagnostics_path = Path(os.path.join(output_dir, "diagnostics.json"))
|
||||
diagnostics_lock = FileLock(os.path.join(output_dir, "diagnostics.json.lock"))
|
||||
diagnostics_path = Path(os.path.join(output_dir, "diagnostics_obj.json"))
|
||||
diagnostics_lock = FileLock(os.path.join(output_dir, "diagnostics_obj.json.lock"))
|
||||
with diagnostics_lock:
|
||||
diagnostics_path.touch(exist_ok=True)
|
||||
if os.path.getsize(diagnostics_path) == 0:
|
||||
with open(diagnostics_path, "w+") as f:
|
||||
json.dump(diagnostics, f)
|
||||
json.dump(diagnostics_output, f)
|
||||
else:
|
||||
with open(diagnostics_path, "r+") as f:
|
||||
existing_diagnostics = json.load(f)
|
||||
for k, v in diagnostics.items():
|
||||
for k, v in diagnostics_output.items():
|
||||
existing_diagnostics[k] += v
|
||||
with open(diagnostics_path, "w+") as f:
|
||||
json.dump(existing_diagnostics, f)
|
||||
print(f"wrote diagnostics to {diagnostics_path}")
|
||||
print(f"wrote diagnostics_obj to {diagnostics_path}")
|
||||
|
||||
|
||||
def generate_test_vector(test_case, case_dir, log_file, file_mode):
|
||||
cfg_yaml = get_cfg_yaml()
|
||||
yaml = get_default_yaml()
|
||||
|
||||
written_part = False
|
||||
|
||||
print(f'Generating test: {case_dir}')
|
||||
test_start = time.time()
|
||||
|
||||
# Add `INCOMPLETE` tag file to indicate that the test generation has not completed.
|
||||
incomplete_tag_file = get_incomplete_tag_file(case_dir)
|
||||
case_dir.mkdir(parents=True, exist_ok=True)
|
||||
with incomplete_tag_file.open("w") as f:
|
||||
f.write("\n")
|
||||
|
||||
result = None
|
||||
try:
|
||||
meta = dict()
|
||||
try:
|
||||
written_part, meta = execute_test(test_case, case_dir, meta, log_file, file_mode, cfg_yaml, yaml)
|
||||
except SkippedTest as e:
|
||||
result = 0 # 0 means skipped
|
||||
print(e)
|
||||
shutil.rmtree(case_dir)
|
||||
return result
|
||||
|
||||
# Once all meta data is collected (if any), write it to a meta data file.
|
||||
if len(meta) != 0:
|
||||
written_part = True
|
||||
output_part(case_dir, log_file, "data", "meta", dump_yaml_fn(meta, "meta", file_mode, yaml))
|
||||
|
||||
except Exception as e:
|
||||
result = -1 # -1 means error
|
||||
error_message = f"[ERROR] failed to generate vector(s) for test {case_dir}: {e}"
|
||||
# Write to error log file
|
||||
with log_file.open("a+") as f:
|
||||
f.write(error_message)
|
||||
traceback.print_exc(file=f)
|
||||
f.write('\n')
|
||||
print(error_message)
|
||||
traceback.print_exc()
|
||||
else:
|
||||
# If no written_part, the only file was incomplete_tag_file. Clear the existing case_dir folder.
|
||||
if not written_part:
|
||||
print(f"[Error] test case {case_dir} did not produce any written_part")
|
||||
shutil.rmtree(case_dir)
|
||||
result = -1
|
||||
else:
|
||||
result = get_test_identifier(test_case)
|
||||
# Only remove `INCOMPLETE` tag file
|
||||
os.remove(incomplete_tag_file)
|
||||
test_end = time.time()
|
||||
span = round(test_end - test_start, 2)
|
||||
if span > TIME_THRESHOLD_TO_PRINT:
|
||||
print(f' - generated in {span} seconds')
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def write_result_into_diagnostics_obj(result, diagnostics_obj):
|
||||
if result == -1: # error
|
||||
pass
|
||||
elif result == 0:
|
||||
diagnostics_obj.skipped_test_count += 1
|
||||
elif result is not None:
|
||||
diagnostics_obj.generated_test_count += 1
|
||||
diagnostics_obj.test_identifiers.append(result)
|
||||
else:
|
||||
raise Exception(f"Unexpected result: {result}")
|
||||
|
||||
|
||||
def dump_yaml_fn(data: Any, name: str, file_mode: str, yaml_encoder: YAML):
|
||||
@@ -292,9 +358,45 @@ def dump_yaml_fn(data: Any, name: str, file_mode: str, yaml_encoder: YAML):
|
||||
out_path = case_path / Path(name + '.yaml')
|
||||
with out_path.open(file_mode) as f:
|
||||
yaml_encoder.dump(data, f)
|
||||
f.close()
|
||||
return dump
|
||||
|
||||
|
||||
def output_part(case_dir, log_file, out_kind: str, name: str, fn: Callable[[Path, ], None]):
|
||||
# make sure the test case directory is created before any test part is written.
|
||||
case_dir.mkdir(parents=True, exist_ok=True)
|
||||
try:
|
||||
fn(case_dir)
|
||||
except (IOError, ValueError) as e:
|
||||
error_message = f'[Error] error when dumping test "{case_dir}", part "{name}", kind "{out_kind}": {e}'
|
||||
# Write to error log file
|
||||
with log_file.open("a+") as f:
|
||||
f.write(error_message)
|
||||
traceback.print_exc(file=f)
|
||||
f.write('\n')
|
||||
print(error_message)
|
||||
sys.exit(error_message)
|
||||
|
||||
|
||||
def execute_test(test_case, case_dir, meta, log_file, file_mode, cfg_yaml, yaml):
|
||||
result = test_case.case_fn()
|
||||
written_part = False
|
||||
for (name, out_kind, data) in result:
|
||||
written_part = True
|
||||
if out_kind == "meta":
|
||||
meta[name] = data
|
||||
elif out_kind == "cfg":
|
||||
output_part(case_dir, log_file, out_kind, name, dump_yaml_fn(data, name, file_mode, cfg_yaml))
|
||||
elif out_kind == "data":
|
||||
output_part(case_dir, log_file, out_kind, name, dump_yaml_fn(data, name, file_mode, yaml))
|
||||
elif out_kind == "ssz":
|
||||
output_part(case_dir, log_file, out_kind, name, dump_ssz_fn(data, name, file_mode))
|
||||
else:
|
||||
raise ValueError("Unknown out_kind %s" % out_kind)
|
||||
|
||||
return written_part, meta
|
||||
|
||||
|
||||
def dump_ssz_fn(data: AnyStr, name: str, file_mode: str):
|
||||
def dump(case_path: Path):
|
||||
out_path = case_path / Path(name + '.ssz_snappy')
|
||||
|
||||
tests/core/pyspec/eth2spec/gen_helpers/gen_base/settings.py (new file, 13 lines)
@@ -0,0 +1,13 @@
|
||||
import multiprocessing
|
||||
|
||||
|
||||
# Generator mode setting
|
||||
MODE_SINGLE_PROCESS = 'MODE_SINGLE_PROCESS'
|
||||
MODE_MULTIPROCESSING = 'MODE_MULTIPROCESSING'
|
||||
# Test generator mode
|
||||
GENERATOR_MODE = MODE_MULTIPROCESSING
|
||||
# Number of subprocesses when using MODE_MULTIPROCESSING
|
||||
NUM_PROCESS = multiprocessing.cpu_count() // 2 - 1
|
||||
|
||||
# Diagnostics
|
||||
TIME_THRESHOLD_TO_PRINT = 1.0 # seconds
|
||||
@@ -54,7 +54,15 @@ def test_genesis_random_scores(spec, state):
|
||||
#
|
||||
|
||||
def run_inactivity_scores_test(spec, state, participation_fn=None, inactivity_scores_fn=None, rng=Random(10101)):
|
||||
next_epoch_via_block(spec, state)
|
||||
while True:
|
||||
try:
|
||||
next_epoch_via_block(spec, state)
|
||||
except AssertionError:
|
||||
# If the proposer is slashed, we skip this epoch and try to propose a block at the next epoch
|
||||
next_epoch(spec, state)
|
||||
else:
|
||||
break
|
||||
|
||||
if participation_fn is not None:
|
||||
participation_fn(spec, state, rng=rng)
|
||||
if inactivity_scores_fn is not None:
|
||||
@@ -363,7 +371,7 @@ def test_randomized_state(spec, state):
|
||||
their inactivity score does not change.
|
||||
"""
|
||||
rng = Random(10011001)
|
||||
_run_randomized_state_test_for_inactivity_updates(spec, state, rng=rng)
|
||||
yield from _run_randomized_state_test_for_inactivity_updates(spec, state, rng=rng)
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@@ -377,6 +385,6 @@ def test_randomized_state_leaking(spec, state):
|
||||
(refer to `get_eligible_validator_indices`).
|
||||
"""
|
||||
rng = Random(10011002)
|
||||
_run_randomized_state_test_for_inactivity_updates(spec, state, rng=rng)
|
||||
yield from _run_randomized_state_test_for_inactivity_updates(spec, state, rng=rng)
|
||||
# Check still in leak
|
||||
assert spec.is_in_inactivity_leak(state)
|
||||
|
||||
@@ -26,6 +26,7 @@ from eth2spec.test.helpers.fork_transition import (
|
||||
from eth2spec.test.helpers.forks import (
|
||||
is_post_capella, is_post_deneb,
|
||||
is_post_fork,
|
||||
is_post_eip6110,
|
||||
)
|
||||
from eth2spec.test.helpers.light_client import (
|
||||
get_sync_aggregate,
|
||||
@@ -57,6 +58,10 @@ def needs_upgrade_to_deneb(d_spec, s_spec):
|
||||
return is_post_deneb(s_spec) and not is_post_deneb(d_spec)
|
||||
|
||||
|
||||
def needs_upgrade_to_eip6110(d_spec, s_spec):
|
||||
return is_post_eip6110(s_spec) and not is_post_eip6110(d_spec)
|
||||
|
||||
|
||||
def check_lc_header_equal(d_spec, s_spec, data, upgraded):
|
||||
assert upgraded.beacon.slot == data.beacon.slot
|
||||
assert upgraded.beacon.hash_tree_root() == data.beacon.hash_tree_root()
|
||||
@@ -84,6 +89,10 @@ def upgrade_lc_bootstrap_to_store(d_spec, s_spec, data):
|
||||
upgraded = s_spec.upgrade_lc_bootstrap_to_deneb(upgraded)
|
||||
check_lc_bootstrap_equal(d_spec, s_spec, data, upgraded)
|
||||
|
||||
if needs_upgrade_to_eip6110(d_spec, s_spec):
|
||||
upgraded = s_spec.upgrade_lc_bootstrap_to_eip6110(upgraded)
|
||||
check_lc_bootstrap_equal(d_spec, s_spec, data, upgraded)
|
||||
|
||||
return upgraded
|
||||
|
||||
|
||||
@@ -145,6 +154,8 @@ class LightClientSyncTest(object):
|
||||
|
||||
|
||||
def get_store_fork_version(s_spec):
|
||||
if is_post_eip6110(s_spec):
|
||||
return s_spec.config.EIP6110_FORK_VERSION
|
||||
if is_post_deneb(s_spec):
|
||||
return s_spec.config.DENEB_FORK_VERSION
|
||||
if is_post_capella(s_spec):
|
||||
@@ -668,10 +679,9 @@ def run_test_single_fork(spec, phases, state, fork):
|
||||
# Upgrade to post-fork spec, attested block is still before the fork
|
||||
attested_block = block.copy()
|
||||
attested_state = state.copy()
|
||||
state, _ = do_fork(state, spec, phases[fork], fork_epoch, with_block=False)
|
||||
sync_aggregate, _ = get_sync_aggregate(phases[fork], state)
|
||||
state, block = do_fork(state, spec, phases[fork], fork_epoch, sync_aggregate=sync_aggregate)
|
||||
spec = phases[fork]
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases)
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
@@ -755,18 +765,16 @@ def run_test_multi_fork(spec, phases, state, fork_1, fork_2):
|
||||
# ..., attested is from `fork_1`, ...
|
||||
fork_1_epoch = getattr(phases[fork_1].config, fork_1.upper() + '_FORK_EPOCH')
|
||||
transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_1_epoch) - 1)
|
||||
state, _ = do_fork(state, spec, phases[fork_1], fork_1_epoch, with_block=False)
|
||||
state, attested_block = do_fork(state, spec, phases[fork_1], fork_1_epoch)
|
||||
spec = phases[fork_1]
|
||||
attested_block = state_transition_with_full_block(spec, state, True, True)
|
||||
attested_state = state.copy()
|
||||
|
||||
# ..., and signature is from `fork_2`
|
||||
fork_2_epoch = getattr(phases[fork_2].config, fork_2.upper() + '_FORK_EPOCH')
|
||||
transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_2_epoch) - 1)
|
||||
state, _ = do_fork(state, spec, phases[fork_2], fork_2_epoch, with_block=False)
|
||||
sync_aggregate, _ = get_sync_aggregate(phases[fork_2], state)
|
||||
state, block = do_fork(state, spec, phases[fork_2], fork_2_epoch, sync_aggregate=sync_aggregate)
|
||||
spec = phases[fork_2]
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
|
||||
# Check that update applies
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases)
|
||||
|
||||
@@ -8,11 +8,13 @@ from eth2spec.altair import mainnet as spec_altair_mainnet, minimal as spec_alta
|
||||
from eth2spec.bellatrix import mainnet as spec_bellatrix_mainnet, minimal as spec_bellatrix_minimal
|
||||
from eth2spec.capella import mainnet as spec_capella_mainnet, minimal as spec_capella_minimal
|
||||
from eth2spec.deneb import mainnet as spec_deneb_mainnet, minimal as spec_deneb_minimal
|
||||
from eth2spec.eip6110 import mainnet as spec_eip6110_mainnet, minimal as spec_eip6110_minimal
|
||||
from eth2spec.utils import bls
|
||||
|
||||
from .exceptions import SkippedTest
|
||||
from .helpers.constants import (
|
||||
PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB,
|
||||
EIP6110,
|
||||
MINIMAL, MAINNET,
|
||||
ALL_PHASES,
|
||||
ALL_FORK_UPGRADES,
|
||||
@@ -79,13 +81,15 @@ spec_targets: Dict[PresetBaseName, Dict[SpecForkName, Spec]] = {
|
||||
BELLATRIX: spec_bellatrix_minimal,
|
||||
CAPELLA: spec_capella_minimal,
|
||||
DENEB: spec_deneb_minimal,
|
||||
EIP6110: spec_eip6110_minimal,
|
||||
},
|
||||
MAINNET: {
|
||||
PHASE0: spec_phase0_mainnet,
|
||||
ALTAIR: spec_altair_mainnet,
|
||||
BELLATRIX: spec_bellatrix_mainnet,
|
||||
CAPELLA: spec_capella_mainnet,
|
||||
DENEB: spec_deneb_mainnet
|
||||
DENEB: spec_deneb_mainnet,
|
||||
EIP6110: spec_eip6110_mainnet,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -428,6 +432,7 @@ with_altair_and_later = with_all_phases_from(ALTAIR)
|
||||
with_bellatrix_and_later = with_all_phases_from(BELLATRIX)
|
||||
with_capella_and_later = with_all_phases_from(CAPELLA)
|
||||
with_deneb_and_later = with_all_phases_from(DENEB)
|
||||
with_eip6110_and_later = with_all_phases_from(EIP6110)
|
||||
|
||||
|
||||
def _get_preset_targets(kw):
|
||||
@@ -555,7 +560,7 @@ def _get_basic_dict(ssz_dict: Dict[str, Any]) -> Dict[str, Any]:
|
||||
return result
|
||||
|
||||
|
||||
def _get_copy_of_spec(spec):
|
||||
def get_copy_of_spec(spec):
|
||||
fork = spec.fork
|
||||
preset = spec.config.PRESET_BASE
|
||||
module_path = f"eth2spec.{fork}.{preset}"
|
||||
@@ -596,14 +601,14 @@ def with_config_overrides(config_overrides, emitted_fork=None, emit=True):
|
||||
def decorator(fn):
|
||||
def wrapper(*args, spec: Spec, **kw):
|
||||
# Apply config overrides to spec
|
||||
spec, output_config = spec_with_config_overrides(_get_copy_of_spec(spec), config_overrides)
|
||||
spec, output_config = spec_with_config_overrides(get_copy_of_spec(spec), config_overrides)
|
||||
|
||||
# Apply config overrides to additional phases, if present
|
||||
if 'phases' in kw:
|
||||
phases = {}
|
||||
for fork in kw['phases']:
|
||||
phases[fork], output = spec_with_config_overrides(
|
||||
_get_copy_of_spec(kw['phases'][fork]), config_overrides)
|
||||
get_copy_of_spec(kw['phases'][fork]), config_overrides)
|
||||
if emitted_fork == fork:
|
||||
output_config = output
|
||||
kw['phases'] = phases
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
import random
|
||||
|
||||
from eth2spec.test.helpers.state import (
|
||||
state_transition_and_sign_block
|
||||
)
|
||||
@@ -16,15 +18,14 @@ from eth2spec.test.helpers.sharding import (
|
||||
)
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_one_blob(spec, state):
|
||||
def run_block_with_blobs(spec, state, blob_count, excess_data_gas=1):
|
||||
yield 'pre', state
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
opaque_tx, _, blob_kzg_commitments = get_sample_opaque_tx(spec)
|
||||
opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec, blob_count=blob_count)
|
||||
block.body.blob_kzg_commitments = blob_kzg_commitments
|
||||
block.body.execution_payload.transactions = [opaque_tx]
|
||||
block.body.execution_payload.excess_data_gas = excess_data_gas
|
||||
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
|
||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||
|
||||
@@ -32,13 +33,136 @@ def test_one_blob(spec, state):
|
||||
yield 'post', state
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_zero_blob(spec, state):
|
||||
yield from run_block_with_blobs(spec, state, blob_count=0)
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_one_blob(spec, state):
|
||||
yield from run_block_with_blobs(spec, state, blob_count=1)
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_max_blobs(spec, state):
|
||||
yield from run_block_with_blobs(spec, state, blob_count=spec.MAX_BLOBS_PER_BLOCK)
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_incorrect_blob_tx_type(spec, state):
|
||||
yield 'pre', state
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
opaque_tx, _, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=spec.MAX_BLOBS_PER_BLOCK)
|
||||
opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec)
|
||||
block.body.blob_kzg_commitments = blob_kzg_commitments
|
||||
opaque_tx = b'\x04' + opaque_tx[1:] # incorrect tx type
|
||||
block.body.execution_payload.transactions = [opaque_tx]
|
||||
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
|
||||
signed_block = state_transition_and_sign_block(spec, state, block, expect_fail=True)
|
||||
|
||||
yield 'blocks', [signed_block]
|
||||
yield 'post', None
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_incorrect_transaction_length_1_byte(spec, state):
|
||||
yield 'pre', state
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec)
|
||||
block.body.blob_kzg_commitments = blob_kzg_commitments
|
||||
opaque_tx = opaque_tx + b'\x12' # incorrect tx length
|
||||
block.body.execution_payload.transactions = [opaque_tx]
|
||||
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
|
||||
signed_block = state_transition_and_sign_block(spec, state, block, expect_fail=True)
|
||||
|
||||
yield 'blocks', [signed_block]
|
||||
yield 'post', None
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_incorrect_transaction_length_32_bytes(spec, state):
|
||||
yield 'pre', state
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec)
|
||||
block.body.blob_kzg_commitments = blob_kzg_commitments
|
||||
opaque_tx = opaque_tx + b'\x12' * 32 # incorrect tx length
|
||||
block.body.execution_payload.transactions = [opaque_tx]
|
||||
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
|
||||
signed_block = state_transition_and_sign_block(spec, state, block, expect_fail=True)
|
||||
|
||||
yield 'blocks', [signed_block]
|
||||
yield 'post', None
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_incorrect_commitment(spec, state):
|
||||
yield 'pre', state
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec)
|
||||
blob_kzg_commitments[0] = b'\x12' * 48 # incorrect commitment
|
||||
block.body.blob_kzg_commitments = blob_kzg_commitments
|
||||
block.body.execution_payload.transactions = [opaque_tx]
|
||||
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
|
||||
signed_block = state_transition_and_sign_block(spec, state, block, expect_fail=True)
|
||||
|
||||
yield 'blocks', [signed_block]
|
||||
yield 'post', None
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_incorrect_commitments_order(spec, state):
|
||||
yield 'pre', state
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec, blob_count=2, rng=random.Random(1111))
|
||||
block.body.blob_kzg_commitments = [blob_kzg_commitments[1], blob_kzg_commitments[0]] # incorrect order
|
||||
block.body.execution_payload.transactions = [opaque_tx]
|
||||
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
|
||||
signed_block = state_transition_and_sign_block(spec, state, block, expect_fail=True)
|
||||
|
||||
yield 'blocks', [signed_block]
|
||||
yield 'post', None
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_incorrect_block_hash(spec, state):
|
||||
yield 'pre', state
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec)
|
||||
block.body.blob_kzg_commitments = blob_kzg_commitments
|
||||
block.body.execution_payload.transactions = [opaque_tx]
|
||||
block.body.execution_payload.block_hash = b'\x12' * 32 # incorrect block hash
|
||||
# CL itself doesn't verify EL block hash
|
||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||
|
||||
yield 'blocks', [signed_block]
|
||||
yield 'post', state
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_zeroed_commitment(spec, state):
|
||||
"""
|
||||
The blob is invalid, but the commitment is in correct form.
|
||||
"""
|
||||
yield 'pre', state
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec, blob_count=1, is_valid_blob=False)
|
||||
assert all(commitment == b'\x00' * 48 for commitment in blob_kzg_commitments)
|
||||
block.body.blob_kzg_commitments = blob_kzg_commitments
|
||||
block.body.execution_payload.transactions = [opaque_tx]
|
||||
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
|
||||
|
||||
@@ -18,14 +18,13 @@ from eth2spec.test.helpers.sharding import (
|
||||
|
||||
def _run_validate_blobs(spec, state, blob_count):
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
opaque_tx, blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count)
|
||||
opaque_tx, blobs, blob_kzg_commitments, kzg_proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
|
||||
block.body.blob_kzg_commitments = blob_kzg_commitments
|
||||
block.body.execution_payload.transactions = [opaque_tx]
|
||||
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
|
||||
state_transition_and_sign_block(spec, state, block)
|
||||
|
||||
# Also test the proof generation in `get_blob_sidecars`
|
||||
blob_sidecars = spec.get_blob_sidecars(block, blobs)
|
||||
blob_sidecars = spec.get_blob_sidecars(block, blobs, kzg_proofs)
|
||||
blobs = [sidecar.blob for sidecar in blob_sidecars]
|
||||
kzg_proofs = [sidecar.kzg_proof for sidecar in blob_sidecars]
|
||||
spec.validate_blobs(blob_kzg_commitments, blobs, kzg_proofs)
|
||||
|
||||
@@ -1,20 +1,80 @@
|
||||
import random
|
||||
|
||||
from eth2spec.test.context import (
|
||||
spec_state_test,
|
||||
spec_test,
|
||||
single_phase,
|
||||
with_deneb_and_later,
|
||||
expect_assertion_error,
|
||||
always_bls
|
||||
)
|
||||
from eth2spec.test.helpers.sharding import (
|
||||
get_sample_blob,
|
||||
get_poly_in_both_forms,
|
||||
eval_poly_in_coeff_form,
|
||||
)
|
||||
from eth2spec.utils import bls
|
||||
from eth2spec.utils.bls import BLS_MODULUS
|
||||
|
||||
G1 = bls.G1_to_bytes48(bls.G1())
|
||||
P1_NOT_IN_G1 = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" +
|
||||
"0123456789abcdef0123456789abcdef0123456789abcdef")
|
||||
P1_NOT_ON_CURVE = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" +
|
||||
"0123456789abcdef0123456789abcdef0123456789abcde0")
|
||||
|
||||
|
||||
def bls_add_one(x):
|
||||
"""
|
||||
Adds "one" (actually bls.G1()) to a compressed group element.
|
||||
Useful to compute definitely incorrect proofs.
|
||||
"""
|
||||
return bls.G1_to_bytes48(
|
||||
bls.add(bls.bytes48_to_G1(x), bls.G1())
|
||||
)
|
||||
|
||||
|
||||
def field_element_bytes(x):
|
||||
return int.to_bytes(x % BLS_MODULUS, 32, "little")
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_verify_kzg_proof(spec, state):
|
||||
x = 3
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_verify_kzg_proof(spec):
|
||||
"""
|
||||
Test the wrapper functions (taking bytes arguments) for computing and verifying KZG proofs.
|
||||
"""
|
||||
x = field_element_bytes(3)
|
||||
blob = get_sample_blob(spec)
|
||||
commitment = spec.blob_to_kzg_commitment(blob)
|
||||
proof, y = spec.compute_kzg_proof(blob, x)
|
||||
|
||||
assert spec.verify_kzg_proof(commitment, x, y, proof)
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_verify_kzg_proof_incorrect_proof(spec):
|
||||
"""
|
||||
Test the wrapper function `verify_kzg_proof` fails on an incorrect proof.
|
||||
"""
|
||||
x = field_element_bytes(3465)
|
||||
blob = get_sample_blob(spec)
|
||||
commitment = spec.blob_to_kzg_commitment(blob)
|
||||
proof, y = spec.compute_kzg_proof(blob, x)
|
||||
proof = bls_add_one(proof)
|
||||
|
||||
assert not spec.verify_kzg_proof(commitment, x, y, proof)
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_verify_kzg_proof_impl(spec):
|
||||
"""
|
||||
Test the implementation functions (taking field element arguments) for computing and verifying KZG proofs.
|
||||
"""
|
||||
x = BLS_MODULUS - 1
|
||||
blob = get_sample_blob(spec)
|
||||
commitment = spec.blob_to_kzg_commitment(blob)
|
||||
polynomial = spec.blob_to_polynomial(blob)
|
||||
@@ -24,8 +84,26 @@ def test_verify_kzg_proof(spec, state):
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_barycentric_outside_domain(spec, state):
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_verify_kzg_proof_impl_incorrect_proof(spec):
|
||||
"""
|
||||
Test the implementation function `verify_kzg_proof` fails on an incorrect proof.
|
||||
"""
|
||||
x = 324561
|
||||
blob = get_sample_blob(spec)
|
||||
commitment = spec.blob_to_kzg_commitment(blob)
|
||||
polynomial = spec.blob_to_polynomial(blob)
|
||||
proof, y = spec.compute_kzg_proof_impl(polynomial, x)
|
||||
proof = bls_add_one(proof)
|
||||
|
||||
assert not spec.verify_kzg_proof_impl(commitment, x, y, proof)
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_barycentric_outside_domain(spec):
|
||||
"""
|
||||
Test barycentric formula correctness by using it to evaluate a polynomial at a bunch of points outside its domain
|
||||
(the roots of unity).
|
||||
@@ -42,9 +120,9 @@ def test_barycentric_outside_domain(spec, state):
|
||||
|
||||
for _ in range(n_samples):
|
||||
# Get a random evaluation point and make sure it's not a root of unity
|
||||
z = rng.randint(0, spec.BLS_MODULUS - 1)
|
||||
z = rng.randint(0, BLS_MODULUS - 1)
|
||||
while z in roots_of_unity_brp:
|
||||
z = rng.randint(0, spec.BLS_MODULUS - 1)
|
||||
z = rng.randint(0, BLS_MODULUS - 1)
|
||||
|
||||
# Get p(z) by evaluating poly in coefficient form
|
||||
p_z_coeff = eval_poly_in_coeff_form(spec, poly_coeff, z)
|
||||
@@ -57,8 +135,9 @@ def test_barycentric_outside_domain(spec, state):
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_barycentric_within_domain(spec, state):
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_barycentric_within_domain(spec):
|
||||
"""
|
||||
Test barycentric formula correctness by using it to evaluate a polynomial at all the points of its domain
|
||||
(the roots of unity).
|
||||
@@ -89,8 +168,9 @@ def test_barycentric_within_domain(spec, state):
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_compute_kzg_proof_within_domain(spec, state):
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_compute_kzg_proof_within_domain(spec):
|
||||
"""
|
||||
Create and verify KZG proof that p(z) == y
|
||||
where z is in the domain of our KZG scheme (i.e. a relevant root of unity).
|
||||
@@ -105,3 +185,147 @@ def test_compute_kzg_proof_within_domain(spec, state):
|
||||
proof, y = spec.compute_kzg_proof_impl(polynomial, z)
|
||||
|
||||
assert spec.verify_kzg_proof_impl(commitment, z, y, proof)
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_verify_blob_kzg_proof(spec):
|
||||
"""
|
||||
Test the functions to compute and verify a blob KZG proof
|
||||
"""
|
||||
blob = get_sample_blob(spec)
|
||||
commitment = spec.blob_to_kzg_commitment(blob)
|
||||
proof = spec.compute_blob_kzg_proof(blob, commitment)
|
||||
|
||||
assert spec.verify_blob_kzg_proof(blob, commitment, proof)
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_verify_blob_kzg_proof_incorrect_proof(spec):
|
||||
"""
|
||||
Check that `verify_blob_kzg_proof` fails on an incorrect proof
|
||||
"""
|
||||
blob = get_sample_blob(spec)
|
||||
commitment = spec.blob_to_kzg_commitment(blob)
|
||||
proof = spec.compute_blob_kzg_proof(blob, commitment)
|
||||
proof = bls_add_one(proof)
|
||||
|
||||
assert not spec.verify_blob_kzg_proof(blob, commitment, proof)
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_bls_modular_inverse(spec):
|
||||
"""
|
||||
Verify computation of multiplicative inverse
|
||||
"""
|
||||
rng = random.Random(5566)
|
||||
|
||||
# Should fail for x == 0
|
||||
expect_assertion_error(lambda: spec.bls_modular_inverse(0))
|
||||
expect_assertion_error(lambda: spec.bls_modular_inverse(spec.BLS_MODULUS))
|
||||
expect_assertion_error(lambda: spec.bls_modular_inverse(2 * spec.BLS_MODULUS))
|
||||
|
||||
# Test a trivial inversion
|
||||
assert 1 == int(spec.bls_modular_inverse(1))
|
||||
|
||||
# Test a random inversion
|
||||
r = rng.randint(0, spec.BLS_MODULUS - 1)
|
||||
r_inv = int(spec.bls_modular_inverse(r))
|
||||
assert r * r_inv % BLS_MODULUS == 1
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_validate_kzg_g1_generator(spec):
|
||||
"""
|
||||
Verify that `validate_kzg_g1` allows the generator G1
|
||||
"""
|
||||
|
||||
spec.validate_kzg_g1(bls.G1_to_bytes48(bls.G1()))
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_validate_kzg_g1_neutral_element(spec):
|
||||
"""
|
||||
Verify that `validate_kzg_g1` allows the neutral element in G1
|
||||
"""
|
||||
|
||||
spec.validate_kzg_g1(bls.G1_to_bytes48(bls.Z1()))
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_test
|
||||
@single_phase
|
||||
@always_bls
|
||||
def test_validate_kzg_g1_not_in_g1(spec):
|
||||
"""
|
||||
Verify that `validate_kzg_g1` fails on point not in G1
|
||||
"""
|
||||
|
||||
expect_assertion_error(lambda: spec.validate_kzg_g1(P1_NOT_IN_G1))
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_test
|
||||
@single_phase
|
||||
@always_bls
|
||||
def test_validate_kzg_g1_not_on_curve(spec):
|
||||
"""
|
||||
Verify that `validate_kzg_g1` fails on a point that is not on the curve
|
||||
"""
|
||||
|
||||
expect_assertion_error(lambda: spec.validate_kzg_g1(P1_NOT_ON_CURVE))
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_bytes_to_bls_field_zero(spec):
|
||||
"""
|
||||
Verify that `bytes_to_bls_field` handles zero
|
||||
"""
|
||||
|
||||
spec.bytes_to_bls_field(b"\0" * 32)
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_bytes_to_bls_field_modulus_minus_one(spec):
|
||||
"""
|
||||
Verify that `bytes_to_bls_field` handles modulus minus one
|
||||
"""
|
||||
|
||||
spec.bytes_to_bls_field((BLS_MODULUS - 1).to_bytes(spec.BYTES_PER_FIELD_ELEMENT, spec.ENDIANNESS))
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_bytes_to_bls_field_modulus(spec):
|
||||
"""
|
||||
Verify that `bytes_to_bls_field` fails on BLS modulus
|
||||
"""
|
||||
|
||||
expect_assertion_error(lambda: spec.bytes_to_bls_field(
|
||||
BLS_MODULUS.to_bytes(spec.BYTES_PER_FIELD_ELEMENT, spec.ENDIANNESS)
|
||||
))
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_bytes_to_bls_field_max(spec):
|
||||
"""
|
||||
Verify that `bytes_to_bls_field` fails on 2**256 - 1
|
||||
"""
|
||||
|
||||
expect_assertion_error(lambda: spec.bytes_to_bls_field(b"\xFF" * 32))
|
||||
|
||||
@@ -1,21 +0,0 @@
|
||||
|
||||
from eth2spec.test.helpers.constants import (
|
||||
DENEB,
|
||||
MINIMAL,
|
||||
)
|
||||
from eth2spec.test.helpers.sharding import (
|
||||
get_sample_blob,
|
||||
)
|
||||
from eth2spec.test.context import (
|
||||
with_phases,
|
||||
spec_state_test,
|
||||
with_presets,
|
||||
)
|
||||
|
||||
|
||||
@with_phases([DENEB])
|
||||
@spec_state_test
|
||||
@with_presets([MINIMAL])
|
||||
def test_blob_to_kzg_commitment(spec, state):
|
||||
blob = get_sample_blob(spec)
|
||||
spec.blob_to_kzg_commitment(blob)
|
||||
@@ -17,7 +17,7 @@ from eth2spec.test.context import (
|
||||
@spec_state_test
|
||||
@with_presets([MINIMAL])
|
||||
def test_tx_peek_blob_versioned_hashes(spec, state):
|
||||
otx, blobs, commitments = get_sample_opaque_tx(spec)
|
||||
otx, _, commitments, _ = get_sample_opaque_tx(spec)
|
||||
data_hashes = spec.tx_peek_blob_versioned_hashes(otx)
|
||||
expected = [spec.kzg_commitment_to_versioned_hash(blob_commitment) for blob_commitment in commitments]
|
||||
assert expected == data_hashes
|
||||
|
||||
@@ -0,0 +1,158 @@
|
||||
from eth2spec.test.context import (
|
||||
always_bls,
|
||||
spec_state_test,
|
||||
with_deneb_and_later,
|
||||
expect_assertion_error
|
||||
)
|
||||
from eth2spec.test.helpers.execution_payload import (
|
||||
compute_el_block_hash,
|
||||
)
|
||||
from eth2spec.test.helpers.sharding import (
|
||||
get_sample_opaque_tx,
|
||||
)
|
||||
from eth2spec.test.helpers.block import (
|
||||
build_empty_block_for_next_slot
|
||||
)
|
||||
from eth2spec.test.helpers.keys import (
|
||||
pubkey_to_privkey
|
||||
)
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_validate_blobs_and_kzg_commitments(spec, state):
|
||||
"""
|
||||
Test `validate_blobs_and_kzg_commitments`
|
||||
"""
|
||||
blob_count = 4
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
|
||||
block.body.blob_kzg_commitments = blob_kzg_commitments
|
||||
block.body.execution_payload.transactions = [opaque_tx]
|
||||
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
|
||||
|
||||
spec.validate_blobs_and_kzg_commitments(block.body.execution_payload,
|
||||
blobs,
|
||||
blob_kzg_commitments,
|
||||
proofs)
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_validate_blobs_and_kzg_commitments_missing_blob(spec, state):
|
||||
"""
|
||||
Test `validate_blobs_and_kzg_commitments`
|
||||
"""
|
||||
blob_count = 4
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
|
||||
block.body.blob_kzg_commitments = blob_kzg_commitments
|
||||
block.body.execution_payload.transactions = [opaque_tx]
|
||||
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
|
||||
|
||||
expect_assertion_error(
|
||||
lambda: spec.validate_blobs_and_kzg_commitments(
|
||||
block.body.execution_payload,
|
||||
blobs[:-1],
|
||||
blob_kzg_commitments,
|
||||
proofs
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_validate_blobs_and_kzg_commitments_missing_proof(spec, state):
|
||||
"""
|
||||
Test `validate_blobs_and_kzg_commitments`
|
||||
"""
|
||||
blob_count = 4
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
|
||||
block.body.blob_kzg_commitments = blob_kzg_commitments
|
||||
block.body.execution_payload.transactions = [opaque_tx]
|
||||
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
|
||||
|
||||
expect_assertion_error(
|
||||
lambda: spec.validate_blobs_and_kzg_commitments(
|
||||
block.body.execution_payload,
|
||||
blobs,
|
||||
blob_kzg_commitments,
|
||||
proofs[:-1]
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_validate_blobs_and_kzg_commitments_incorrect_blob(spec, state):
|
||||
"""
|
||||
Test `validate_blobs_and_kzg_commitments`
|
||||
"""
|
||||
blob_count = 4
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
|
||||
block.body.blob_kzg_commitments = blob_kzg_commitments
|
||||
block.body.execution_payload.transactions = [opaque_tx]
|
||||
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
|
||||
|
||||
blobs[1] = spec.Blob(blobs[1][:13] + bytes([(blobs[1][13] + 1) % 256]) + blobs[1][14:])
|
||||
|
||||
expect_assertion_error(
|
||||
lambda: spec.validate_blobs_and_kzg_commitments(
|
||||
block.body.execution_payload,
|
||||
blobs,
|
||||
blob_kzg_commitments,
|
||||
proofs
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_blob_sidecar_signature(spec, state):
|
||||
"""
|
||||
Test `get_blob_sidecar_signature`
|
||||
"""
|
||||
blob_count = 4
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
|
||||
block.body.blob_kzg_commitments = blob_kzg_commitments
|
||||
block.body.execution_payload.transactions = [opaque_tx]
|
||||
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
|
||||
|
||||
blob_sidecars = spec.get_blob_sidecars(block, blobs, proofs)
|
||||
proposer = state.validators[blob_sidecars[1].proposer_index]
|
||||
privkey = pubkey_to_privkey[proposer.pubkey]
|
||||
sidecar_signature = spec.get_blob_sidecar_signature(state,
|
||||
blob_sidecars[1],
|
||||
privkey)
|
||||
|
||||
signed_blob_sidecar = spec.SignedBlobSidecar(message=blob_sidecars[1], signature=sidecar_signature)
|
||||
|
||||
assert spec.verify_blob_sidecar_signature(state, signed_blob_sidecar)
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_blob_sidecar_signature_incorrect(spec, state):
|
||||
"""
|
||||
Test `get_blob_sidecar_signature`
|
||||
"""
|
||||
blob_count = 4
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
|
||||
block.body.blob_kzg_commitments = blob_kzg_commitments
|
||||
block.body.execution_payload.transactions = [opaque_tx]
|
||||
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
|
||||
|
||||
blob_sidecars = spec.get_blob_sidecars(block, blobs, proofs)
|
||||
|
||||
sidecar_signature = spec.get_blob_sidecar_signature(state,
|
||||
blob_sidecars[1],
|
||||
123)
|
||||
|
||||
signed_blob_sidecar = spec.SignedBlobSidecar(message=blob_sidecars[1], signature=sidecar_signature)
|
||||
|
||||
assert not spec.verify_blob_sidecar_signature(state, signed_blob_sidecar)
|
||||
tests/core/pyspec/eth2spec/test/eip6110/__init__.py (new, empty file)
@@ -0,0 +1,282 @@
|
||||
from eth2spec.test.context import spec_state_test, always_bls, with_eip6110_and_later
|
||||
from eth2spec.test.helpers.deposits import (
|
||||
prepare_deposit_receipt,
|
||||
run_deposit_receipt_processing,
|
||||
run_deposit_receipt_processing_with_specific_fork_version
|
||||
)
|
||||
from eth2spec.test.helpers.state import next_epoch_via_block
|
||||
from eth2spec.test.helpers.withdrawals import set_validator_fully_withdrawable
|
||||
|
||||
|
||||
@with_eip6110_and_later
|
||||
@spec_state_test
|
||||
def test_new_deposit_under_max(spec, state):
|
||||
# fresh deposit = next validator index = validator appended to registry
|
||||
validator_index = len(state.validators)
|
||||
# effective balance will be 1 EFFECTIVE_BALANCE_INCREMENT smaller because of this small decrement.
|
||||
amount = spec.MAX_EFFECTIVE_BALANCE - 1
|
||||
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True)
|
||||
|
||||
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
|
||||
|
||||
|
||||
@with_eip6110_and_later
|
||||
@spec_state_test
|
||||
def test_new_deposit_max(spec, state):
|
||||
# fresh deposit = next validator index = validator appended to registry
|
||||
validator_index = len(state.validators)
|
||||
# effective balance will be exactly the same as balance.
|
||||
amount = spec.MAX_EFFECTIVE_BALANCE
|
||||
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True)
|
||||
|
||||
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
|
||||
|
||||
|
||||
@with_eip6110_and_later
|
||||
@spec_state_test
|
||||
def test_new_deposit_over_max(spec, state):
|
||||
# fresh deposit = next validator index = validator appended to registry
|
||||
validator_index = len(state.validators)
|
||||
# just 1 over the limit, effective balance should be set MAX_EFFECTIVE_BALANCE during processing
|
||||
amount = spec.MAX_EFFECTIVE_BALANCE + 1
|
||||
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True)
|
||||
|
||||
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
|
||||
|
||||
|
||||
@with_eip6110_and_later
|
||||
@spec_state_test
|
||||
def test_new_deposit_eth1_withdrawal_credentials(spec, state):
|
||||
# fresh deposit = next validator index = validator appended to registry
|
||||
validator_index = len(state.validators)
|
||||
withdrawal_credentials = (
|
||||
spec.ETH1_ADDRESS_WITHDRAWAL_PREFIX
|
||||
+ b'\x00' * 11 # specified 0s
|
||||
+ b'\x59' * 20 # a 20-byte eth1 address
|
||||
)
|
||||
amount = spec.MAX_EFFECTIVE_BALANCE
|
||||
deposit_receipt = prepare_deposit_receipt(
|
||||
spec,
|
||||
validator_index,
|
||||
amount,
|
||||
withdrawal_credentials=withdrawal_credentials,
|
||||
signed=True,
|
||||
)
|
||||
|
||||
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
|
||||
|
||||
|
||||
@with_eip6110_and_later
|
||||
@spec_state_test
|
||||
def test_new_deposit_non_versioned_withdrawal_credentials(spec, state):
|
||||
# fresh deposit = next validator index = validator appended to registry
|
||||
validator_index = len(state.validators)
|
||||
withdrawal_credentials = (
|
||||
b'\xFF' # Non specified withdrawal credentials version
|
||||
+ b'\x02' * 31 # Garbage bytes
|
||||
)
|
||||
amount = spec.MAX_EFFECTIVE_BALANCE
|
||||
deposit_receipt = prepare_deposit_receipt(
|
||||
spec,
|
||||
validator_index,
|
||||
amount,
|
||||
withdrawal_credentials=withdrawal_credentials,
|
||||
signed=True,
|
||||
)
|
||||
|
||||
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
|
||||
|
||||
|
||||
@with_eip6110_and_later
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_correct_sig_but_forked_state(spec, state):
|
||||
validator_index = len(state.validators)
|
||||
amount = spec.MAX_EFFECTIVE_BALANCE
|
||||
# deposits will always be valid, regardless of the current fork
|
||||
state.fork.current_version = spec.Version('0x1234abcd')
|
||||
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True)
|
||||
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
|
||||
|
||||
|
||||
@with_eip6110_and_later
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_incorrect_sig_new_deposit(spec, state):
|
||||
# fresh deposit = next validator index = validator appended to registry
|
||||
validator_index = len(state.validators)
|
||||
amount = spec.MAX_EFFECTIVE_BALANCE
|
||||
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount)
|
||||
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index, effective=False)
|
||||
|
||||
|
||||
@with_eip6110_and_later
|
||||
@spec_state_test
|
||||
def test_top_up__max_effective_balance(spec, state):
|
||||
validator_index = 0
|
||||
amount = spec.MAX_EFFECTIVE_BALANCE // 4
|
||||
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True)
|
||||
|
||||
state.balances[validator_index] = spec.MAX_EFFECTIVE_BALANCE
|
||||
state.validators[validator_index].effective_balance = spec.MAX_EFFECTIVE_BALANCE
|
||||
|
||||
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
|
||||
|
||||
assert state.balances[validator_index] == spec.MAX_EFFECTIVE_BALANCE + amount
|
||||
assert state.validators[validator_index].effective_balance == spec.MAX_EFFECTIVE_BALANCE
|
||||
|
||||
|
||||
@with_eip6110_and_later
|
||||
@spec_state_test
|
||||
def test_top_up__less_effective_balance(spec, state):
|
||||
validator_index = 0
|
||||
amount = spec.MAX_EFFECTIVE_BALANCE // 4
|
||||
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True)
|
||||
|
||||
initial_balance = spec.MAX_EFFECTIVE_BALANCE - 1000
|
||||
initial_effective_balance = spec.MAX_EFFECTIVE_BALANCE - spec.EFFECTIVE_BALANCE_INCREMENT
|
||||
state.balances[validator_index] = initial_balance
|
||||
state.validators[validator_index].effective_balance = initial_effective_balance
|
||||
|
||||
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
|
||||
|
||||
assert state.balances[validator_index] == initial_balance + amount
|
||||
# unchanged effective balance
|
||||
assert state.validators[validator_index].effective_balance == initial_effective_balance
|
||||
|
||||
|
||||
@with_eip6110_and_later
|
||||
@spec_state_test
|
||||
def test_top_up__zero_balance(spec, state):
|
||||
validator_index = 0
|
||||
amount = spec.MAX_EFFECTIVE_BALANCE // 4
|
||||
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True)
|
||||
|
||||
initial_balance = 0
|
||||
initial_effective_balance = 0
|
||||
state.balances[validator_index] = initial_balance
|
||||
state.validators[validator_index].effective_balance = initial_effective_balance
|
||||
|
||||
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
|
||||
|
||||
assert state.balances[validator_index] == initial_balance + amount
|
||||
# unchanged effective balance
|
||||
assert state.validators[validator_index].effective_balance == initial_effective_balance
|
||||
|
||||
|
||||
@with_eip6110_and_later
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_incorrect_sig_top_up(spec, state):
|
||||
validator_index = 0
|
||||
amount = spec.MAX_EFFECTIVE_BALANCE // 4
|
||||
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount)
|
||||
|
||||
# invalid signatures, in top-ups, are allowed!
|
||||
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
|
||||
|
||||
|
||||
@with_eip6110_and_later
|
||||
@spec_state_test
|
||||
def test_incorrect_withdrawal_credentials_top_up(spec, state):
|
||||
validator_index = 0
|
||||
amount = spec.MAX_EFFECTIVE_BALANCE // 4
|
||||
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(b"junk")[1:]
|
||||
deposit_receipt = prepare_deposit_receipt(
|
||||
spec,
|
||||
validator_index,
|
||||
amount,
|
||||
withdrawal_credentials=withdrawal_credentials
|
||||
)
|
||||
|
||||
# inconsistent withdrawal credentials, in top-ups, are allowed!
|
||||
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
|
||||
|
||||
|
||||
@with_eip6110_and_later
|
||||
@spec_state_test
|
||||
def test_key_validate_invalid_subgroup(spec, state):
|
||||
validator_index = len(state.validators)
|
||||
amount = spec.MAX_EFFECTIVE_BALANCE
|
||||
|
||||
# All-zero pubkey would not pass `bls.KeyValidate`, but `process_deposit_receipt` would not throw an exception.
|
||||
pubkey = b'\x00' * 48
|
||||
|
||||
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, pubkey=pubkey, signed=True)
|
||||
|
||||
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
|
||||
|
||||
|
||||
@with_eip6110_and_later
|
||||
@spec_state_test
|
||||
def test_key_validate_invalid_decompression(spec, state):
|
||||
validator_index = len(state.validators)
|
||||
amount = spec.MAX_EFFECTIVE_BALANCE
|
||||
|
||||
# `deserialization_fails_infinity_with_true_b_flag` BLS G1 deserialization test case.
|
||||
# This pubkey would not pass `bls.KeyValidate`, but `process_deposit_receipt` would not throw an exception.
|
||||
pubkey_hex = 'c01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
|
||||
pubkey = bytes.fromhex(pubkey_hex)
|
||||
|
||||
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, pubkey=pubkey, signed=True)
|
||||
|
||||
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
|
||||
|
||||
|
||||
@with_eip6110_and_later
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_ineffective_deposit_with_previous_fork_version(spec, state):
|
||||
# Since deposits are valid across forks, the domain is always set with `GENESIS_FORK_VERSION`.
|
||||
# It's an ineffective deposit because it fails at BLS sig verification.
|
||||
# NOTE: it was effective in Altair.
|
||||
assert state.fork.previous_version != state.fork.current_version
|
||||
|
||||
yield from run_deposit_receipt_processing_with_specific_fork_version(
|
||||
spec,
|
||||
state,
|
||||
fork_version=state.fork.previous_version,
|
||||
effective=False,
|
||||
)
|
||||
|
||||
|
||||
@with_eip6110_and_later
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_effective_deposit_with_genesis_fork_version(spec, state):
|
||||
assert spec.config.GENESIS_FORK_VERSION not in (state.fork.previous_version, state.fork.current_version)
|
||||
|
||||
yield from run_deposit_receipt_processing_with_specific_fork_version(
|
||||
spec,
|
||||
state,
|
||||
fork_version=spec.config.GENESIS_FORK_VERSION,
|
||||
)
|
||||
|
||||
|
||||
@with_eip6110_and_later
|
||||
@spec_state_test
|
||||
def test_success_top_up_to_withdrawn_validator(spec, state):
|
||||
validator_index = 0
|
||||
|
||||
# Fully withdraw validator
|
||||
set_validator_fully_withdrawable(spec, state, validator_index)
|
||||
assert state.balances[validator_index] > 0
|
||||
next_epoch_via_block(spec, state)
|
||||
assert state.balances[validator_index] == 0
|
||||
assert state.validators[validator_index].effective_balance > 0
|
||||
next_epoch_via_block(spec, state)
|
||||
assert state.validators[validator_index].effective_balance == 0
|
||||
|
||||
# Make a top-up deposit to the validator
|
||||
amount = spec.MAX_EFFECTIVE_BALANCE // 4
|
||||
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, len(state.validators), signed=True)
|
||||
|
||||
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
|
||||
|
||||
assert state.balances[validator_index] == amount
|
||||
assert state.validators[validator_index].effective_balance == 0
|
||||
|
||||
validator = state.validators[validator_index]
|
||||
balance = state.balances[validator_index]
|
||||
current_epoch = spec.get_current_epoch(state)
|
||||
assert spec.is_fully_withdrawable_validator(validator, balance, current_epoch)
|
||||
@@ -0,0 +1 @@
|
||||
from .test_deposit_transition import * # noqa: F401 F403
|
||||
@@ -0,0 +1,229 @@
|
||||
from eth2spec.test.helpers.block import (
|
||||
build_empty_block_for_next_slot,
|
||||
)
|
||||
from eth2spec.test.context import (
|
||||
spec_state_test,
|
||||
with_phases,
|
||||
EIP6110,
|
||||
)
|
||||
from eth2spec.test.helpers.deposits import (
|
||||
build_deposit_data,
|
||||
deposit_from_context,
|
||||
prepare_deposit_receipt,
|
||||
)
|
||||
from eth2spec.test.helpers.execution_payload import (
|
||||
compute_el_block_hash,
|
||||
)
|
||||
from eth2spec.test.helpers.keys import privkeys, pubkeys
|
||||
from eth2spec.test.helpers.state import (
|
||||
state_transition_and_sign_block
|
||||
)
|
||||
|
||||
|
||||
def run_deposit_transition_block(spec, state, block, top_up_keys=[], valid=True):
|
||||
"""
|
||||
Run ``process_block``, yielding:
|
||||
- pre-state ('pre')
|
||||
- block ('block')
|
||||
- post-state ('post').
|
||||
If ``valid == False``, run expecting ``AssertionError``
|
||||
"""
|
||||
yield 'pre', state
|
||||
|
||||
signed_block = state_transition_and_sign_block(spec, state, block, not valid)
|
||||
|
||||
yield 'blocks', [signed_block]
|
||||
yield 'post', state if valid else None
|
||||
|
||||
# Check that deposits are applied
|
||||
if valid:
|
||||
expected_pubkeys = [d.data.pubkey for d in block.body.deposits]
|
||||
deposit_receipts = block.body.execution_payload.deposit_receipts
|
||||
expected_pubkeys = expected_pubkeys + [d.pubkey for d in deposit_receipts if (d.pubkey not in top_up_keys)]
|
||||
actual_pubkeys = [v.pubkey for v in state.validators[len(state.validators) - len(expected_pubkeys):]]
|
||||
|
||||
assert actual_pubkeys == expected_pubkeys
|
||||
|
||||
|
||||
def prepare_state_and_block(spec,
|
||||
state,
|
||||
deposit_cnt,
|
||||
deposit_receipt_cnt,
|
||||
first_deposit_receipt_index=0,
|
||||
deposit_receipts_start_index=None,
|
||||
eth1_data_deposit_count=None):
|
||||
deposits = []
|
||||
deposit_receipts = []
|
||||
keypair_index = len(state.validators)
|
||||
|
||||
# Prepare deposits
|
||||
deposit_data_list = []
|
||||
for index in range(deposit_cnt):
|
||||
deposit_data = build_deposit_data(spec,
|
||||
pubkeys[keypair_index],
|
||||
privkeys[keypair_index],
|
||||
# use max effective balance
|
||||
spec.MAX_EFFECTIVE_BALANCE,
|
||||
# insecurely use pubkey as withdrawal key
|
||||
spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkeys[keypair_index])[1:],
|
||||
signed=True)
|
||||
deposit_data_list.append(deposit_data)
|
||||
keypair_index += 1
|
||||
|
||||
deposit_root = None
|
||||
for index in range(deposit_cnt):
|
||||
deposit, deposit_root, _ = deposit_from_context(spec, deposit_data_list, index)
|
||||
deposits.append(deposit)
|
||||
|
||||
if deposit_root:
|
||||
state.eth1_deposit_index = 0
|
||||
if not eth1_data_deposit_count:
|
||||
eth1_data_deposit_count = deposit_cnt
|
||||
state.eth1_data = spec.Eth1Data(deposit_root=deposit_root,
|
||||
deposit_count=eth1_data_deposit_count,
|
||||
block_hash=state.eth1_data.block_hash)
|
||||
|
||||
# Prepare deposit receipts
|
||||
for offset in range(deposit_receipt_cnt):
|
||||
deposit_receipt = prepare_deposit_receipt(spec,
|
||||
keypair_index,
|
||||
# use max effective balance
|
||||
spec.MAX_EFFECTIVE_BALANCE,
|
||||
first_deposit_receipt_index + offset,
|
||||
signed=True)
|
||||
deposit_receipts.append(deposit_receipt)
|
||||
keypair_index += 1
|
||||
|
||||
# Set start index if defined
|
||||
if deposit_receipts_start_index:
|
||||
state.deposit_receipts_start_index = deposit_receipts_start_index
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
|
||||
# Assign deposits and deposit receipts
|
||||
block.body.deposits = deposits
|
||||
block.body.execution_payload.deposit_receipts = deposit_receipts
|
||||
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
|
||||
|
||||
return state, block
|
||||
|
||||
|
||||
@with_phases([EIP6110])
|
||||
@spec_state_test
|
||||
def test_deposit_transition__start_index_is_set(spec, state):
|
||||
# 0 deposits, 2 deposit receipts, unset deposit_receipts_start_index
|
||||
state, block = prepare_state_and_block(spec, state,
|
||||
deposit_cnt=0,
|
||||
deposit_receipt_cnt=2,
|
||||
first_deposit_receipt_index=state.eth1_data.deposit_count + 11)
|
||||
|
||||
yield from run_deposit_transition_block(spec, state, block)
|
||||
|
||||
# deposit_receipts_start_index must be set to the index of the first receipt
|
||||
assert state.deposit_receipts_start_index == block.body.execution_payload.deposit_receipts[0].index
|
||||
|
||||
|
||||
@with_phases([EIP6110])
|
||||
@spec_state_test
|
||||
def test_deposit_transition__process_eth1_deposits(spec, state):
|
||||
# 3 deposits, 1 deposit receipt, state.eth1_data.deposit_count < state.deposit_receipts_start_index
|
||||
state, block = prepare_state_and_block(spec, state,
|
||||
deposit_cnt=3,
|
||||
deposit_receipt_cnt=1,
|
||||
first_deposit_receipt_index=11,
|
||||
deposit_receipts_start_index=7)
|
||||
|
||||
yield from run_deposit_transition_block(spec, state, block)
|
||||
|
||||
|
||||
@with_phases([EIP6110])
|
||||
@spec_state_test
|
||||
def test_deposit_transition__process_max_eth1_deposits(spec, state):
|
||||
# spec.MAX_DEPOSITS deposits, 1 deposit receipt, state.eth1_data.deposit_count > state.deposit_receipts_start_index
|
||||
# state.deposit_receipts_start_index == spec.MAX_DEPOSITS
|
||||
state, block = prepare_state_and_block(spec, state,
|
||||
deposit_cnt=spec.MAX_DEPOSITS,
|
||||
deposit_receipt_cnt=1,
|
||||
first_deposit_receipt_index=spec.MAX_DEPOSITS + 1,
|
||||
deposit_receipts_start_index=spec.MAX_DEPOSITS,
|
||||
eth1_data_deposit_count=23)
|
||||
|
||||
yield from run_deposit_transition_block(spec, state, block)
|
||||
|
||||
|
||||
@with_phases([EIP6110])
|
||||
@spec_state_test
|
||||
def test_deposit_transition__process_eth1_deposits_up_to_start_index(spec, state):
|
||||
# 3 deposits, 1 deposit receipt, state.eth1_data.deposit_count == state.deposit_receipts_start_index
|
||||
state, block = prepare_state_and_block(spec, state,
|
||||
deposit_cnt=3,
|
||||
deposit_receipt_cnt=1,
|
||||
first_deposit_receipt_index=7,
|
||||
deposit_receipts_start_index=3)
|
||||
|
||||
yield from run_deposit_transition_block(spec, state, block)
|
||||
|
||||
|
||||
@with_phases([EIP6110])
|
||||
@spec_state_test
|
||||
def test_deposit_transition__invalid_not_enough_eth1_deposits(spec, state):
|
||||
# 3 deposits, 1 deposit receipt, state.eth1_data.deposit_count < state.deposit_receipts_start_index
|
||||
state, block = prepare_state_and_block(spec, state,
|
||||
deposit_cnt=3,
|
||||
deposit_receipt_cnt=1,
|
||||
first_deposit_receipt_index=29,
|
||||
deposit_receipts_start_index=23,
|
||||
eth1_data_deposit_count=17)
|
||||
|
||||
yield from run_deposit_transition_block(spec, state, block, valid=False)
|
||||
|
||||
|
||||
@with_phases([EIP6110])
|
||||
@spec_state_test
|
||||
def test_deposit_transition__invalid_too_many_eth1_deposits(spec, state):
|
||||
# 3 deposits, 1 deposit receipt, more deposits in the block than state.eth1_data.deposit_count allows
|
||||
state, block = prepare_state_and_block(spec, state,
|
||||
deposit_cnt=3,
|
||||
deposit_receipt_cnt=1,
|
||||
first_deposit_receipt_index=11,
|
||||
deposit_receipts_start_index=7,
|
||||
eth1_data_deposit_count=2)
|
||||
|
||||
yield from run_deposit_transition_block(spec, state, block, valid=False)
|
||||
|
||||
|
||||
@with_phases([EIP6110])
|
||||
@spec_state_test
|
||||
def test_deposit_transition__invalid_eth1_deposits_overlap_in_protocol_deposits(spec, state):
|
||||
# spec.MAX_DEPOSITS deposits, 1 deposit receipt, state.eth1_data.deposit_count > state.deposit_receipts_start_index
|
||||
# state.deposit_receipts_start_index == spec.MAX_DEPOSITS - 1
|
||||
state, block = prepare_state_and_block(spec, state,
|
||||
deposit_cnt=spec.MAX_DEPOSITS,
|
||||
deposit_receipt_cnt=1,
|
||||
first_deposit_receipt_index=spec.MAX_DEPOSITS,
|
||||
deposit_receipts_start_index=spec.MAX_DEPOSITS - 1,
|
||||
eth1_data_deposit_count=23)
|
||||
|
||||
yield from run_deposit_transition_block(spec, state, block, valid=False)
|
||||
|
||||
|
||||
@with_phases([EIP6110])
|
||||
@spec_state_test
|
||||
def test_deposit_transition__deposit_and_top_up_same_block(spec, state):
|
||||
# 1 deposit, 1 deposit receipt that tops up the deposited validator
|
||||
state, block = prepare_state_and_block(spec, state,
|
||||
deposit_cnt=1,
|
||||
deposit_receipt_cnt=1,
|
||||
first_deposit_receipt_index=11,
|
||||
deposit_receipts_start_index=7)
|
||||
|
||||
# Artificially assign deposit's pubkey to a deposit receipt of the same block
|
||||
top_up_keys = [block.body.deposits[0].data.pubkey]
|
||||
block.body.execution_payload.deposit_receipts[0].pubkey = top_up_keys[0]
|
||||
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
|
||||
|
||||
yield from run_deposit_transition_block(spec, state, block, top_up_keys=top_up_keys)
|
||||
|
||||
# Check the top up
|
||||
expected_balance = block.body.deposits[0].data.amount + block.body.execution_payload.deposit_receipts[0].amount
|
||||
assert state.balances[len(state.balances) - 1] == expected_balance
|
||||
@@ -187,7 +187,7 @@ def add_attestations_to_state(spec, state, attestations, slot):
|
||||
spec.process_attestation(state, attestation)
|
||||
|
||||
|
||||
def _get_valid_attestation_at_slot(state, spec, slot_to_attest, participation_fn=None):
|
||||
def get_valid_attestation_at_slot(state, spec, slot_to_attest, participation_fn=None):
|
||||
committees_per_slot = spec.get_committee_count_per_slot(state, spec.compute_epoch_at_slot(slot_to_attest))
|
||||
for index in range(committees_per_slot):
|
||||
def participants_filter(comm):
|
||||
@@ -262,7 +262,7 @@ def state_transition_with_full_block(spec,
|
||||
if fill_cur_epoch and state.slot >= spec.MIN_ATTESTATION_INCLUSION_DELAY:
|
||||
slot_to_attest = state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1
|
||||
if slot_to_attest >= spec.compute_start_slot_at_epoch(spec.get_current_epoch(state)):
|
||||
attestations = _get_valid_attestation_at_slot(
|
||||
attestations = get_valid_attestation_at_slot(
|
||||
state,
|
||||
spec,
|
||||
slot_to_attest,
|
||||
@@ -272,7 +272,7 @@ def state_transition_with_full_block(spec,
|
||||
block.body.attestations.append(attestation)
|
||||
if fill_prev_epoch:
|
||||
slot_to_attest = state.slot - spec.SLOTS_PER_EPOCH + 1
|
||||
attestations = _get_valid_attestation_at_slot(
|
||||
attestations = get_valid_attestation_at_slot(
|
||||
state,
|
||||
spec,
|
||||
slot_to_attest,
|
||||
@@ -300,7 +300,7 @@ def state_transition_with_full_attestations_block(spec, state, fill_cur_epoch, f
|
||||
slots = state.slot % spec.SLOTS_PER_EPOCH
|
||||
for slot_offset in range(slots):
|
||||
target_slot = state.slot - slot_offset
|
||||
attestations += _get_valid_attestation_at_slot(
|
||||
attestations += get_valid_attestation_at_slot(
|
||||
state,
|
||||
spec,
|
||||
target_slot,
|
||||
@@ -311,7 +311,7 @@ def state_transition_with_full_attestations_block(spec, state, fill_cur_epoch, f
|
||||
slots = spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH
|
||||
for slot_offset in range(1, slots):
|
||||
target_slot = state.slot - (state.slot % spec.SLOTS_PER_EPOCH) - slot_offset
|
||||
attestations += _get_valid_attestation_at_slot(
|
||||
attestations += get_valid_attestation_at_slot(
|
||||
state,
|
||||
spec,
|
||||
target_slot,
|
||||
|
||||
@@ -9,30 +9,31 @@ PHASE0 = SpecForkName('phase0')
|
||||
ALTAIR = SpecForkName('altair')
|
||||
BELLATRIX = SpecForkName('bellatrix')
|
||||
CAPELLA = SpecForkName('capella')
|
||||
DENEB = SpecForkName('deneb')
|
||||
|
||||
# Experimental phases (not included in default "ALL_PHASES"):
|
||||
SHARDING = SpecForkName('sharding')
|
||||
CUSTODY_GAME = SpecForkName('custody_game')
|
||||
DAS = SpecForkName('das')
|
||||
DENEB = SpecForkName('deneb')
|
||||
EIP6110 = SpecForkName('eip6110')
|
||||
|
||||
# The forks that pytest can run with.
|
||||
ALL_PHASES = (
|
||||
# Formal forks
|
||||
PHASE0, ALTAIR, BELLATRIX, CAPELLA,
|
||||
PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB,
|
||||
# Experimental patches
|
||||
DENEB,
|
||||
EIP6110,
|
||||
)
|
||||
# The forks that output to the test vectors.
|
||||
TESTGEN_FORKS = (PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB)
|
||||
TESTGEN_FORKS = (PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110)
|
||||
|
||||
# TODO: no DENEB fork tests now. Should add when we figure out the content of Capella.
|
||||
ALL_FORK_UPGRADES = {
|
||||
# pre_fork_name: post_fork_name
|
||||
PHASE0: ALTAIR,
|
||||
ALTAIR: BELLATRIX,
|
||||
BELLATRIX: CAPELLA,
|
||||
CAPELLA: DENEB,
|
||||
DENEB: EIP6110,
|
||||
}
|
||||
ALL_PRE_POST_FORKS = ALL_FORK_UPGRADES.items()
|
||||
AFTER_BELLATRIX_UPGRADES = {key: value for key, value in ALL_FORK_UPGRADES.items() if key != PHASE0}
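# Illustrative sketch (assumes only the fork-name constants defined above): with the
# DENEB -> EIP6110 entry added, the derived collections now cover that transition too:
#
#     list(ALL_PRE_POST_FORKS)
#     # [(PHASE0, ALTAIR), (ALTAIR, BELLATRIX), (BELLATRIX, CAPELLA),
#     #  (CAPELLA, DENEB), (DENEB, EIP6110)]
#
#     AFTER_BELLATRIX_UPGRADES  # the same mapping with the PHASE0 entry removed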
|
||||
|
||||
@@ -171,6 +171,54 @@ def prepare_state_and_deposit(spec, state, validator_index, amount,
|
||||
return deposit
|
||||
|
||||
|
||||
def build_deposit_receipt(spec,
|
||||
index,
|
||||
pubkey,
|
||||
privkey,
|
||||
amount,
|
||||
withdrawal_credentials,
|
||||
signed):
|
||||
deposit_data = build_deposit_data(spec, pubkey, privkey, amount, withdrawal_credentials, signed=signed)
|
||||
return spec.DepositReceipt(
|
||||
pubkey=deposit_data.pubkey,
|
||||
withdrawal_credentials=deposit_data.withdrawal_credentials,
|
||||
amount=deposit_data.amount,
|
||||
signature=deposit_data.signature,
|
||||
index=index)
|
||||
|
||||
|
||||
def prepare_deposit_receipt(spec, validator_index, amount,
|
||||
index=None,
|
||||
pubkey=None,
|
||||
privkey=None,
|
||||
withdrawal_credentials=None,
|
||||
signed=False):
|
||||
"""
|
||||
Create a deposit receipt for the given validator, depositing the given amount.
|
||||
"""
|
||||
if index is None:
|
||||
index = validator_index
|
||||
|
||||
if pubkey is None:
|
||||
pubkey = pubkeys[validator_index]
|
||||
|
||||
if privkey is None:
|
||||
privkey = privkeys[validator_index]
|
||||
|
||||
# insecurely use pubkey as withdrawal key if no credentials provided
|
||||
if withdrawal_credentials is None:
|
||||
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkey)[1:]
|
||||
|
||||
return build_deposit_receipt(
|
||||
spec,
|
||||
index,
|
||||
pubkey,
|
||||
privkey,
|
||||
amount,
|
||||
withdrawal_credentials,
|
||||
signed,
|
||||
)
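# Usage sketch (illustrative only; the test name below is hypothetical): the deposit
# receipt tests earlier in this diff call this helper with the next validator index
# and then hand the receipt to `run_deposit_receipt_processing`, e.g.:
#
#     @with_eip6110_and_later
#     @spec_state_test
#     def test_new_deposit_half_max(spec, state):
#         validator_index = len(state.validators)
#         amount = spec.MAX_EFFECTIVE_BALANCE // 2
#         deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True)
#         yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)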
|
||||
|
||||
#
|
||||
# Run processing
|
||||
#
|
||||
@@ -255,3 +303,90 @@ def run_deposit_processing_with_specific_fork_version(
|
||||
state.eth1_data.deposit_count = 1
|
||||
|
||||
yield from run_deposit_processing(spec, state, deposit, validator_index, valid=valid, effective=effective)
|
||||
|
||||
|
||||
def run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index, valid=True, effective=True):
|
||||
"""
|
||||
Run ``process_deposit_receipt``, yielding:
|
||||
- pre-state ('pre')
|
||||
- deposit_receipt ('deposit_receipt')
|
||||
- post-state ('post').
|
||||
If ``valid == False``, run expecting ``AssertionError``
|
||||
"""
|
||||
pre_validator_count = len(state.validators)
|
||||
pre_balance = 0
|
||||
is_top_up = False
|
||||
# a deposit for an already-registered validator index is a top-up
|
||||
if validator_index < pre_validator_count:
|
||||
is_top_up = True
|
||||
pre_balance = get_balance(state, validator_index)
|
||||
pre_effective_balance = state.validators[validator_index].effective_balance
|
||||
|
||||
yield 'pre', state
|
||||
yield 'deposit_receipt', deposit_receipt
|
||||
|
||||
if not valid:
|
||||
expect_assertion_error(lambda: spec.process_deposit_receipt(state, deposit_receipt))
|
||||
yield 'post', None
|
||||
return
|
||||
|
||||
spec.process_deposit_receipt(state, deposit_receipt)
|
||||
|
||||
yield 'post', state
|
||||
|
||||
if not effective or not bls.KeyValidate(deposit_receipt.pubkey):
|
||||
assert len(state.validators) == pre_validator_count
|
||||
assert len(state.balances) == pre_validator_count
|
||||
if is_top_up:
|
||||
assert get_balance(state, validator_index) == pre_balance
|
||||
else:
|
||||
if is_top_up:
|
||||
# Top-ups do not change effective balance
|
||||
assert state.validators[validator_index].effective_balance == pre_effective_balance
|
||||
assert len(state.validators) == pre_validator_count
|
||||
assert len(state.balances) == pre_validator_count
|
||||
else:
|
||||
# new validator
|
||||
assert len(state.validators) == pre_validator_count + 1
|
||||
assert len(state.balances) == pre_validator_count + 1
|
||||
effective_balance = min(spec.MAX_EFFECTIVE_BALANCE, deposit_receipt.amount)
|
||||
effective_balance -= effective_balance % spec.EFFECTIVE_BALANCE_INCREMENT
|
||||
assert state.validators[validator_index].effective_balance == effective_balance
|
||||
|
||||
assert get_balance(state, validator_index) == pre_balance + deposit_receipt.amount
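# Worked example (standalone sketch with mainnet-style constants; not spec code): the
# effective-balance assertion above rounds the deposited amount down to a multiple of
# EFFECTIVE_BALANCE_INCREMENT and caps it at MAX_EFFECTIVE_BALANCE:
#
#     MAX_EFFECTIVE_BALANCE = 32 * 10**9        # Gwei
#     EFFECTIVE_BALANCE_INCREMENT = 10**9       # Gwei
#
#     def expected_effective_balance(amount):
#         effective = min(MAX_EFFECTIVE_BALANCE, amount)
#         return effective - effective % EFFECTIVE_BALANCE_INCREMENT
#
#     assert expected_effective_balance(MAX_EFFECTIVE_BALANCE + 1) == MAX_EFFECTIVE_BALANCE
#     assert expected_effective_balance(3_500_000_000) == 3_000_000_000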
|
||||
|
||||
|
||||
def run_deposit_receipt_processing_with_specific_fork_version(
|
||||
spec,
|
||||
state,
|
||||
fork_version,
|
||||
valid=True,
|
||||
effective=True):
|
||||
validator_index = len(state.validators)
|
||||
amount = spec.MAX_EFFECTIVE_BALANCE
|
||||
|
||||
pubkey = pubkeys[validator_index]
|
||||
privkey = privkeys[validator_index]
|
||||
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkey)[1:]
|
||||
|
||||
deposit_message = spec.DepositMessage(pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount)
|
||||
domain = spec.compute_domain(domain_type=spec.DOMAIN_DEPOSIT, fork_version=fork_version)
|
||||
deposit_data = spec.DepositData(
|
||||
pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount,
|
||||
signature=bls.Sign(privkey, spec.compute_signing_root(deposit_message, domain))
|
||||
)
|
||||
deposit_receipt = spec.DepositReceipt(
|
||||
pubkey=deposit_data.pubkey,
|
||||
withdrawal_credentials=deposit_data.withdrawal_credentials,
|
||||
amount=deposit_data.amount,
|
||||
signature=deposit_data.signature,
|
||||
index=validator_index)
|
||||
|
||||
yield from run_deposit_receipt_processing(
|
||||
spec,
|
||||
state,
|
||||
deposit_receipt,
|
||||
validator_index,
|
||||
valid=valid,
|
||||
effective=effective
|
||||
)
|
||||
|
||||
@@ -4,7 +4,11 @@ from rlp import encode
|
||||
from rlp.sedes import big_endian_int, Binary, List
|
||||
|
||||
from eth2spec.debug.random_value import get_random_bytes_list
|
||||
from eth2spec.test.helpers.forks import is_post_capella, is_post_deneb
|
||||
from eth2spec.test.helpers.forks import (
|
||||
is_post_capella,
|
||||
is_post_deneb,
|
||||
is_post_eip6110,
|
||||
)
|
||||
|
||||
|
||||
def get_execution_payload_header(spec, execution_payload):
|
||||
@@ -28,6 +32,8 @@ def get_execution_payload_header(spec, execution_payload):
|
||||
payload_header.withdrawals_root = spec.hash_tree_root(execution_payload.withdrawals)
|
||||
if is_post_deneb(spec):
|
||||
payload_header.excess_data_gas = execution_payload.excess_data_gas
|
||||
if is_post_eip6110(spec):
|
||||
payload_header.deposit_receipts_root = spec.hash_tree_root(execution_payload.deposit_receipts)
|
||||
return payload_header
|
||||
|
||||
|
||||
@@ -48,7 +54,8 @@ def compute_trie_root_from_indexed_data(data):
|
||||
def compute_el_header_block_hash(spec,
|
||||
payload_header,
|
||||
transactions_trie_root,
|
||||
withdrawals_trie_root=None):
|
||||
withdrawals_trie_root=None,
|
||||
deposit_receipts_trie_root=None):
|
||||
"""
|
||||
Computes the RLP execution block hash described by an `ExecutionPayloadHeader`.
|
||||
"""
|
||||
@@ -92,6 +99,10 @@ def compute_el_header_block_hash(spec,
|
||||
if is_post_deneb(spec):
|
||||
# excess_data_gas
|
||||
execution_payload_header_rlp.append((big_endian_int, payload_header.excess_data_gas))
|
||||
if is_post_eip6110(spec):
|
||||
# deposit_receipts_root
|
||||
assert deposit_receipts_trie_root is not None
|
||||
execution_payload_header_rlp.append((Binary(32, 32), deposit_receipts_trie_root))
|
||||
|
||||
sedes = List([schema for schema, _ in execution_payload_header_rlp])
|
||||
values = [value for _, value in execution_payload_header_rlp]
|
||||
@@ -118,14 +129,37 @@ def get_withdrawal_rlp(spec, withdrawal):
|
||||
return encode(values, sedes)
|
||||
|
||||
|
||||
def get_deposit_receipt_rlp(spec, deposit_receipt):
|
||||
deposit_receipt_rlp = [
|
||||
# pubkey
|
||||
(Binary(48, 48), deposit_receipt.pubkey),
|
||||
# withdrawal_credentials
|
||||
(Binary(32, 32), deposit_receipt.withdrawal_credentials),
|
||||
# amount
|
||||
(big_endian_int, deposit_receipt.amount),
|
||||
# signature
|
||||
(Binary(96, 96), deposit_receipt.signature),
|
||||
# index
|
||||
(big_endian_int, deposit_receipt.index),
|
||||
]
|
||||
|
||||
sedes = List([schema for schema, _ in deposit_receipt_rlp])
|
||||
values = [value for _, value in deposit_receipt_rlp]
|
||||
return encode(values, sedes)
|
||||
|
||||
|
||||
def compute_el_block_hash(spec, payload):
|
||||
transactions_trie_root = compute_trie_root_from_indexed_data(payload.transactions)
|
||||
|
||||
withdrawals_trie_root = None
|
||||
deposit_receipts_trie_root = None
|
||||
|
||||
if is_post_capella(spec):
|
||||
withdrawals_encoded = [get_withdrawal_rlp(spec, withdrawal) for withdrawal in payload.withdrawals]
|
||||
withdrawals_trie_root = compute_trie_root_from_indexed_data(withdrawals_encoded)
|
||||
else:
|
||||
withdrawals_trie_root = None
|
||||
if is_post_eip6110(spec):
|
||||
deposit_receipts_encoded = [get_deposit_receipt_rlp(spec, receipt) for receipt in payload.deposit_receipts]
|
||||
deposit_receipts_trie_root = compute_trie_root_from_indexed_data(deposit_receipts_encoded)
|
||||
|
||||
payload_header = get_execution_payload_header(spec, payload)
|
||||
|
||||
@@ -134,6 +168,7 @@ def compute_el_block_hash(spec, payload):
|
||||
payload_header,
|
||||
transactions_trie_root,
|
||||
withdrawals_trie_root,
|
||||
deposit_receipts_trie_root,
|
||||
)
|
||||
|
||||
|
||||
@@ -165,6 +200,11 @@ def build_empty_execution_payload(spec, state, randao_mix=None):
|
||||
)
|
||||
if is_post_capella(spec):
|
||||
payload.withdrawals = spec.get_expected_withdrawals(state)
|
||||
if is_post_deneb(spec):
|
||||
payload.excess_data_gas = 0
|
||||
if is_post_eip6110(spec):
|
||||
# explicitly initialize with no deposit receipts
|
||||
payload.deposit_receipts = []
|
||||
|
||||
payload.block_hash = compute_el_block_hash(spec, payload)
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@ from eth2spec.test.exceptions import BlockNotFoundException
|
||||
from eth2spec.test.helpers.attestations import (
|
||||
next_epoch_with_attestations,
|
||||
next_slots_with_attestations,
|
||||
state_transition_with_full_block,
|
||||
)
|
||||
|
||||
|
||||
@@ -16,12 +17,13 @@ def get_anchor_root(spec, state):
|
||||
def tick_and_add_block(spec, store, signed_block, test_steps, valid=True,
|
||||
merge_block=False, block_not_found=False, is_optimistic=False):
|
||||
pre_state = store.block_states[signed_block.message.parent_root]
|
||||
block_time = pre_state.genesis_time + signed_block.message.slot * spec.config.SECONDS_PER_SLOT
|
||||
if merge_block:
|
||||
assert spec.is_merge_transition_block(pre_state, signed_block.message.body)
|
||||
|
||||
if store.time < block_time:
|
||||
on_tick_and_append_step(spec, store, block_time, test_steps)
|
||||
block_time = pre_state.genesis_time + signed_block.message.slot * spec.config.SECONDS_PER_SLOT
|
||||
while store.time < block_time:
|
||||
time = pre_state.genesis_time + (spec.get_current_slot(store) + 1) * spec.config.SECONDS_PER_SLOT
|
||||
on_tick_and_append_step(spec, store, time, test_steps)
|
||||
|
||||
post_state = yield from add_block(
|
||||
spec, store, signed_block, test_steps,
|
||||
@@ -39,6 +41,11 @@ def add_attestation(spec, store, attestation, test_steps, is_from_block=False):
|
||||
test_steps.append({'attestation': get_attestation_file_name(attestation)})
|
||||
|
||||
|
||||
def add_attestations(spec, store, attestations, test_steps, is_from_block=False):
|
||||
for attestation in attestations:
|
||||
yield from add_attestation(spec, store, attestation, test_steps, is_from_block=is_from_block)
|
||||
|
||||
|
||||
def tick_and_run_on_attestation(spec, store, attestation, test_steps, is_from_block=False):
|
||||
parent_block = store.blocks[attestation.data.beacon_block_root]
|
||||
pre_state = store.block_states[spec.hash_tree_root(parent_block)]
|
||||
@@ -90,6 +97,7 @@ def get_attester_slashing_file_name(attester_slashing):
|
||||
def on_tick_and_append_step(spec, store, time, test_steps):
|
||||
spec.on_tick(store, time)
|
||||
test_steps.append({'tick': int(time)})
|
||||
output_store_checks(spec, store, test_steps)
|
||||
|
||||
|
||||
def run_on_block(spec, store, signed_block, valid=True):
|
||||
@@ -153,25 +161,7 @@ def add_block(spec,
|
||||
assert store.blocks[block_root] == signed_block.message
|
||||
assert store.block_states[block_root].hash_tree_root() == signed_block.message.state_root
|
||||
if not is_optimistic:
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'time': int(store.time),
|
||||
'head': get_formatted_head_output(spec, store),
|
||||
'justified_checkpoint': {
|
||||
'epoch': int(store.justified_checkpoint.epoch),
|
||||
'root': encode_hex(store.justified_checkpoint.root),
|
||||
},
|
||||
'finalized_checkpoint': {
|
||||
'epoch': int(store.finalized_checkpoint.epoch),
|
||||
'root': encode_hex(store.finalized_checkpoint.root),
|
||||
},
|
||||
'best_justified_checkpoint': {
|
||||
'epoch': int(store.best_justified_checkpoint.epoch),
|
||||
'root': encode_hex(store.best_justified_checkpoint.root),
|
||||
},
|
||||
'proposer_boost_root': encode_hex(store.proposer_boost_root),
|
||||
}
|
||||
})
|
||||
output_store_checks(spec, store, test_steps)
|
||||
|
||||
return store.block_states[signed_block.message.hash_tree_root()]
|
||||
|
||||
@@ -217,6 +207,32 @@ def get_formatted_head_output(spec, store):
|
||||
}
|
||||
|
||||
|
||||
def output_head_check(spec, store, test_steps):
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'head': get_formatted_head_output(spec, store),
|
||||
}
|
||||
})
|
||||
|
||||
|
||||
def output_store_checks(spec, store, test_steps):
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'time': int(store.time),
|
||||
'head': get_formatted_head_output(spec, store),
|
||||
'justified_checkpoint': {
|
||||
'epoch': int(store.justified_checkpoint.epoch),
|
||||
'root': encode_hex(store.justified_checkpoint.root),
|
||||
},
|
||||
'finalized_checkpoint': {
|
||||
'epoch': int(store.finalized_checkpoint.epoch),
|
||||
'root': encode_hex(store.finalized_checkpoint.root),
|
||||
},
|
||||
'proposer_boost_root': encode_hex(store.proposer_boost_root),
|
||||
}
|
||||
})
|
||||
|
||||
|
||||
def apply_next_epoch_with_attestations(spec,
|
||||
state,
|
||||
store,
|
||||
@@ -263,6 +279,39 @@ def apply_next_slots_with_attestations(spec,
|
||||
return post_state, store, last_signed_block
|
||||
|
||||
|
||||
def is_ready_to_justify(spec, state):
|
||||
"""
|
||||
Check if the given ``state`` will trigger justification updates at epoch boundary.
|
||||
"""
|
||||
temp_state = state.copy()
|
||||
spec.process_justification_and_finalization(temp_state)
|
||||
return temp_state.current_justified_checkpoint.epoch > state.current_justified_checkpoint.epoch
|
||||
|
||||
|
||||
def find_next_justifying_slot(spec,
|
||||
state,
|
||||
fill_cur_epoch,
|
||||
fill_prev_epoch,
|
||||
participation_fn=None):
|
||||
temp_state = state.copy()
|
||||
|
||||
signed_blocks = []
|
||||
justifying_slot = None
|
||||
while justifying_slot is None:
|
||||
signed_block = state_transition_with_full_block(
|
||||
spec,
|
||||
temp_state,
|
||||
fill_cur_epoch,
|
||||
fill_prev_epoch,
|
||||
participation_fn,
|
||||
)
|
||||
signed_blocks.append(signed_block)
|
||||
if is_ready_to_justify(spec, temp_state):
|
||||
justifying_slot = temp_state.slot
|
||||
|
||||
return signed_blocks, justifying_slot
|
||||
|
||||
|
||||
def get_pow_block_file_name(pow_block):
|
||||
return f"pow_block_{encode_hex(pow_block.block_hash)}"
|
||||
|
||||
|
||||
@@ -15,6 +15,7 @@ from eth2spec.test.helpers.constants import (
|
||||
BELLATRIX,
|
||||
CAPELLA,
|
||||
DENEB,
|
||||
EIP6110,
|
||||
)
|
||||
from eth2spec.test.helpers.deposits import (
|
||||
prepare_state_and_deposit,
|
||||
@@ -47,6 +48,7 @@ def _set_operations_by_dict(block, operation_dict):
|
||||
|
||||
def _state_transition_and_sign_block_at_slot(spec,
|
||||
state,
|
||||
sync_aggregate=None,
|
||||
operation_dict=None):
|
||||
"""
|
||||
Cribbed from ``transition_unsigned_block`` helper
|
||||
@@ -61,6 +63,8 @@ def _state_transition_and_sign_block_at_slot(spec,
|
||||
Thus use dict to pass operations.
|
||||
"""
|
||||
block = build_empty_block(spec, state)
|
||||
if sync_aggregate is not None:
|
||||
block.body.sync_aggregate = sync_aggregate
|
||||
|
||||
if operation_dict:
|
||||
_set_operations_by_dict(block, operation_dict)
|
||||
@@ -141,7 +145,7 @@ def state_transition_across_slots_with_ignoring_proposers(spec,
|
||||
next_slot(spec, state)
|
||||
|
||||
|
||||
def do_fork(state, spec, post_spec, fork_epoch, with_block=True, operation_dict=None):
|
||||
def do_fork(state, spec, post_spec, fork_epoch, with_block=True, sync_aggregate=None, operation_dict=None):
|
||||
spec.process_slots(state, state.slot + 1)
|
||||
|
||||
assert state.slot % spec.SLOTS_PER_EPOCH == 0
|
||||
@@ -155,6 +159,8 @@ def do_fork(state, spec, post_spec, fork_epoch, with_block=True, operation_dict=
|
||||
state = post_spec.upgrade_to_capella(state)
|
||||
elif post_spec.fork == DENEB:
|
||||
state = post_spec.upgrade_to_deneb(state)
|
||||
elif post_spec.fork == EIP6110:
|
||||
state = post_spec.upgrade_to_eip6110(state)
|
||||
|
||||
assert state.fork.epoch == fork_epoch
|
||||
|
||||
@@ -170,9 +176,17 @@ def do_fork(state, spec, post_spec, fork_epoch, with_block=True, operation_dict=
|
||||
elif post_spec.fork == DENEB:
|
||||
assert state.fork.previous_version == post_spec.config.CAPELLA_FORK_VERSION
|
||||
assert state.fork.current_version == post_spec.config.DENEB_FORK_VERSION
|
||||
elif post_spec.fork == EIP6110:
|
||||
assert state.fork.previous_version == post_spec.config.DENEB_FORK_VERSION
|
||||
assert state.fork.current_version == post_spec.config.EIP6110_FORK_VERSION
|
||||
|
||||
if with_block:
|
||||
return state, _state_transition_and_sign_block_at_slot(post_spec, state, operation_dict=operation_dict)
|
||||
return state, _state_transition_and_sign_block_at_slot(
|
||||
post_spec,
|
||||
state,
|
||||
sync_aggregate=sync_aggregate,
|
||||
operation_dict=operation_dict,
|
||||
)
|
||||
else:
|
||||
return state, None
|
||||
|
||||
|
||||
@@ -1,9 +1,12 @@
|
||||
from .constants import (
|
||||
PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB,
|
||||
EIP6110,
|
||||
)
|
||||
|
||||
|
||||
def is_post_fork(a, b):
|
||||
if a == EIP6110:
|
||||
return b in [PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110]
|
||||
if a == DENEB:
|
||||
return b in [PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB]
|
||||
if a == CAPELLA:
|
||||
@@ -31,3 +34,7 @@ def is_post_capella(spec):
|
||||
|
||||
def is_post_deneb(spec):
|
||||
return is_post_fork(spec.fork, DENEB)
|
||||
|
||||
|
||||
def is_post_eip6110(spec):
|
||||
return is_post_fork(spec.fork, EIP6110)
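# Semantics sketch (illustrative; relies only on the helpers above): `is_post_fork(a, b)`
# asks whether fork `a` is at or after fork `b`, so with the branch added here:
#
#     is_post_fork(EIP6110, DENEB)    # True  -> is_post_eip6110 holds for EIP-6110 specs
#     is_post_fork(DENEB, EIP6110)    # False -> Deneb specs are not "post EIP-6110"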
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
from eth2spec.test.helpers.constants import (
|
||||
ALTAIR, BELLATRIX, CAPELLA, DENEB,
|
||||
ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110,
|
||||
)
|
||||
from eth2spec.test.helpers.execution_payload import (
|
||||
compute_el_header_block_hash,
|
||||
)
|
||||
from eth2spec.test.helpers.forks import (
|
||||
is_post_altair, is_post_bellatrix, is_post_capella,
|
||||
is_post_altair, is_post_bellatrix, is_post_capella, is_post_eip6110,
|
||||
)
|
||||
from eth2spec.test.helpers.keys import pubkeys
|
||||
|
||||
@@ -47,17 +47,20 @@ def get_sample_genesis_execution_payload_header(spec,
|
||||
)
|
||||
|
||||
transactions_trie_root = bytes.fromhex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
|
||||
withdrawals_trie_root = None
|
||||
deposit_receipts_trie_root = None
|
||||
|
||||
if is_post_capella(spec):
|
||||
withdrawals_trie_root = bytes.fromhex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
|
||||
else:
|
||||
withdrawals_trie_root = None
|
||||
if is_post_eip6110(spec):
|
||||
deposit_receipts_trie_root = bytes.fromhex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
|
||||
|
||||
payload_header.block_hash = compute_el_header_block_hash(
|
||||
spec,
|
||||
payload_header,
|
||||
transactions_trie_root,
|
||||
withdrawals_trie_root,
|
||||
deposit_receipts_trie_root,
|
||||
)
|
||||
return payload_header
|
||||
|
||||
@@ -80,6 +83,9 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
|
||||
elif spec.fork == DENEB:
|
||||
previous_version = spec.config.CAPELLA_FORK_VERSION
|
||||
current_version = spec.config.DENEB_FORK_VERSION
|
||||
elif spec.fork == EIP6110:
|
||||
previous_version = spec.config.DENEB_FORK_VERSION
|
||||
current_version = spec.config.EIP6110_FORK_VERSION
|
||||
|
||||
state = spec.BeaconState(
|
||||
genesis_time=0,
|
||||
@@ -129,4 +135,7 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
|
||||
eth1_block_hash=eth1_block_hash,
|
||||
)
|
||||
|
||||
if is_post_eip6110(spec):
|
||||
state.deposit_receipts_start_index = spec.UNSET_DEPOSIT_RECEIPTS_START_INDEX
|
||||
|
||||
return state
|
||||
|
||||
@@ -31,7 +31,7 @@ def get_sync_aggregate(spec, state, num_participants=None, signature_slot=None):
|
||||
sync_committee_signature = compute_aggregate_sync_committee_signature(
|
||||
spec,
|
||||
signature_state,
|
||||
signature_slot,
|
||||
max(signature_slot, 1) - 1,
|
||||
committee_indices[:num_participants],
|
||||
)
|
||||
sync_aggregate = spec.SyncAggregate(
|
||||
|
||||
@@ -12,7 +12,7 @@ from eth2spec.utils.ssz.ssz_impl import serialize
|
||||
|
||||
|
||||
#
|
||||
# Containers from Deneb
|
||||
# Containers from EIP-4844
|
||||
#
|
||||
MAX_CALLDATA_SIZE = 2**24
|
||||
MAX_VERSIONED_HASHES_LIST_SIZE = 2**24
|
||||
@@ -50,12 +50,9 @@ class SignedBlobTransaction(Container):
|
||||
signature: ECDSASignature
|
||||
|
||||
|
||||
def get_sample_blob(spec, rng=None):
|
||||
if rng is None:
|
||||
rng = random.Random(5566)
|
||||
|
||||
def get_sample_blob(spec, rng=random.Random(5566), is_valid_blob=True):
|
||||
values = [
|
||||
rng.randint(0, spec.BLS_MODULUS - 1)
|
||||
rng.randint(0, spec.BLS_MODULUS - 1) if is_valid_blob else spec.BLS_MODULUS
|
||||
for _ in range(spec.FIELD_ELEMENTS_PER_BLOB)
|
||||
]
|
||||
|
||||
@@ -98,16 +95,23 @@ def get_poly_in_both_forms(spec, rng=None):
|
||||
return coeffs, evals
|
||||
|
||||
|
||||
def get_sample_opaque_tx(spec, blob_count=1, rng=None):
|
||||
def get_sample_opaque_tx(spec, blob_count=1, rng=random.Random(5566), is_valid_blob=True):
|
||||
blobs = []
|
||||
blob_kzg_commitments = []
|
||||
blob_kzg_proofs = []
|
||||
blob_versioned_hashes = []
|
||||
for _ in range(blob_count):
|
||||
blob = get_sample_blob(spec, rng)
|
||||
blob_commitment = spec.KZGCommitment(spec.blob_to_kzg_commitment(blob))
|
||||
blob = get_sample_blob(spec, rng, is_valid_blob=is_valid_blob)
|
||||
if is_valid_blob:
|
||||
blob_commitment = spec.KZGCommitment(spec.blob_to_kzg_commitment(blob))
|
||||
blob_kzg_proof = spec.compute_blob_kzg_proof(blob, blob_commitment)
|
||||
else:
|
||||
blob_commitment = spec.KZGCommitment()
|
||||
blob_kzg_proof = spec.KZGProof()
|
||||
blob_versioned_hash = spec.kzg_commitment_to_versioned_hash(blob_commitment)
|
||||
blobs.append(blob)
|
||||
blob_kzg_commitments.append(blob_commitment)
|
||||
blob_kzg_proofs.append(blob_kzg_proof)
|
||||
blob_versioned_hashes.append(blob_versioned_hash)
|
||||
|
||||
signed_blob_tx = SignedBlobTransaction(
|
||||
@@ -117,4 +121,4 @@ def get_sample_opaque_tx(spec, blob_count=1, rng=None):
|
||||
)
|
||||
serialized_tx = serialize(signed_blob_tx)
|
||||
opaque_tx = spec.uint_to_bytes(spec.BLOB_TX_TYPE) + serialized_tx
|
||||
return opaque_tx, blobs, blob_kzg_commitments
|
||||
return opaque_tx, blobs, blob_kzg_commitments, blob_kzg_proofs
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
from eth2spec.test.context import (
|
||||
MAINNET,
|
||||
spec_state_test,
|
||||
with_all_phases,
|
||||
with_altair_and_later,
|
||||
with_presets,
|
||||
)
|
||||
from eth2spec.test.helpers.attestations import (
|
||||
@@ -31,7 +31,7 @@ def _apply_base_block_a(spec, state, store, test_steps):
|
||||
assert spec.get_head(store) == signed_block_a.message.hash_tree_root()
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_ex_ante_vanilla(spec, state):
|
||||
"""
|
||||
@@ -118,7 +118,7 @@ def _get_greater_than_proposer_boost_score(spec, store, state, proposer_boost_ro
|
||||
return proposer_score // base_effective_balance + 1
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_altair_and_later
|
||||
@with_presets([MAINNET], reason="to create non-duplicate committee")
|
||||
@spec_state_test
|
||||
def test_ex_ante_attestations_is_greater_than_proposer_boost_with_boost(spec, state):
|
||||
@@ -191,7 +191,7 @@ def test_ex_ante_attestations_is_greater_than_proposer_boost_with_boost(spec, st
|
||||
yield 'steps', test_steps
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_ex_ante_sandwich_without_attestations(spec, state):
|
||||
"""
|
||||
@@ -254,7 +254,7 @@ def test_ex_ante_sandwich_without_attestations(spec, state):
|
||||
yield 'steps', test_steps
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_ex_ante_sandwich_with_honest_attestation(spec, state):
|
||||
"""
|
||||
@@ -335,7 +335,7 @@ def test_ex_ante_sandwich_with_honest_attestation(spec, state):
|
||||
yield 'steps', test_steps
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_altair_and_later
|
||||
@with_presets([MAINNET], reason="to create non-duplicate committee")
|
||||
@spec_state_test
|
||||
def test_ex_ante_sandwich_with_boost_not_sufficient(spec, state):
|
||||
|
||||
@@ -1,9 +1,8 @@
|
||||
import random
|
||||
from eth_utils import encode_hex
|
||||
|
||||
from eth2spec.test.context import (
|
||||
spec_state_test,
|
||||
with_all_phases,
|
||||
with_altair_and_later,
|
||||
with_presets,
|
||||
)
|
||||
from eth2spec.test.helpers.attestations import get_valid_attestation, next_epoch_with_attestations
|
||||
@@ -22,6 +21,8 @@ from eth2spec.test.helpers.fork_choice import (
|
||||
add_attestation,
|
||||
tick_and_run_on_attestation,
|
||||
tick_and_add_block,
|
||||
output_head_check,
|
||||
apply_next_epoch_with_attestations,
|
||||
)
|
||||
from eth2spec.test.helpers.forks import (
|
||||
is_post_altair,
|
||||
@@ -36,7 +37,7 @@ from eth2spec.test.helpers.state import (
|
||||
rng = random.Random(1001)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_genesis(spec, state):
|
||||
test_steps = []
|
||||
@@ -60,7 +61,7 @@ def test_genesis(spec, state):
|
||||
yield 'description', 'meta', f"Although it's not phase 0, we may use {spec.fork} spec to start testnets."
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_chain_no_attestations(spec, state):
|
||||
test_steps = []
|
||||
@@ -71,11 +72,7 @@ def test_chain_no_attestations(spec, state):
|
||||
|
||||
anchor_root = get_anchor_root(spec, state)
|
||||
assert spec.get_head(store) == anchor_root
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'head': get_formatted_head_output(spec, store),
|
||||
}
|
||||
})
|
||||
output_head_check(spec, store, test_steps)
|
||||
|
||||
# On receiving a block at slot `GENESIS_SLOT + 1`
|
||||
block_1 = build_empty_block_for_next_slot(spec, state)
|
||||
@@ -88,16 +85,12 @@ def test_chain_no_attestations(spec, state):
|
||||
yield from tick_and_add_block(spec, store, signed_block_2, test_steps)
|
||||
|
||||
assert spec.get_head(store) == spec.hash_tree_root(block_2)
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'head': get_formatted_head_output(spec, store),
|
||||
}
|
||||
})
|
||||
output_head_check(spec, store, test_steps)
|
||||
|
||||
yield 'steps', test_steps
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_split_tie_breaker_no_attestations(spec, state):
|
||||
test_steps = []
|
||||
@@ -109,11 +102,7 @@ def test_split_tie_breaker_no_attestations(spec, state):
|
||||
yield 'anchor_block', anchor_block
|
||||
anchor_root = get_anchor_root(spec, state)
|
||||
assert spec.get_head(store) == anchor_root
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'head': get_formatted_head_output(spec, store),
|
||||
}
|
||||
})
|
||||
output_head_check(spec, store, test_steps)
|
||||
|
||||
# Create block at slot 1
|
||||
block_1_state = genesis_state.copy()
|
||||
@@ -135,16 +124,12 @@ def test_split_tie_breaker_no_attestations(spec, state):
|
||||
|
||||
highest_root = max(spec.hash_tree_root(block_1), spec.hash_tree_root(block_2))
|
||||
assert spec.get_head(store) == highest_root
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'head': get_formatted_head_output(spec, store),
|
||||
}
|
||||
})
|
||||
output_head_check(spec, store, test_steps)
|
||||
|
||||
yield 'steps', test_steps
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_shorter_chain_but_heavier_weight(spec, state):
|
||||
test_steps = []
|
||||
@@ -156,11 +141,7 @@ def test_shorter_chain_but_heavier_weight(spec, state):
|
||||
yield 'anchor_block', anchor_block
|
||||
anchor_root = get_anchor_root(spec, state)
|
||||
assert spec.get_head(store) == anchor_root
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'head': get_formatted_head_output(spec, store),
|
||||
}
|
||||
})
|
||||
output_head_check(spec, store, test_steps)
|
||||
|
||||
# build longer tree
|
||||
long_state = genesis_state.copy()
|
||||
@@ -183,16 +164,12 @@ def test_shorter_chain_but_heavier_weight(spec, state):
|
||||
yield from tick_and_run_on_attestation(spec, store, short_attestation, test_steps)
|
||||
|
||||
assert spec.get_head(store) == spec.hash_tree_root(short_block)
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'head': get_formatted_head_output(spec, store),
|
||||
}
|
||||
})
|
||||
output_head_check(spec, store, test_steps)
|
||||
|
||||
yield 'steps', test_steps
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@with_presets([MINIMAL], reason="too slow")
|
||||
def test_filtered_block_tree(spec, state):
|
||||
@@ -203,11 +180,7 @@ def test_filtered_block_tree(spec, state):
|
||||
yield 'anchor_block', anchor_block
|
||||
anchor_root = get_anchor_root(spec, state)
|
||||
assert spec.get_head(store) == anchor_root
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'head': get_formatted_head_output(spec, store),
|
||||
}
|
||||
})
|
||||
output_head_check(spec, store, test_steps)
|
||||
|
||||
# transition state past initial couple of epochs
|
||||
next_epoch(spec, state)
|
||||
@@ -227,13 +200,7 @@ def test_filtered_block_tree(spec, state):
|
||||
# the last block in the branch should be the head
|
||||
expected_head_root = spec.hash_tree_root(signed_blocks[-1].message)
|
||||
assert spec.get_head(store) == expected_head_root
|
||||
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'head': get_formatted_head_output(spec, store),
|
||||
'justified_checkpoint_root': encode_hex(store.justified_checkpoint.root),
|
||||
}
|
||||
})
|
||||
output_head_check(spec, store, test_steps)
|
||||
|
||||
#
|
||||
# create branch containing the justified block but not containing enough on
|
||||
@@ -274,16 +241,12 @@ def test_filtered_block_tree(spec, state):
|
||||
|
||||
# ensure that get_head still returns the head from the previous branch
|
||||
assert spec.get_head(store) == expected_head_root
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'head': get_formatted_head_output(spec, store)
|
||||
}
|
||||
})
|
||||
output_head_check(spec, store, test_steps)
|
||||
|
||||
yield 'steps', test_steps
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_proposer_boost_correct_head(spec, state):
|
||||
test_steps = []
|
||||
@@ -295,11 +258,7 @@ def test_proposer_boost_correct_head(spec, state):
|
||||
yield 'anchor_block', anchor_block
|
||||
anchor_root = get_anchor_root(spec, state)
|
||||
assert spec.get_head(store) == anchor_root
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'head': get_formatted_head_output(spec, store),
|
||||
}
|
||||
})
|
||||
output_head_check(spec, store, test_steps)
|
||||
|
||||
# Build block that serves as head ONLY on timely arrival, and ONLY in that slot
|
||||
state_1 = genesis_state.copy()
|
||||
@@ -337,19 +296,14 @@ def test_proposer_boost_correct_head(spec, state):
|
||||
on_tick_and_append_step(spec, store, time, test_steps)
|
||||
assert store.proposer_boost_root == spec.Root()
|
||||
assert spec.get_head(store) == spec.hash_tree_root(block_2)
|
||||
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'head': get_formatted_head_output(spec, store),
|
||||
}
|
||||
})
|
||||
output_head_check(spec, store, test_steps)
|
||||
|
||||
yield 'steps', test_steps
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_discard_equivocations(spec, state):
|
||||
def test_discard_equivocations_on_attester_slashing(spec, state):
|
||||
test_steps = []
|
||||
genesis_state = state.copy()
|
||||
|
||||
@@ -359,11 +313,7 @@ def test_discard_equivocations(spec, state):
|
||||
yield 'anchor_block', anchor_block
|
||||
anchor_root = get_anchor_root(spec, state)
|
||||
assert spec.get_head(store) == anchor_root
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'head': get_formatted_head_output(spec, store),
|
||||
}
|
||||
})
|
||||
output_head_check(spec, store, test_steps)
|
||||
|
||||
# Build block that serves as head before discarding equivocations
|
||||
state_1 = genesis_state.copy()
|
||||
@@ -418,11 +368,369 @@ def test_discard_equivocations(spec, state):
|
||||
# The head should revert to block_2
|
||||
yield from add_attester_slashing(spec, store, attester_slashing, test_steps)
|
||||
assert spec.get_head(store) == spec.hash_tree_root(block_2)
|
||||
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'head': get_formatted_head_output(spec, store),
|
||||
}
|
||||
})
|
||||
output_head_check(spec, store, test_steps)
|
||||
|
||||
yield 'steps', test_steps
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@with_presets([MINIMAL], reason="too slow")
|
||||
def test_discard_equivocations_slashed_validator_censoring(spec, state):
|
||||
# Check that the store does not count LMD votes from validators that are slashed in the justified state
|
||||
test_steps = []
|
||||
# Initialization
|
||||
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||
|
||||
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 0
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 0
|
||||
assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0
|
||||
|
||||
# We will slash all validators voting at the 2nd slot of epoch 0
|
||||
current_slot = spec.get_current_slot(store)
|
||||
eqv_slot = current_slot + 1
|
||||
eqv_epoch = spec.compute_epoch_at_slot(eqv_slot)
|
||||
assert eqv_slot % spec.SLOTS_PER_EPOCH == 1
|
||||
assert eqv_epoch == 0
|
||||
slashed_validators = []
|
||||
comm_count = spec.get_committee_count_per_slot(state, eqv_epoch)
|
||||
for comm_index in range(comm_count):
|
||||
comm = spec.get_beacon_committee(state, eqv_slot, comm_index)
|
||||
slashed_validators += comm
|
||||
assert len(slashed_validators) > 0
|
||||
|
||||
# Slash those validators in the state
|
||||
for val_index in slashed_validators:
|
||||
state.validators[val_index].slashed = True
|
||||
|
||||
# Store this state as the anchor state
|
||||
anchor_state = state.copy()
|
||||
# Generate an anchor block with correct state root
|
||||
anchor_block = spec.BeaconBlock(state_root=anchor_state.hash_tree_root())
|
||||
yield 'anchor_state', anchor_state
|
||||
yield 'anchor_block', anchor_block
|
||||
|
||||
# Get a new store with the anchor state & anchor block
|
||||
store = spec.get_forkchoice_store(anchor_state, anchor_block)
|
||||
|
||||
# Now generate the store checks
|
||||
current_time = anchor_state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
assert store.time == current_time
|
||||
|
||||
# Create two competing blocks at eqv_slot
|
||||
next_slots(spec, state, eqv_slot - state.slot - 1)
|
||||
assert state.slot == eqv_slot - 1
|
||||
|
||||
state_1 = state.copy()
|
||||
block_1 = build_empty_block_for_next_slot(spec, state_1)
|
||||
signed_block_1 = state_transition_and_sign_block(spec, state_1, block_1)
|
||||
|
||||
state_2 = state.copy()
|
||||
block_2 = build_empty_block_for_next_slot(spec, state_2)
|
||||
block_2.body.graffiti = b'\x42' * 32
|
||||
signed_block_2 = state_transition_and_sign_block(spec, state_2, block_2)
|
||||
|
||||
assert block_1.slot == block_2.slot == eqv_slot
|
||||
|
||||
# Add both blocks to the store
|
||||
yield from tick_and_add_block(spec, store, signed_block_1, test_steps)
|
||||
yield from tick_and_add_block(spec, store, signed_block_2, test_steps)
|
||||
|
||||
# Find out which block will win in tie breaking
|
||||
if spec.hash_tree_root(block_1) < spec.hash_tree_root(block_2):
|
||||
block_low_root = block_1.hash_tree_root()
|
||||
block_low_root_post_state = state_1
|
||||
block_high_root = block_2.hash_tree_root()
|
||||
else:
|
||||
block_low_root = block_2.hash_tree_root()
|
||||
block_low_root_post_state = state_2
|
||||
block_high_root = block_1.hash_tree_root()
|
||||
assert block_low_root < block_high_root
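# (With zero attestation weight on both blocks, `get_head` breaks the tie deterministically; roughly it takes
#  max(children, key=lambda root: (get_weight(store, root), root)), so the lexicographically higher root wins.)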
|
||||
|
||||
# Tick to next slot so proposer boost does not apply
|
||||
current_time = store.genesis_time + (block_1.slot + 1) * spec.config.SECONDS_PER_SLOT
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
|
||||
# Check that block with higher root wins
|
||||
assert spec.get_head(store) == block_high_root
|
||||
|
||||
# Create attestation for block with lower root
|
||||
attestation = get_valid_attestation(spec, block_low_root_post_state, slot=eqv_slot, index=0, signed=True)
|
||||
# Check that all attesting validators were slashed in the anchor state
|
||||
att_comm = spec.get_beacon_committee(block_low_root_post_state, eqv_slot, 0)
|
||||
for i in att_comm:
|
||||
assert anchor_state.validators[i].slashed
|
||||
# Add attestation to the store
|
||||
yield from add_attestation(spec, store, attestation, test_steps)
|
||||
# Check that block with higher root still wins
|
||||
assert spec.get_head(store) == block_high_root
|
||||
output_head_check(spec, store, test_steps)
|
||||
|
||||
yield 'steps', test_steps
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@with_presets([MINIMAL], reason="too slow")
|
||||
def test_voting_source_within_two_epoch(spec, state):
|
||||
"""
|
||||
Check that the store allows for a head block that has:
|
||||
- store.voting_source[block_root].epoch != store.justified_checkpoint.epoch, and
|
||||
- store.unrealized_justifications[block_root].epoch >= store.justified_checkpoint.epoch, and
|
||||
- store.voting_source[block_root].epoch + 2 >= current_epoch, and
|
||||
- store.finalized_checkpoint.root == get_checkpoint_block(store, block_root, store.finalized_checkpoint.epoch)
|
||||
"""
|
||||
test_steps = []
|
||||
# Initialization
|
||||
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||
yield 'anchor_state', state
|
||||
yield 'anchor_block', anchor_block
|
||||
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
assert store.time == current_time
|
||||
|
||||
next_epoch(spec, state)
|
||||
on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
|
||||
|
||||
# Fill epoch 1 to 3
|
||||
for _ in range(3):
|
||||
state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||
spec, state, store, True, True, test_steps=test_steps)
|
||||
|
||||
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
|
||||
assert store.finalized_checkpoint.epoch == 2
|
||||
|
||||
# Copy the state to use later
|
||||
fork_state = state.copy()
|
||||
|
||||
# Fill epoch 4
|
||||
state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||
spec, state, store, True, True, test_steps=test_steps)
|
||||
|
||||
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4
|
||||
assert store.finalized_checkpoint.epoch == 3
|
||||
|
||||
# Create a fork from the earlier saved state
|
||||
next_epoch(spec, fork_state)
|
||||
assert spec.compute_epoch_at_slot(fork_state.slot) == 5
|
||||
_, signed_blocks, fork_state = next_epoch_with_attestations(spec, fork_state, True, True)
|
||||
# Only keep the blocks from epoch 5, so discard the last generated block
|
||||
signed_blocks = signed_blocks[:-1]
|
||||
last_fork_block = signed_blocks[-1].message
|
||||
assert spec.compute_epoch_at_slot(last_fork_block.slot) == 5
|
||||
|
||||
# Now add the fork to the store
|
||||
for signed_block in signed_blocks:
|
||||
yield from tick_and_add_block(spec, store, signed_block, test_steps)
|
||||
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4
|
||||
assert store.finalized_checkpoint.epoch == 3
|
||||
|
||||
# Check that the last block from the fork is the head
|
||||
# LMD votes for the competing branch are overwritten so this fork should win
|
||||
last_fork_block_root = last_fork_block.hash_tree_root()
|
||||
# assert store.voting_source[last_fork_block_root].epoch != store.justified_checkpoint.epoch
|
||||
assert store.unrealized_justifications[last_fork_block_root].epoch >= store.justified_checkpoint.epoch
|
||||
# assert store.voting_source[last_fork_block_root].epoch + 2 >= \
|
||||
# spec.compute_epoch_at_slot(spec.get_current_slot(store))
|
||||
assert store.finalized_checkpoint.root == spec.get_checkpoint_block(
|
||||
store,
|
||||
last_fork_block_root,
|
||||
store.finalized_checkpoint.epoch
|
||||
)
|
||||
assert spec.get_head(store) == last_fork_block_root
|
||||
|
||||
yield 'steps', test_steps
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@with_presets([MINIMAL], reason="too slow")
|
||||
def test_voting_source_beyond_two_epoch(spec, state):
|
||||
"""
|
||||
Check that the store doesn't allow for a head block that has:
|
||||
- store.voting_source[block_root].epoch != store.justified_checkpoint.epoch, and
|
||||
- store.unrealized_justifications[block_root].epoch >= store.justified_checkpoint.epoch, and
|
||||
- store.voting_source[block_root].epoch + 2 < current_epoch, and
|
||||
- store.finalized_checkpoint.root == get_checkpoint_block(store, block_root, store.finalized_checkpoint.epoch)
|
||||
"""
|
||||
test_steps = []
|
||||
# Initialization
|
||||
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||
yield 'anchor_state', state
|
||||
yield 'anchor_block', anchor_block
|
||||
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
assert store.time == current_time
|
||||
|
||||
next_epoch(spec, state)
|
||||
on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
|
||||
|
||||
# Fill epoch 1 to 3
|
||||
for _ in range(3):
|
||||
state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||
spec, state, store, True, True, test_steps=test_steps)
|
||||
|
||||
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
|
||||
assert store.finalized_checkpoint.epoch == 2
|
||||
|
||||
# Copy the state to use later
|
||||
fork_state = state.copy()
|
||||
|
||||
# Fill epoch 4 and 5
|
||||
for _ in range(2):
|
||||
state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||
spec, state, store, True, True, test_steps=test_steps)
|
||||
|
||||
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5
|
||||
assert store.finalized_checkpoint.epoch == 4
|
||||
|
||||
# Create a fork from the earlier saved state
|
||||
for _ in range(2):
|
||||
next_epoch(spec, fork_state)
|
||||
assert spec.compute_epoch_at_slot(fork_state.slot) == 6
|
||||
assert fork_state.current_justified_checkpoint.epoch == 3
|
||||
_, signed_blocks, fork_state = next_epoch_with_attestations(spec, fork_state, True, True)
|
||||
# Only keep the blocks from epoch 6, so discard the last generated block
|
||||
signed_blocks = signed_blocks[:-1]
|
||||
last_fork_block = signed_blocks[-1].message
|
||||
assert spec.compute_epoch_at_slot(last_fork_block.slot) == 6
|
||||
|
||||
# Store the head before adding the fork to the store
|
||||
correct_head = spec.get_head(store)
|
||||
|
||||
# Now add the fork to the store
|
||||
for signed_block in signed_blocks:
|
||||
yield from tick_and_add_block(spec, store, signed_block, test_steps)
|
||||
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5
|
||||
assert store.finalized_checkpoint.epoch == 4
|
||||
|
||||
last_fork_block_root = last_fork_block.hash_tree_root()
|
||||
last_fork_block_state = store.block_states[last_fork_block_root]
|
||||
assert last_fork_block_state.current_justified_checkpoint.epoch == 3
|
||||
|
||||
# Check that the head is unchanged
|
||||
# assert store.voting_source[last_fork_block_root].epoch != store.justified_checkpoint.epoch
|
||||
assert store.unrealized_justifications[last_fork_block_root].epoch >= store.justified_checkpoint.epoch
|
||||
# assert store.voting_source[last_fork_block_root].epoch + 2 < \
|
||||
# spec.compute_epoch_at_slot(spec.get_current_slot(store))
|
||||
assert store.finalized_checkpoint.root == spec.get_checkpoint_block(
|
||||
store,
|
||||
last_fork_block_root,
|
||||
store.finalized_checkpoint.epoch
|
||||
)
|
||||
assert spec.get_head(store) == correct_head
|
||||
|
||||
yield 'steps', test_steps
|
||||
|
||||
|
||||
"""
|
||||
Note:
|
||||
We are unable to generate test vectors that check failure of the correct_finalized condition.
|
||||
We cannot generate a block that:
|
||||
- has !correct_finalized, and
|
||||
- has correct_justified, and
|
||||
- is a descendant of store.justified_checkpoint.root
|
||||
|
||||
The block being a descendant of store.justified_checkpoint.root is necessary because
|
||||
filter_block_tree descends the tree starting at store.justified_checkpoint.root
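(Intuitively, any block that descends from store.justified_checkpoint.root and agrees with the store on
justification also agrees with it on finalization, because the finalized checkpoint is an ancestor of the
justified checkpoint along that same chain.)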
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_incorrect_finalized(spec, state):
|
||||
# Check that the store doesn't allow for a head block that has:
|
||||
# - store.voting_source[block_root].epoch == store.justified_checkpoint.epoch, and
|
||||
# - store.finalized_checkpoint.epoch != GENESIS_EPOCH, and
|
||||
# - store.finalized_checkpoint.root != get_checkpoint_block(store, block_root, store.finalized_checkpoint.epoch)
|
||||
test_steps = []
|
||||
# Initialization
|
||||
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||
yield 'anchor_state', state
|
||||
yield 'anchor_block', anchor_block
|
||||
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
assert store.time == current_time
|
||||
|
||||
next_epoch(spec, state)
|
||||
on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
|
||||
|
||||
# Fill epoch 1 to 4
|
||||
for _ in range(4):
|
||||
state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||
spec, state, store, True, True, test_steps=test_steps)
|
||||
|
||||
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4
|
||||
assert store.finalized_checkpoint.epoch == 3
|
||||
|
||||
# Identify the fork block as the last block in epoch 4
|
||||
fork_block_root = state.latest_block_header.parent_root
|
||||
fork_block = store.blocks[fork_block_root]
|
||||
assert spec.compute_epoch_at_slot(fork_block.slot) == 4
|
||||
# Copy the state to use later
|
||||
fork_state = store.block_states[fork_block_root].copy()
|
||||
assert spec.compute_epoch_at_slot(fork_state.slot) == 4
|
||||
assert fork_state.current_justified_checkpoint.epoch == 3
|
||||
assert fork_state.finalized_checkpoint.epoch == 2
|
||||
|
||||
# Create a fork from the earlier saved state
|
||||
for _ in range(2):
|
||||
next_epoch(spec, fork_state)
|
||||
assert spec.compute_epoch_at_slot(fork_state.slot) == 6
|
||||
assert fork_state.current_justified_checkpoint.epoch == 4
|
||||
assert fork_state.finalized_checkpoint.epoch == 3
|
||||
# Fill epoch 6
|
||||
signed_blocks = []
|
||||
_, signed_blocks_1, fork_state = next_epoch_with_attestations(spec, fork_state, True, False)
|
||||
signed_blocks += signed_blocks_1
|
||||
assert spec.compute_epoch_at_slot(fork_state.slot) == 7
|
||||
# Check that epoch 6 is justified in this fork - it will be used as voting source for the tip of this fork
|
||||
assert fork_state.current_justified_checkpoint.epoch == 6
|
||||
assert fork_state.finalized_checkpoint.epoch == 3
|
||||
# Create a chain in epoch 7 that has new justification for epoch 7
|
||||
_, signed_blocks_2, fork_state = next_epoch_with_attestations(spec, fork_state, True, False)
|
||||
# Only keep the blocks from epoch 7, so discard the last generated block
|
||||
signed_blocks_2 = signed_blocks_2[:-1]
|
||||
signed_blocks += signed_blocks_2
|
||||
last_fork_block = signed_blocks[-1].message
|
||||
assert spec.compute_epoch_at_slot(last_fork_block.slot) == 7
|
||||
|
||||
# Now add the fork to the store
|
||||
for signed_block in signed_blocks:
|
||||
yield from tick_and_add_block(spec, store, signed_block, test_steps)
|
||||
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7
|
||||
assert store.justified_checkpoint.epoch == 6
|
||||
assert store.finalized_checkpoint.epoch == 3
|
||||
|
||||
# Fill epoch 5 and 6 in the original chain
|
||||
for _ in range(2):
|
||||
state, store, signed_head_block = yield from apply_next_epoch_with_attestations(
|
||||
spec, state, store, True, False, test_steps=test_steps)
|
||||
|
||||
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 6
|
||||
assert store.finalized_checkpoint.epoch == 5
|
||||
# Store the expected head
|
||||
head_root = signed_head_block.message.hash_tree_root()
|
||||
|
||||
# Check that the head is unchanged
|
||||
last_fork_block_root = last_fork_block.hash_tree_root()
|
||||
assert store.voting_source[last_fork_block_root].epoch == store.justified_checkpoint.epoch
|
||||
assert store.finalized_checkpoint.epoch != spec.GENESIS_EPOCH
|
||||
finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
|
||||
assert store.finalized_checkpoint.root != spec.get_checkpoint_block(
|
||||
store,
|
||||
last_fork_block_root,
|
||||
store.finalized_checkpoint.epoch
|
||||
)
|
||||
assert spec.get_head(store) != last_fork_block_root
|
||||
assert spec.get_head(store) == head_root
|
||||
|
||||
yield 'steps', test_steps
|
||||
"""
|
||||
|
||||
498 tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_reorg.py Normal file
@@ -0,0 +1,498 @@
|
||||
from eth2spec.test.context import (
|
||||
spec_state_test,
|
||||
with_altair_and_later,
|
||||
with_presets,
|
||||
)
|
||||
from eth2spec.test.helpers.constants import (
|
||||
MINIMAL,
|
||||
)
|
||||
from eth2spec.test.helpers.attestations import (
|
||||
state_transition_with_full_block,
|
||||
get_valid_attestation,
|
||||
get_valid_attestation_at_slot,
|
||||
)
|
||||
from eth2spec.test.helpers.block import (
|
||||
build_empty_block,
|
||||
build_empty_block_for_next_slot,
|
||||
)
|
||||
from eth2spec.test.helpers.fork_choice import (
|
||||
get_genesis_forkchoice_store_and_block,
|
||||
on_tick_and_append_step,
|
||||
add_attestations,
|
||||
tick_and_add_block,
|
||||
apply_next_epoch_with_attestations,
|
||||
find_next_justifying_slot,
|
||||
is_ready_to_justify,
|
||||
)
|
||||
from eth2spec.test.helpers.state import (
|
||||
state_transition_and_sign_block,
|
||||
next_epoch,
|
||||
next_slot,
|
||||
transition_to,
|
||||
)
|
||||
|
||||
|
||||
TESTING_PRESETS = [MINIMAL]
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@with_presets(TESTING_PRESETS, reason="too slow")
|
||||
def test_simple_attempted_reorg_without_enough_ffg_votes(spec, state):
|
||||
"""
|
||||
[Case 1]
|
||||
|
||||
{ epoch 4 }{ epoch 5 }
|
||||
[c4]<--[a]<--[-]<--[y]
|
||||
↑____[-]<--[z]
|
||||
|
||||
At c4, c3 is the latest justified checkpoint (or something earlier)
|
||||
|
||||
The block y doesn't have enough votes to justify c4.
|
||||
The block z also doesn't have enough votes to justify c4.
|
||||
"""
|
||||
test_steps = []
|
||||
# Initialization
|
||||
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||
yield 'anchor_state', state
|
||||
yield 'anchor_block', anchor_block
|
||||
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
assert store.time == current_time
|
||||
|
||||
next_epoch(spec, state)
|
||||
on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
|
||||
|
||||
# Fill epoch 1 to 3
|
||||
for _ in range(3):
|
||||
state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||
spec, state, store, True, True, test_steps=test_steps)
|
||||
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
|
||||
|
||||
# create block_a, it needs 2 more full blocks to justify epoch 4
|
||||
signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, True)
|
||||
assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state)
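# (`find_next_justifying_slot` is a test helper that, roughly, keeps building blocks with full attestation
#  participation and returns those signed blocks together with the slot at which the targeted checkpoint
#  becomes justifiable.)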
|
||||
for signed_block in signed_blocks[:-2]:
|
||||
yield from tick_and_add_block(spec, store, signed_block, test_steps)
|
||||
assert spec.get_head(store) == signed_block.message.hash_tree_root()
|
||||
state = store.block_states[spec.get_head(store)].copy()
|
||||
assert state.current_justified_checkpoint.epoch == 3
|
||||
next_slot(spec, state)
|
||||
state_a = state.copy()
|
||||
|
||||
# to test the "no withholding" situation, temporarily store the blocks in lists
|
||||
signed_blocks_of_y = []
|
||||
signed_blocks_of_z = []
|
||||
|
||||
# add an empty block on chain y
|
||||
block_y = build_empty_block_for_next_slot(spec, state)
|
||||
signed_block_y = state_transition_and_sign_block(spec, state, block_y)
|
||||
signed_blocks_of_y.append(signed_block_y)
|
||||
|
||||
# chain y has some on-chain attestations, but not enough to justify c4
|
||||
signed_block_y = state_transition_with_full_block(spec, state, True, True)
|
||||
assert not is_ready_to_justify(spec, state)
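# (`is_ready_to_justify` is a test helper that, roughly, checks whether running epoch processing on the given
#  state would justify the current epoch's checkpoint.)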
|
||||
signed_blocks_of_y.append(signed_block_y)
|
||||
assert store.justified_checkpoint.epoch == 3
|
||||
|
||||
state = state_a.copy()
|
||||
signed_block_z = None
|
||||
# add one block on chain z, which is not enough to justify c4
|
||||
attestation = get_valid_attestation(spec, state, slot=state.slot, signed=True)
|
||||
block_z = build_empty_block_for_next_slot(spec, state)
|
||||
block_z.body.attestations = [attestation]
|
||||
signed_block_z = state_transition_and_sign_block(spec, state, block_z)
|
||||
signed_blocks_of_z.append(signed_block_z)
|
||||
|
||||
# add an empty block on chain z
|
||||
block_z = build_empty_block_for_next_slot(spec, state)
|
||||
signed_block_z = state_transition_and_sign_block(spec, state, block_z)
|
||||
signed_blocks_of_z.append(signed_block_z)
|
||||
|
||||
# ensure z couldn't justify c4
|
||||
assert not is_ready_to_justify(spec, state)
|
||||
|
||||
# apply blocks to store
|
||||
# (i) slot block_a.slot + 1
|
||||
signed_block_y = signed_blocks_of_y.pop(0)
|
||||
yield from tick_and_add_block(spec, store, signed_block_y, test_steps)
|
||||
# apply block of chain `z`
|
||||
signed_block_z = signed_blocks_of_z.pop(0)
|
||||
yield from tick_and_add_block(spec, store, signed_block_z, test_steps)
|
||||
|
||||
# (ii) slot block_a.slot + 2
|
||||
# apply block of chain `z`
|
||||
signed_block_z = signed_blocks_of_z.pop(0)
|
||||
yield from tick_and_add_block(spec, store, signed_block_z, test_steps)
|
||||
# apply block of chain `y`
|
||||
signed_block_y = signed_blocks_of_y.pop(0)
|
||||
yield from tick_and_add_block(spec, store, signed_block_y, test_steps)
|
||||
# chain `y` remains the winner since it arrives earlier than `z`
|
||||
assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
|
||||
assert len(signed_blocks_of_y) == len(signed_blocks_of_z) == 0
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
|
||||
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
|
||||
|
||||
# tick to the slot prior to the epoch boundary
|
||||
slot = state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) - 1
|
||||
current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
|
||||
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
|
||||
# chain `y` remains the winner
|
||||
assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
|
||||
|
||||
# to next epoch
|
||||
next_epoch(spec, state)
|
||||
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
|
||||
assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
|
||||
|
||||
yield 'steps', test_steps
|
||||
|
||||
|
||||
def _run_delayed_justification(spec, state, attempted_reorg, is_justifying_previous_epoch):
|
||||
"""
|
||||
"""
|
||||
test_steps = []
|
||||
# Initialization
|
||||
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||
yield 'anchor_state', state
|
||||
yield 'anchor_block', anchor_block
|
||||
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
assert store.time == current_time
|
||||
|
||||
next_epoch(spec, state)
|
||||
on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
|
||||
|
||||
# Fill epoch 1 to 2
|
||||
for _ in range(2):
|
||||
state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||
spec, state, store, True, True, test_steps=test_steps)
|
||||
|
||||
if is_justifying_previous_epoch:
|
||||
state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||
spec, state, store, False, False, test_steps=test_steps)
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 2
|
||||
else:
|
||||
state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||
spec, state, store, True, True, test_steps=test_steps)
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
|
||||
|
||||
if is_justifying_previous_epoch:
|
||||
# try to find the block that can justify epoch 3
|
||||
signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, False, True)
|
||||
else:
|
||||
# try to find the block that can justify epoch 4
|
||||
signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, True)
|
||||
|
||||
assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state)
|
||||
for signed_block in signed_blocks:
|
||||
yield from tick_and_add_block(spec, store, signed_block, test_steps)
|
||||
assert spec.get_head(store) == signed_block.message.hash_tree_root()
|
||||
state = store.block_states[spec.get_head(store)].copy()
|
||||
if is_justifying_previous_epoch:
|
||||
assert state.current_justified_checkpoint.epoch == 2
|
||||
else:
|
||||
assert state.current_justified_checkpoint.epoch == 3
|
||||
|
||||
assert is_ready_to_justify(spec, state)
|
||||
state_b = state.copy()
|
||||
|
||||
# add chain y
|
||||
if is_justifying_previous_epoch:
|
||||
signed_block_y = state_transition_with_full_block(spec, state, False, True)
|
||||
else:
|
||||
signed_block_y = state_transition_with_full_block(spec, state, True, True)
|
||||
yield from tick_and_add_block(spec, store, signed_block_y, test_steps)
|
||||
assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
|
||||
if is_justifying_previous_epoch:
|
||||
assert store.justified_checkpoint.epoch == 2
|
||||
else:
|
||||
assert store.justified_checkpoint.epoch == 3
|
||||
|
||||
# add attestations of y
|
||||
temp_state = state.copy()
|
||||
next_slot(spec, temp_state)
|
||||
attestations_for_y = list(get_valid_attestation_at_slot(temp_state, spec, signed_block_y.message.slot))
|
||||
current_time = temp_state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
yield from add_attestations(spec, store, attestations_for_y, test_steps)
|
||||
assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
|
||||
|
||||
if attempted_reorg:
|
||||
# add chain z
|
||||
state = state_b.copy()
|
||||
slot = state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) - 1
|
||||
transition_to(spec, state, slot)
|
||||
block_z = build_empty_block_for_next_slot(spec, state)
|
||||
assert spec.compute_epoch_at_slot(block_z.slot) == 5
|
||||
signed_block_z = state_transition_and_sign_block(spec, state, block_z)
|
||||
yield from tick_and_add_block(spec, store, signed_block_z, test_steps)
|
||||
else:
|
||||
# next epoch
|
||||
state = state_b.copy()
|
||||
next_epoch(spec, state)
|
||||
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
|
||||
# no reorg
|
||||
assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
|
||||
if is_justifying_previous_epoch:
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
|
||||
else:
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4
|
||||
|
||||
yield 'steps', test_steps
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@with_presets(TESTING_PRESETS, reason="too slow")
|
||||
def test_simple_attempted_reorg_delayed_justification_current_epoch(spec, state):
|
||||
"""
|
||||
[Case 2]
|
||||
|
||||
{ epoch 4 }{ epoch 5 }
|
||||
[c4]<--[b]<--[y]
|
||||
↑______________[z]
|
||||
At c4, c3 is the latest justified checkpoint (or something earlier)
|
||||
|
||||
block_b: the block that can justify c4.
|
||||
z: the child of block b, proposed at the first slot of epoch 5.
|
||||
block z can reorg the chain from block y.
|
||||
"""
|
||||
yield from _run_delayed_justification(spec, state, attempted_reorg=True, is_justifying_previous_epoch=False)
|
||||
|
||||
|
||||
def _run_include_votes_of_another_empty_chain(spec, state, enough_ffg, is_justifying_previous_epoch):
|
||||
test_steps = []
|
||||
# Initialization
|
||||
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||
yield 'anchor_state', state
|
||||
yield 'anchor_block', anchor_block
|
||||
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
assert store.time == current_time
|
||||
|
||||
next_epoch(spec, state)
|
||||
on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
|
||||
|
||||
# Fill epoch 1 to 2
|
||||
for _ in range(2):
|
||||
state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||
spec, state, store, True, True, test_steps=test_steps)
|
||||
if is_justifying_previous_epoch:
|
||||
block_a = build_empty_block_for_next_slot(spec, state)
|
||||
signed_block_a = state_transition_and_sign_block(spec, state, block_a)
|
||||
yield from tick_and_add_block(spec, store, signed_block_a, test_steps)
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 2
|
||||
else:
|
||||
# fill one more epoch
|
||||
state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||
spec, state, store, True, True, test_steps=test_steps)
|
||||
signed_block_a = state_transition_with_full_block(spec, state, True, True)
|
||||
yield from tick_and_add_block(spec, store, signed_block_a, test_steps)
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
|
||||
|
||||
assert spec.get_head(store) == signed_block_a.message.hash_tree_root()
|
||||
|
||||
state = store.block_states[spec.get_head(store)].copy()
|
||||
if is_justifying_previous_epoch:
|
||||
assert state.current_justified_checkpoint.epoch == 2
|
||||
else:
|
||||
assert state.current_justified_checkpoint.epoch == 3
|
||||
state_a = state.copy()
|
||||
|
||||
if is_justifying_previous_epoch:
|
||||
# try to find the block that can justify epoch 3
|
||||
_, justifying_slot = find_next_justifying_slot(spec, state, False, True)
|
||||
else:
|
||||
# try to find the block that can justify epoch 4
|
||||
_, justifying_slot = find_next_justifying_slot(spec, state, True, True)
|
||||
|
||||
last_slot_of_z = justifying_slot if enough_ffg else justifying_slot - 1
|
||||
last_slot_of_y = justifying_slot if is_justifying_previous_epoch else last_slot_of_z - 1
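# (With enough FFG votes, chain z runs through the justifying slot so it can justify the checkpoint; otherwise
#  it stops one slot short. Chain y, the empty chain, ends one slot earlier still, except in the
#  previous-epoch case where it runs through the justifying slot itself.)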
|
||||
|
||||
# to test the "no withholding" situation, temporarily store the blocks in lists
|
||||
signed_blocks_of_y = []
|
||||
|
||||
# build an empty chain to the slot prior epoch boundary
|
||||
signed_blocks_of_empty_chain = []
|
||||
states_of_empty_chain = []
|
||||
|
||||
for slot in range(state.slot + 1, last_slot_of_y + 1):
|
||||
block = build_empty_block(spec, state, slot=slot)
|
||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||
signed_blocks_of_empty_chain.append(signed_block)
|
||||
states_of_empty_chain.append(state.copy())
|
||||
signed_blocks_of_y.append(signed_block)
|
||||
|
||||
signed_block_y = signed_blocks_of_empty_chain[-1]
|
||||
|
||||
# create 2/3 votes for the empty chain
|
||||
attestations_for_y = []
|
||||
# target_is_current = not is_justifying_previous_epoch
|
||||
attestations = list(get_valid_attestation_at_slot(state, spec, state_a.slot))
|
||||
attestations_for_y.append(attestations)
|
||||
for state in states_of_empty_chain:
|
||||
attestations = list(get_valid_attestation_at_slot(state, spec, state.slot))
|
||||
attestations_for_y.append(attestations)
|
||||
|
||||
state = state_a.copy()
|
||||
signed_block_z = None
|
||||
|
||||
for slot in range(state_a.slot + 1, last_slot_of_z + 1):
|
||||
# apply chain y, the empty chain
|
||||
if slot <= last_slot_of_y and len(signed_blocks_of_y) > 0:
|
||||
signed_block_y = signed_blocks_of_y.pop(0)
|
||||
assert signed_block_y.message.slot == slot
|
||||
yield from tick_and_add_block(spec, store, signed_block_y, test_steps)
|
||||
|
||||
# apply chain z, a fork chain that includes these attestations_for_y
|
||||
block = build_empty_block(spec, state, slot=slot)
|
||||
if (
|
||||
len(attestations_for_y) > 0 and (
|
||||
(not is_justifying_previous_epoch)
|
||||
or (is_justifying_previous_epoch and attestations_for_y[0][0].data.slot == slot - 5)
|
||||
)
|
||||
):
|
||||
block.body.attestations = attestations_for_y.pop(0)
|
||||
signed_block_z = state_transition_and_sign_block(spec, state, block)
|
||||
if signed_block_y != signed_block_z:
|
||||
yield from tick_and_add_block(spec, store, signed_block_z, test_steps)
|
||||
if is_ready_to_justify(spec, state):
|
||||
break
|
||||
|
||||
assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
|
||||
|
||||
if is_justifying_previous_epoch:
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 2
|
||||
else:
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
|
||||
|
||||
if enough_ffg:
|
||||
assert is_ready_to_justify(spec, state)
|
||||
else:
|
||||
assert not is_ready_to_justify(spec, state)
|
||||
|
||||
# to next epoch
|
||||
next_epoch(spec, state)
|
||||
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
|
||||
|
||||
if enough_ffg:
|
||||
# reorg
|
||||
assert spec.get_head(store) == signed_block_z.message.hash_tree_root()
|
||||
if is_justifying_previous_epoch:
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
|
||||
else:
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4
|
||||
else:
|
||||
# no reorg
|
||||
assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
|
||||
|
||||
yield 'steps', test_steps
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@with_presets(TESTING_PRESETS, reason="too slow")
|
||||
def test_include_votes_another_empty_chain_with_enough_ffg_votes_current_epoch(spec, state):
|
||||
"""
|
||||
[Case 3]
|
||||
"""
|
||||
yield from _run_include_votes_of_another_empty_chain(
|
||||
spec, state, enough_ffg=True, is_justifying_previous_epoch=False)
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@with_presets(TESTING_PRESETS, reason="too slow")
|
||||
def test_include_votes_another_empty_chain_without_enough_ffg_votes_current_epoch(spec, state):
|
||||
"""
|
||||
[Case 4]
|
||||
"""
|
||||
yield from _run_include_votes_of_another_empty_chain(
|
||||
spec, state, enough_ffg=False, is_justifying_previous_epoch=False)
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@with_presets(TESTING_PRESETS, reason="too slow")
|
||||
def test_delayed_justification_current_epoch(spec, state):
|
||||
"""
|
||||
[Case 5]
|
||||
|
||||
To compare with ``test_simple_attempted_reorg_delayed_justification_current_epoch``,
|
||||
this is the basic case if there is no chain z
|
||||
|
||||
{ epoch 4 }{ epoch 5 }
|
||||
[c4]<--[b]<--[y]
|
||||
|
||||
At c4, c3 is the latest justified checkpoint.
|
||||
|
||||
block_b: the block that can justify c4.
|
||||
"""
|
||||
yield from _run_delayed_justification(spec, state, attempted_reorg=False, is_justifying_previous_epoch=False)
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@with_presets(TESTING_PRESETS, reason="too slow")
|
||||
def test_delayed_justification_previous_epoch(spec, state):
|
||||
"""
|
||||
[Case 6]
|
||||
|
||||
Similar to ``test_delayed_justification_current_epoch``,
|
||||
but includes attestations during epoch N to justify checkpoint N-1.
|
||||
|
||||
{ epoch 3 }{ epoch 4 }{ epoch 5 }
|
||||
[c3]<---------------[c4]---[b]<---------------------------------[y]
|
||||
|
||||
"""
|
||||
yield from _run_delayed_justification(spec, state, attempted_reorg=False, is_justifying_previous_epoch=True)
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@with_presets(TESTING_PRESETS, reason="too slow")
|
||||
def test_simple_attempted_reorg_delayed_justification_previous_epoch(spec, state):
|
||||
"""
|
||||
[Case 7]
|
||||
|
||||
Similar to ``test_simple_attempted_reorg_delayed_justification_current_epoch``,
|
||||
but includes attestations during epoch N to justify checkpoint N-1.
|
||||
|
||||
{ epoch 3 }{ epoch 4 }{ epoch 5 }
|
||||
[c3]<---------------[c4]<--[b]<--[y]
|
||||
↑______________[z]
|
||||
|
||||
At c4, c2 is the latest justified checkpoint.
|
||||
|
||||
block_b: the block that can justify c3.
|
||||
z: the child of block b, proposed at the first slot of epoch 5.
|
||||
block z can reorg the chain from block y.
|
||||
"""
|
||||
yield from _run_delayed_justification(spec, state, attempted_reorg=True, is_justifying_previous_epoch=True)
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@with_presets(TESTING_PRESETS, reason="too slow")
|
||||
def test_include_votes_another_empty_chain_with_enough_ffg_votes_previous_epoch(spec, state):
|
||||
"""
|
||||
[Case 8]
|
||||
|
||||
Similar to ``test_include_votes_another_empty_chain_with_enough_ffg_votes_current_epoch``,
|
||||
but includes attestations during epoch N to justify checkpoint N-1.
|
||||
|
||||
"""
|
||||
yield from _run_include_votes_of_another_empty_chain(
|
||||
spec, state, enough_ffg=True, is_justifying_previous_epoch=True)
|
||||
@@ -0,0 +1,205 @@
|
||||
from eth2spec.test.context import (
|
||||
spec_state_test,
|
||||
with_altair_and_later,
|
||||
with_presets,
|
||||
)
|
||||
from eth2spec.test.helpers.constants import (
|
||||
MINIMAL,
|
||||
)
|
||||
from eth2spec.test.helpers.attestations import (
|
||||
state_transition_with_full_block,
|
||||
)
|
||||
from eth2spec.test.helpers.block import (
|
||||
build_empty_block_for_next_slot,
|
||||
)
|
||||
from eth2spec.test.helpers.fork_choice import (
|
||||
get_genesis_forkchoice_store_and_block,
|
||||
on_tick_and_append_step,
|
||||
tick_and_add_block,
|
||||
apply_next_epoch_with_attestations,
|
||||
find_next_justifying_slot,
|
||||
)
|
||||
from eth2spec.test.helpers.state import (
|
||||
state_transition_and_sign_block,
|
||||
next_epoch,
|
||||
)
|
||||
|
||||
|
||||
TESTING_PRESETS = [MINIMAL]
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@with_presets(TESTING_PRESETS, reason="too slow")
|
||||
def test_withholding_attack(spec, state):
|
||||
"""
|
||||
"""
|
||||
test_steps = []
|
||||
# Initialization
|
||||
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||
yield 'anchor_state', state
|
||||
yield 'anchor_block', anchor_block
|
||||
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
assert store.time == current_time
|
||||
|
||||
next_epoch(spec, state)
|
||||
on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
|
||||
|
||||
# Fill epoch 1 to 3
|
||||
for _ in range(3):
|
||||
state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||
spec, state, store, True, True, test_steps=test_steps)
|
||||
|
||||
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
|
||||
|
||||
# Create the attack block that includes justifying attestations for epoch 4
|
||||
# This block is withheld & revealed only in epoch 5
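# (Withholding strategy: the attacker privately builds the chain segment whose last block would justify the
#  current epoch and reveals that block only in the next epoch, hoping the late justification lets it
#  displace the honest head.)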
|
||||
signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, False)
|
||||
assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state)
|
||||
assert len(signed_blocks) > 1
|
||||
signed_attack_block = signed_blocks[-1]
|
||||
for signed_block in signed_blocks[:-1]:
|
||||
yield from tick_and_add_block(spec, store, signed_block, test_steps)
|
||||
assert spec.get_head(store) == signed_block.message.hash_tree_root()
|
||||
assert spec.get_head(store) == signed_blocks[-2].message.hash_tree_root()
|
||||
state = store.block_states[spec.get_head(store)].copy()
|
||||
assert spec.compute_epoch_at_slot(state.slot) == 4
|
||||
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
|
||||
|
||||
# Create an honest chain in epoch 5 that includes the justifying attestations from the attack block
|
||||
next_epoch(spec, state)
|
||||
assert spec.compute_epoch_at_slot(state.slot) == 5
|
||||
assert state.current_justified_checkpoint.epoch == 3
|
||||
# Create two blocks in the honest chain with full attestations, and add to the store
|
||||
for _ in range(2):
|
||||
signed_block = state_transition_with_full_block(spec, state, True, False)
|
||||
yield from tick_and_add_block(spec, store, signed_block, test_steps)
|
||||
# Create final block in the honest chain that includes the justifying attestations from the attack block
|
||||
honest_block = build_empty_block_for_next_slot(spec, state)
|
||||
honest_block.body.attestations = signed_attack_block.message.body.attestations
|
||||
signed_honest_block = state_transition_and_sign_block(spec, state, honest_block)
|
||||
# Add the honest block to the store
|
||||
yield from tick_and_add_block(spec, store, signed_honest_block, test_steps)
|
||||
assert spec.get_head(store) == signed_honest_block.message.hash_tree_root()
|
||||
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
|
||||
|
||||
# Tick to the next slot so proposer boost is not a factor in choosing the head
|
||||
current_time = (honest_block.slot + 1) * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
assert spec.get_head(store) == signed_honest_block.message.hash_tree_root()
|
||||
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
|
||||
|
||||
# Upon revealing the withheld attack block, the honest block should still be the head
|
||||
yield from tick_and_add_block(spec, store, signed_attack_block, test_steps)
|
||||
assert spec.get_head(store) == signed_honest_block.message.hash_tree_root()
|
||||
# As a side effect of the pull-up logic, the attack block is pulled up and store.justified_checkpoint is updated
|
||||
assert store.justified_checkpoint.epoch == 4
|
||||
|
||||
# Even after going to the next epoch, the honest block should remain the head
|
||||
slot = spec.get_current_slot(store) + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH)
|
||||
current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6
|
||||
assert spec.get_head(store) == signed_honest_block.message.hash_tree_root()
|
||||
|
||||
yield 'steps', test_steps
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@with_presets(TESTING_PRESETS, reason="too slow")
|
||||
def test_withholding_attack_unviable_honest_chain(spec, state):
|
||||
"""
|
||||
Checks that the withholding attack succeeds for one epoch if the honest chain's voting source is
more than two epochs old.
|
||||
"""
|
||||
test_steps = []
|
||||
# Initialization
|
||||
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||
yield 'anchor_state', state
|
||||
yield 'anchor_block', anchor_block
|
||||
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
assert store.time == current_time
|
||||
|
||||
next_epoch(spec, state)
|
||||
on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
|
||||
|
||||
# Fill epoch 1 to 3
|
||||
for _ in range(3):
|
||||
state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||
spec, state, store, True, True, test_steps=test_steps)
|
||||
|
||||
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
|
||||
|
||||
next_epoch(spec, state)
|
||||
assert spec.compute_epoch_at_slot(state.slot) == 5
|
||||
|
||||
# Create the attack block that includes justifying attestations for epoch 5
|
||||
# This block is withheld & revealed only in epoch 6
|
||||
signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, False)
|
||||
assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state)
|
||||
assert len(signed_blocks) > 1
|
||||
signed_attack_block = signed_blocks[-1]
|
||||
for signed_block in signed_blocks[:-1]:
|
||||
yield from tick_and_add_block(spec, store, signed_block, test_steps)
|
||||
assert spec.get_head(store) == signed_block.message.hash_tree_root()
|
||||
assert spec.get_head(store) == signed_blocks[-2].message.hash_tree_root()
|
||||
state = store.block_states[spec.get_head(store)].copy()
|
||||
assert spec.compute_epoch_at_slot(state.slot) == 5
|
||||
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
|
||||
|
||||
# Create an honest chain in epoch 6 that includes the justifying attestations from the attack block
|
||||
next_epoch(spec, state)
|
||||
assert spec.compute_epoch_at_slot(state.slot) == 6
|
||||
assert state.current_justified_checkpoint.epoch == 3
|
||||
# Create two blocks in the honest chain with full attestations, and add to the store
|
||||
for _ in range(2):
|
||||
signed_block = state_transition_with_full_block(spec, state, True, False)
|
||||
assert state.current_justified_checkpoint.epoch == 3
|
||||
yield from tick_and_add_block(spec, store, signed_block, test_steps)
|
||||
# Create final block in the honest chain that includes the justifying attestations from the attack block
|
||||
honest_block = build_empty_block_for_next_slot(spec, state)
|
||||
honest_block.body.attestations = signed_attack_block.message.body.attestations
|
||||
signed_honest_block = state_transition_and_sign_block(spec, state, honest_block)
|
||||
honest_block_root = signed_honest_block.message.hash_tree_root()
|
||||
assert state.current_justified_checkpoint.epoch == 3
|
||||
# Add the honest block to the store
|
||||
yield from tick_and_add_block(spec, store, signed_honest_block, test_steps)
|
||||
current_epoch = spec.compute_epoch_at_slot(spec.get_current_slot(store))
|
||||
assert current_epoch == 6
|
||||
# assert store.voting_source[honest_block_root].epoch == 3
|
||||
assert spec.get_head(store) == honest_block_root
|
||||
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
|
||||
|
||||
# Tick to the next slot so proposer boost is not a factor in choosing the head
|
||||
current_time = (honest_block.slot + 1) * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
assert spec.get_head(store) == honest_block_root
|
||||
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
|
||||
|
||||
# Upon revealing the withheld attack block, it should become the head
|
||||
yield from tick_and_add_block(spec, store, signed_attack_block, test_steps)
|
||||
# The attack block is pulled up and store.justified_checkpoint is updated
|
||||
assert store.justified_checkpoint.epoch == 5
|
||||
attack_block_root = signed_attack_block.message.hash_tree_root()
|
||||
assert spec.get_head(store) == attack_block_root
|
||||
|
||||
# After going to the next epoch, the honest block should become the head
|
||||
slot = spec.get_current_slot(store) + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH)
|
||||
current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7
|
||||
# assert store.voting_source[honest_block_root].epoch == 5
|
||||
assert spec.get_head(store) == honest_block_root
|
||||
|
||||
yield 'steps', test_steps
|
||||
@@ -1,87 +0,0 @@
|
||||
from copy import deepcopy
|
||||
|
||||
from eth2spec.utils.ssz.ssz_impl import hash_tree_root
|
||||
from eth2spec.test.context import (
|
||||
spec_state_test,
|
||||
with_all_phases,
|
||||
)
|
||||
from eth2spec.test.helpers.block import (
|
||||
build_empty_block_for_next_slot,
|
||||
)
|
||||
from eth2spec.test.helpers.fork_choice import (
|
||||
get_genesis_forkchoice_store,
|
||||
run_on_block,
|
||||
apply_next_epoch_with_attestations,
|
||||
)
|
||||
from eth2spec.test.helpers.state import (
|
||||
next_epoch,
|
||||
state_transition_and_sign_block,
|
||||
)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state):
|
||||
"""
|
||||
NOTE: test_new_justified_is_later_than_store_justified also tests best_justified_checkpoint
|
||||
"""
|
||||
# Initialization
|
||||
store = get_genesis_forkchoice_store(spec, state)
|
||||
|
||||
next_epoch(spec, state)
|
||||
spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT)
|
||||
state, store, last_signed_block = yield from apply_next_epoch_with_attestations(
|
||||
spec, state, store, True, False)
|
||||
last_block_root = hash_tree_root(last_signed_block.message)
|
||||
|
||||
# NOTE: Mock fictitious justified checkpoint in store
|
||||
store.justified_checkpoint = spec.Checkpoint(
|
||||
epoch=spec.compute_epoch_at_slot(last_signed_block.message.slot),
|
||||
root=spec.Root("0x4a55535449464945440000000000000000000000000000000000000000000000")
|
||||
)
|
||||
|
||||
next_epoch(spec, state)
|
||||
spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT)
|
||||
|
||||
# Create new higher justified checkpoint not in branch of store's justified checkpoint
|
||||
just_block = build_empty_block_for_next_slot(spec, state)
|
||||
store.blocks[just_block.hash_tree_root()] = just_block
|
||||
|
||||
# Step time past safe slots
|
||||
spec.on_tick(store, store.time + spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED * spec.config.SECONDS_PER_SLOT)
|
||||
assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED
|
||||
|
||||
previously_finalized = store.finalized_checkpoint
|
||||
previously_justified = store.justified_checkpoint
|
||||
|
||||
# Add a series of new blocks with "better" justifications
|
||||
best_justified_checkpoint = spec.Checkpoint(epoch=0)
|
||||
for i in range(3, 0, -1):
|
||||
# Mutate store
|
||||
just_state = store.block_states[last_block_root]
|
||||
new_justified = spec.Checkpoint(
|
||||
epoch=previously_justified.epoch + i,
|
||||
root=just_block.hash_tree_root(),
|
||||
)
|
||||
if new_justified.epoch > best_justified_checkpoint.epoch:
|
||||
best_justified_checkpoint = new_justified
|
||||
|
||||
just_state.current_justified_checkpoint = new_justified
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, just_state)
|
||||
signed_block = state_transition_and_sign_block(spec, deepcopy(just_state), block)
|
||||
|
||||
# NOTE: Mock store so that the modified state could be accessed
|
||||
parent_block = store.blocks[last_block_root].copy()
|
||||
parent_block.state_root = just_state.hash_tree_root()
|
||||
store.blocks[block.parent_root] = parent_block
|
||||
store.block_states[block.parent_root] = just_state.copy()
|
||||
assert block.parent_root in store.blocks.keys()
|
||||
assert block.parent_root in store.block_states.keys()
|
||||
|
||||
run_on_block(spec, store, signed_block)
|
||||
|
||||
assert store.finalized_checkpoint == previously_finalized
|
||||
assert store.justified_checkpoint == previously_justified
|
||||
# ensure the best from the series was stored
|
||||
assert store.best_justified_checkpoint == best_justified_checkpoint
|
||||
@@ -18,7 +18,6 @@ def run_on_tick(spec, store, time, new_justified_checkpoint=False):
|
||||
assert store.time == time
|
||||
|
||||
if new_justified_checkpoint:
|
||||
assert store.justified_checkpoint == store.best_justified_checkpoint
|
||||
assert store.justified_checkpoint.epoch > previous_justified_checkpoint.epoch
|
||||
assert store.justified_checkpoint.root != previous_justified_checkpoint.root
|
||||
else:
|
||||
@@ -32,12 +31,12 @@ def test_basic(spec, state):
|
||||
run_on_tick(spec, store, store.time + 1)
|
||||
|
||||
|
||||
"""
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_update_justified_single_on_store_finalized_chain(spec, state):
|
||||
store = get_genesis_forkchoice_store(spec, state)
|
||||
|
||||
# [Mock store.best_justified_checkpoint]
|
||||
# Create a block at epoch 1
|
||||
next_epoch(spec, state)
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
@@ -58,8 +57,6 @@ def test_update_justified_single_on_store_finalized_chain(spec, state):
|
||||
state_transition_and_sign_block(spec, state, block)
|
||||
store.blocks[block.hash_tree_root()] = block
|
||||
store.block_states[block.hash_tree_root()] = state
|
||||
# Mock store.best_justified_checkpoint
|
||||
store.best_justified_checkpoint = state.current_justified_checkpoint.copy()
|
||||
|
||||
run_on_tick(
|
||||
spec,
|
||||
@@ -67,6 +64,7 @@ def test_update_justified_single_on_store_finalized_chain(spec, state):
|
||||
store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT,
|
||||
new_justified_checkpoint=True
|
||||
)
|
||||
"""
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@@ -89,7 +87,6 @@ def test_update_justified_single_not_on_store_finalized_chain(spec, state):
|
||||
root=block.hash_tree_root(),
|
||||
)
|
||||
|
||||
# [Mock store.best_justified_checkpoint]
|
||||
# Create a block at epoch 1
|
||||
state = init_state.copy()
|
||||
next_epoch(spec, state)
|
||||
@@ -112,79 +109,9 @@ def test_update_justified_single_not_on_store_finalized_chain(spec, state):
|
||||
state_transition_and_sign_block(spec, state, block)
|
||||
store.blocks[block.hash_tree_root()] = block.copy()
|
||||
store.block_states[block.hash_tree_root()] = state.copy()
|
||||
# Mock store.best_justified_checkpoint
|
||||
store.best_justified_checkpoint = state.current_justified_checkpoint.copy()
|
||||
|
||||
run_on_tick(
|
||||
spec,
|
||||
store,
|
||||
store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT,
|
||||
)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_no_update_same_slot_at_epoch_boundary(spec, state):
|
||||
store = get_genesis_forkchoice_store(spec, state)
|
||||
seconds_per_epoch = spec.config.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH
|
||||
|
||||
store.best_justified_checkpoint = spec.Checkpoint(
|
||||
epoch=store.justified_checkpoint.epoch + 1,
|
||||
root=b'\x55' * 32,
|
||||
)
|
||||
|
||||
# set store time to already be at epoch boundary
|
||||
store.time = seconds_per_epoch
|
||||
|
||||
run_on_tick(spec, store, store.time + 1)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_no_update_not_epoch_boundary(spec, state):
|
||||
store = get_genesis_forkchoice_store(spec, state)
|
||||
|
||||
store.best_justified_checkpoint = spec.Checkpoint(
|
||||
epoch=store.justified_checkpoint.epoch + 1,
|
||||
root=b'\x55' * 32,
|
||||
)
|
||||
|
||||
run_on_tick(spec, store, store.time + spec.config.SECONDS_PER_SLOT)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_no_update_new_justified_equal_epoch(spec, state):
|
||||
store = get_genesis_forkchoice_store(spec, state)
|
||||
seconds_per_epoch = spec.config.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH
|
||||
|
||||
store.best_justified_checkpoint = spec.Checkpoint(
|
||||
epoch=store.justified_checkpoint.epoch + 1,
|
||||
root=b'\x55' * 32,
|
||||
)
|
||||
|
||||
store.justified_checkpoint = spec.Checkpoint(
|
||||
epoch=store.best_justified_checkpoint.epoch,
|
||||
root=b'\44' * 32,
|
||||
)
|
||||
|
||||
run_on_tick(spec, store, store.time + seconds_per_epoch)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_no_update_new_justified_later_epoch(spec, state):
|
||||
store = get_genesis_forkchoice_store(spec, state)
|
||||
seconds_per_epoch = spec.config.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH
|
||||
|
||||
store.best_justified_checkpoint = spec.Checkpoint(
|
||||
epoch=store.justified_checkpoint.epoch + 1,
|
||||
root=b'\x55' * 32,
|
||||
)
|
||||
|
||||
store.justified_checkpoint = spec.Checkpoint(
|
||||
epoch=store.best_justified_checkpoint.epoch + 1,
|
||||
root=b'\44' * 32,
|
||||
)
|
||||
|
||||
run_on_tick(spec, store, store.time + seconds_per_epoch)
|
||||
|
||||
@@ -75,7 +75,9 @@ def test_time(spec, state):
@with_all_phases
@spec_state_test
def test_networking(spec, state):
    assert spec.RANDOM_SUBNETS_PER_VALIDATOR <= spec.ATTESTATION_SUBNET_COUNT
    assert spec.SUBNETS_PER_NODE <= spec.ATTESTATION_SUBNET_COUNT
    node_id_length = spec.NodeID(1).type_byte_length()  # in bytes
    assert node_id_length * 8 == spec.NODE_ID_BITS  # in bits


@with_all_phases

@@ -1,6 +1,12 @@
import random

from eth2spec.test.context import (
    single_phase,
    spec_state_test,
    always_bls, with_phases, with_all_phases,
    spec_test,
    always_bls,
    with_phases,
    with_all_phases,
)
from eth2spec.test.helpers.constants import PHASE0
from eth2spec.test.helpers.attestations import build_attestation_data, get_valid_attestation
@@ -476,3 +482,34 @@ def test_get_aggregate_and_proof_signature(spec, state):
        privkey=privkey,
        pubkey=pubkey,
    )


def run_compute_subscribed_subnets_arguments(spec, rng=random.Random(1111)):
    node_id = rng.randint(0, 2**40 - 1)  # try VALIDATOR_REGISTRY_LIMIT
    epoch = rng.randint(0, 2**64 - 1)
    subnets = spec.compute_subscribed_subnets(node_id, epoch)
    assert len(subnets) == spec.SUBNETS_PER_NODE


@with_all_phases
@spec_test
@single_phase
def test_compute_subscribed_subnets_random_1(spec):
    rng = random.Random(1111)
    run_compute_subscribed_subnets_arguments(spec, rng)


@with_all_phases
@spec_test
@single_phase
def test_compute_subscribed_subnets_random_2(spec):
    rng = random.Random(2222)
    run_compute_subscribed_subnets_arguments(spec, rng)


@with_all_phases
@spec_test
@single_phase
def test_compute_subscribed_subnets_random_3(spec):
    rng = random.Random(3333)
    run_compute_subscribed_subnets_arguments(spec, rng)

@@ -235,7 +235,7 @@ def random_block_capella(spec, state, signed_blocks, scenario_state, rng=Random(
def random_block_deneb(spec, state, signed_blocks, scenario_state, rng=Random(3456)):
    block = random_block_capella(spec, state, signed_blocks, scenario_state)
    # TODO: more commitments. blob_kzg_commitments: List[KZGCommitment, MAX_BLOBS_PER_BLOCK]
    opaque_tx, _, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=1)
    opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec, blob_count=1)
    block.body.execution_payload.transactions = [opaque_tx]
    block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
    block.body.blob_kzg_commitments = blob_kzg_commitments

@@ -12,6 +12,7 @@ from py_ecc.optimized_bls12_381 import (  # noqa: F401
    FQ12 as py_ecc_GT,
)
from py_ecc.bls.g2_primitives import (  # noqa: F401
    curve_order as BLS_MODULUS,
    G1_to_pubkey as py_ecc_G1_to_bytes48,
    pubkey_to_G1 as py_ecc_bytes48_to_G1,
    G2_to_signature as py_ecc_G2_to_bytes96,

@@ -10,20 +10,13 @@ from typing import (
from pathlib import Path

from eth_utils import encode_hex
from py_ecc.optimized_bls12_381 import (  # noqa: F401
    G1,
    G2,
    Z1,
    Z2,
    curve_order as BLS_MODULUS,
    add,
    multiply,
    neg,
)
from py_ecc.typing import (
    Optimized_Point3D,
)
from eth2spec.utils import bls
from eth2spec.utils.bls import (
    BLS_MODULUS,
)


PRIMITIVE_ROOT_OF_UNITY = 7
@@ -35,7 +28,7 @@ def generate_setup(generator: Optimized_Point3D, secret: int, length: int) -> Tu
    """
    result = [generator]
    for _ in range(1, length):
        result.append(multiply(result[-1], secret))
        result.append(bls.multiply(result[-1], secret))
    return tuple(result)


@@ -49,9 +42,9 @@ def fft(vals: Sequence[Optimized_Point3D], modulus: int, domain: int) -> Sequenc
    R = fft(vals[1::2], modulus, domain[::2])
    o = [0] * len(vals)
    for i, (x, y) in enumerate(zip(L, R)):
        y_times_root = multiply(y, domain[i])
        o[i] = add(x, y_times_root)
        o[i + len(L)] = add(x, neg(y_times_root))
        y_times_root = bls.multiply(y, domain[i])
        o[i] = bls.add(x, y_times_root)
        o[i + len(L)] = bls.add(x, bls.neg(y_times_root))
    return o


@@ -90,12 +83,14 @@ def get_lagrange(setup: Sequence[Optimized_Point3D]) -> Tuple[bytes]:
    # TODO: introduce an IFFT function for simplicity
    fft_output = fft(setup, BLS_MODULUS, domain)
    inv_length = pow(len(setup), BLS_MODULUS - 2, BLS_MODULUS)
    return tuple(bls.G1_to_bytes48(multiply(fft_output[-i], inv_length)) for i in range(len(fft_output)))
    return tuple(bls.G1_to_bytes48(bls.multiply(fft_output[-i], inv_length)) for i in range(len(fft_output)))


def dump_kzg_trusted_setup_files(secret: int, g1_length: int, g2_length: int, output_dir: str) -> None:
    setup_g1 = generate_setup(bls.G1, secret, g1_length)
    setup_g2 = generate_setup(bls.G2, secret, g2_length)
    bls.use_fastest()

    setup_g1 = generate_setup(bls.G1(), secret, g1_length)
    setup_g2 = generate_setup(bls.G2(), secret, g2_length)
    setup_g1_lagrange = get_lagrange(setup_g1)
    roots_of_unity = compute_roots_of_unity(g1_length)

@@ -114,8 +114,8 @@ Optional step for optimistic sync tests.

This step sets the [`payloadStatus`](https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#PayloadStatusV1)
value that the Execution Layer client mock returns in responses to the following Engine API calls:
* [`engine_newPayloadV1(payload)`](https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_newpayloadv1) if `payload.blockHash == payload_info.block_hash`
* [`engine_forkchoiceUpdatedV1(forkchoiceState, ...)`](https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_forkchoiceupdatedv1) if `forkchoiceState.headBlockHash == payload_info.block_hash`
* [`engine_newPayloadV1(payload)`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#engine_newpayloadv1) if `payload.blockHash == payload_info.block_hash`
* [`engine_forkchoiceUpdatedV1(forkchoiceState, ...)`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#engine_forkchoiceupdatedv1) if `forkchoiceState.headBlockHash == payload_info.block_hash`

*Note:* Status of a payload must be *initialized* via `on_payload_info` before the corresponding `on_block` execution step.

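For orientation, here is a minimal sketch of how a test runner could consume an `on_payload_info` step: the mocked `PayloadStatusV1` is recorded by block hash and returned whenever the client queries `engine_newPayloadV1` or `engine_forkchoiceUpdatedV1` with a matching hash. The class, method, and field names below are illustrative assumptions, not part of the spec repository.

```python
# Hypothetical Execution Layer mock for fork choice / optimistic sync test runners.
from dataclasses import dataclass, field
from typing import Dict, Optional


@dataclass
class PayloadStatus:
    status: str                              # e.g. "VALID", "INVALID", "SYNCING"
    latest_valid_hash: Optional[str] = None
    validation_error: Optional[str] = None


@dataclass
class MockExecutionEngine:
    # block_hash -> status injected by `on_payload_info` steps
    statuses: Dict[str, PayloadStatus] = field(default_factory=dict)

    def on_payload_info(self, block_hash: str, status: PayloadStatus) -> None:
        # Must run before the corresponding `on_block` step (see note above).
        self.statuses[block_hash] = status

    def new_payload(self, payload_block_hash: str) -> PayloadStatus:
        # Mocked engine_newPayloadV1: return the injected status, if any.
        return self.statuses.get(payload_block_hash, PayloadStatus(status="SYNCING"))

    def forkchoice_updated(self, head_block_hash: str) -> PayloadStatus:
        # Mocked engine_forkchoiceUpdatedV1: same lookup, keyed by headBlockHash.
        return self.statuses.get(head_block_hash, PayloadStatus(status="SYNCING"))
```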
@@ -146,10 +146,6 @@ finalized_checkpoint: {
    epoch: int,                  -- Integer value from store.finalized_checkpoint.epoch
    root: string,                -- Encoded 32-byte value from store.finalized_checkpoint.root
}
best_justified_checkpoint: {
    epoch: int,                  -- Integer value from store.best_justified_checkpoint.epoch
    root: string,                -- Encoded 32-byte value from store.best_justified_checkpoint.root
}
proposer_boost_root: string      -- Encoded 32-byte value from store.proposer_boost_root
```

@@ -160,7 +156,6 @@ For example:
head: {slot: 32, root: '0xdaa1d49d57594ced0c35688a6da133abb086d191a2ebdfd736fad95299325aeb'}
justified_checkpoint: {epoch: 3, root: '0xc25faab4acab38d3560864ca01e4d5cc4dc2cd473da053fbc03c2669143a2de4'}
finalized_checkpoint: {epoch: 2, root: '0x40d32d6283ec11c53317a46808bc88f55657d93b95a1af920403187accf48f4f'}
best_justified_checkpoint: {epoch: 3, root: '0xc25faab4acab38d3560864ca01e4d5cc4dc2cd473da053fbc03c2669143a2de4'}
proposer_boost_root: '0xdaa1d49d57594ced0c35688a6da133abb086d191a2ebdfd736fad95299325aeb'
```

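To illustrate how the fields in this `checks` format map onto the fork choice store, here is a minimal sketch of a check runner. It assumes a hypothetical `expected` dict parsed from the YAML above; `spec.get_head`, the store attributes, and `encode_hex` exist in the pyspec tooling, while the helper itself is only an illustration.

```python
# Hypothetical runner for the `checks` step format shown above.
from eth_utils import encode_hex


def run_checks(spec, store, expected: dict) -> None:
    head = spec.get_head(store)
    if 'head' in expected:
        assert expected['head']['slot'] == int(store.blocks[head].slot)
        assert expected['head']['root'] == encode_hex(head)
    if 'justified_checkpoint' in expected:
        cp = store.justified_checkpoint
        assert expected['justified_checkpoint'] == {'epoch': int(cp.epoch), 'root': encode_hex(cp.root)}
    if 'finalized_checkpoint' in expected:
        cp = store.finalized_checkpoint
        assert expected['finalized_checkpoint'] == {'epoch': int(cp.epoch), 'root': encode_hex(cp.root)}
    if 'proposer_boost_root' in expected:
        assert expected['proposer_boost_root'] == encode_hex(store.proposer_boost_root)
```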
@@ -45,6 +45,7 @@ Operations:
| `execution_payload` | `ExecutionPayload` | `execution_payload` | `process_execution_payload(state, execution_payload)` (new in Bellatrix) |
| `withdrawals` | `ExecutionPayload` | `execution_payload` | `process_withdrawals(state, execution_payload)` (new in Capella) |
| `bls_to_execution_change` | `SignedBLSToExecutionChange` | `address_change` | `process_bls_to_execution_change(state, address_change)` (new in Capella) |
| `deposit_receipt` | `DepositReceipt` | `deposit_receipt` | `process_deposit_receipt(state, deposit_receipt)` (new in EIP6110) |

Note that `block_header` is not strictly an operation (it is a full `Block`), but it is processed in the same manner and is hence included here.

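As a rough sketch of how a consumer of these operation test vectors might dispatch on the handler name, the snippet below mirrors the rows of the table, including the new `deposit_receipt` handler. The runner and its name are assumptions; the calls are written exactly as in the table, so real pyspec signatures (which may take extra arguments such as an execution engine) can differ.

```python
# Hypothetical dispatch for operations test vectors; rows mirror the table above.
def run_operation_handler(spec, state, handler_name: str, operation) -> None:
    if handler_name == 'execution_payload':
        spec.process_execution_payload(state, operation)
    elif handler_name == 'withdrawals':
        spec.process_withdrawals(state, operation)
    elif handler_name == 'bls_to_execution_change':
        spec.process_bls_to_execution_change(state, operation)
    elif handler_name == 'deposit_receipt':  # new in EIP6110
        spec.process_deposit_receipt(state, operation)
    else:
        raise ValueError(f"unknown operation handler: {handler_name}")
```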
@@ -1,5 +1,5 @@
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, combine_mods
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110


if __name__ == "__main__":
@@ -34,6 +34,8 @@ if __name__ == "__main__":

    deneb_mods = capella_mods

    eip6110_mods = deneb_mods

    # TODO Custody Game testgen is disabled for now
    # custody_game_mods = {**{key: 'eth2spec.test.custody_game.epoch_processing.test_process_' + key for key in [
    #     'reveal_deadlines',
@@ -47,6 +49,7 @@ if __name__ == "__main__":
        BELLATRIX: bellatrix_mods,
        CAPELLA: capella_mods,
        DENEB: deneb_mods,
        EIP6110: eip6110_mods,
    }

    run_state_test_generators(runner_name="epoch_processing", all_mods=all_mods)