diff --git a/.circleci/config.yml b/.circleci/config.yml
index bdb3f5bc6..94065d0bb 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -142,7 +142,19 @@ jobs:
command: make citest fork=capella
- store_test_results:
path: tests/core/pyspec/test-reports
-
+ test-eip4844:
+ docker:
+ - image: circleci/python:3.8
+ working_directory: ~/specs-repo
+ steps:
+ - restore_cache:
+ key: v3-specs-repo-{{ .Branch }}-{{ .Revision }}
+ - restore_pyspec_cached_venv
+ - run:
+ name: Run py-tests
+ command: make citest fork=eip4844
+ - store_test_results:
+ path: tests/core/pyspec/test-reports
table_of_contents:
docker:
- image: circleci/node:10.16.3
@@ -260,6 +272,9 @@ workflows:
- test-capella:
requires:
- install_pyspec_test
+ - test-eip4844:
+ requires:
+ - install_pyspec_test
- table_of_contents
- codespell
- lint:
diff --git a/.gitignore b/.gitignore
index 101cb0b08..219251599 100644
--- a/.gitignore
+++ b/.gitignore
@@ -19,6 +19,7 @@ tests/core/pyspec/eth2spec/phase0/
tests/core/pyspec/eth2spec/altair/
tests/core/pyspec/eth2spec/bellatrix/
tests/core/pyspec/eth2spec/capella/
+tests/core/pyspec/eth2spec/eip4844/
# coverage reports
.htmlcov
diff --git a/Makefile b/Makefile
index e3ff8e455..725e64de8 100644
--- a/Makefile
+++ b/Makefile
@@ -23,13 +23,15 @@ GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%venv, $(GENER
# To check generator matching:
#$(info $$GENERATOR_TARGETS is [${GENERATOR_TARGETS}])
-MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/phase0/*.md) $(wildcard $(SPEC_DIR)/altair/*.md) $(wildcard $(SSZ_DIR)/*.md) \
+MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/phase0/*.md) \
+ $(wildcard $(SPEC_DIR)/altair/*.md) $(wildcard $(SPEC_DIR)/altair/**/*.md) \
$(wildcard $(SPEC_DIR)/bellatrix/*.md) \
$(wildcard $(SPEC_DIR)/capella/*.md) \
$(wildcard $(SPEC_DIR)/custody/*.md) \
$(wildcard $(SPEC_DIR)/das/*.md) \
$(wildcard $(SPEC_DIR)/sharding/*.md) \
- $(wildcard $(SPEC_DIR)/eip4844/*.md)
+ $(wildcard $(SPEC_DIR)/eip4844/*.md) \
+ $(wildcard $(SSZ_DIR)/*.md)
COV_HTML_OUT=.htmlcov
COV_HTML_OUT_DIR=$(PY_SPEC_DIR)/$(COV_HTML_OUT)
@@ -107,13 +109,13 @@ find_test: pyspec
python3 -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.minimal --cov=eth2spec.altair.minimal --cov=eth2spec.bellatrix.minimal --cov=eth2spec.capella.minimal --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
citest: pyspec
- mkdir -p tests/core/pyspec/test-reports/eth2spec;
+ mkdir -p $(TEST_REPORT_DIR);
ifdef fork
. venv/bin/activate; cd $(PY_SPEC_DIR); \
- python3 -m pytest -n 4 --bls-type=milagro --fork=$(fork) --junitxml=eth2spec/test_results.xml eth2spec
+ python3 -m pytest -n 4 --bls-type=milagro --fork=$(fork) --junitxml=test-reports/test_results.xml eth2spec
else
. venv/bin/activate; cd $(PY_SPEC_DIR); \
- python3 -m pytest -n 4 --bls-type=milagro --junitxml=eth2spec/test_results.xml eth2spec
+ python3 -m pytest -n 4 --bls-type=milagro --junitxml=test-reports/test_results.xml eth2spec
endif
@@ -136,7 +138,7 @@ codespell:
lint: pyspec
. venv/bin/activate; cd $(PY_SPEC_DIR); \
flake8 --config $(LINTER_CONFIG_FILE) ./eth2spec \
- && pylint --disable=all --enable unused-argument ./eth2spec/phase0 ./eth2spec/altair ./eth2spec/bellatrix \
+ && pylint --disable=all --enable unused-argument ./eth2spec/phase0 ./eth2spec/altair ./eth2spec/bellatrix ./eth2spec/capella \
&& mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.altair -p eth2spec.bellatrix -p eth2spec.capella
lint_generators: pyspec
diff --git a/README.md b/README.md
index fb58edf8d..56267c0db 100644
--- a/README.md
+++ b/README.md
@@ -6,7 +6,6 @@ To learn more about proof-of-stake and sharding, see the [PoS FAQ](https://eth.w
This repository hosts the current Ethereum proof-of-stake specifications. Discussions about design rationale and proposed changes can be brought up and discussed as issues. Solidified, agreed-upon changes to the spec can be made through pull requests.
-
## Specs
[](https://github.com/ethereum/eth2.0-specs/releases/) [](https://badge.fury.io/py/eth2spec)
@@ -14,55 +13,22 @@ This repository hosts the current Ethereum proof-of-stake specifications. Discus
Core specifications for Ethereum proof-of-stake clients can be found in [specs](specs/). These are divided into features.
Features are researched and developed in parallel, and then consolidated into sequential upgrades when ready.
-The current features are:
+### Stable Specifications
-### Phase 0
+| Seq. | Code Name | Fork Epoch | Specs |
+| - | - | - | - |
+| 0 | **Phase0** |`0` |
- Core
- [The beacon chain](specs/phase0/beacon-chain.md)
- [Deposit contract](specs/phase0/deposit-contract.md)
- [Beacon chain fork choice](specs/phase0/fork-choice.md)
- Additions
- [Honest validator guide](specs/phase0/validator.md)
- [P2P networking](specs/phase0/p2p-interface.md)
- [Weak subjectivity](specs/phase0/weak-subjectivity.md)
|
+| 1 | **Altair** | `74240` | - Core
- [Beacon chain changes](specs/altair/beacon-chain.md)
- [Altair fork](specs/altair/fork.md)
- Additions
- [Light client sync protocol](specs/altair/light-client/sync-protocol.md) ([full node](specs/altair/light-client/full-node.md), [light client](specs/altair/light-client/light-client.md), [networking](specs/altair/light-client/p2p-interface.md))
- [Honest validator guide changes](specs/altair/validator.md)
- [P2P networking](specs/altair/p2p-interface.md)
|
+| 2 | **Bellatrix**
(["The Merge"](https://ethereum.org/en/upgrades/merge/)) | TBD | - Core
- [Beacon Chain changes](specs/bellatrix/beacon-chain.md)
- [Bellatrix fork](specs/bellatrix/fork.md)
- [Fork choice changes](specs/bellatrix/fork-choice.md)
- Additions
- [Honest validator guide changes](specs/bellatrix/validator.md)
- [P2P networking](specs/bellatrix/p2p-interface.md)
|
-* [The Beacon Chain](specs/phase0/beacon-chain.md)
-* [Beacon Chain Fork Choice](specs/phase0/fork-choice.md)
-* [Deposit Contract](specs/phase0/deposit-contract.md)
-* [Honest Validator](specs/phase0/validator.md)
-* [P2P Networking](specs/phase0/p2p-interface.md)
-* [Weak Subjectivity](specs/phase0/weak-subjectivity.md)
-
-### Altair
-
-* [Beacon chain changes](specs/altair/beacon-chain.md)
-* [Altair fork](specs/altair/fork.md)
-* [Light client sync protocol](specs/altair/sync-protocol.md)
-* [Honest Validator guide changes](specs/altair/validator.md)
-* [P2P Networking](specs/altair/p2p-interface.md)
-
-### Bellatrix (also known as The Merge)
-
-The Bellatrix protocol upgrade is still actively in development. The exact specification has not been formally accepted as final and details are still subject to change.
-
-* Background material:
- * An [ethresear.ch](https://ethresear.ch) post [describing the basic mechanism of the CL+EL merge](https://ethresear.ch/t/the-eth1-eth2-transition/6265)
- * [ethereum.org](https://ethereum.org) high-level description of the CL+EL merge [here](https://ethereum.org/en/eth2/docking/)
-* Specifications:
- * [Beacon Chain changes](specs/bellatrix/beacon-chain.md)
- * [Bellatrix fork](specs/bellatrix/fork.md)
- * [Fork Choice changes](specs/bellatrix/fork-choice.md)
- * [Validator additions](specs/bellatrix/validator.md)
- * [P2P Networking](specs/bellatrix/p2p-interface.md)
-
-### Sharding
-
-Sharding follows Bellatrix, and is divided into three parts:
-
-* Sharding base functionality - In early engineering phase
- * [Beacon Chain changes](specs/sharding/beacon-chain.md)
- * [P2P Network changes](specs/sharding/p2p-interface.md)
-* Custody Game - Ready, dependent on sharding
- * [Beacon Chain changes](specs/custody_game/beacon-chain.md)
- * [Validator custody work](specs/custody_game/validator.md)
-* Data Availability Sampling - In active R&D
- * Technical details [here](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD).
- * [Core types and functions](specs/das/das-core.md)
- * [P2P Networking](specs/das/p2p-interface.md)
- * [Fork Choice](specs/das/fork-choice.md)
- * [Sampling process](specs/das/sampling.md)
+### In-development Specifications
+| Code Name or Topic | Specs | Notes |
+| - | - | - |
+| Capella (tentative) | - Core
- [Beacon chain changes](specs/capella/beacon-chain.md)
- [Capella fork](specs/capella/fork.md)
- Additions
- [Validator additions](specs/capella/validator.md)
|
+| EIP4844 (tentative) | - Core
- [Beacon Chain changes](specs/eip4844/beacon-chain.md)
- [EIP-4844 fork](specs/eip4844/fork.md)
- [Polynomial commitments](specs/eip4844/polynomial-commitments.md)
- Additions
- [Honest validator guide changes](specs/eip4844/validator.md)
- [P2P networking](specs/eip4844/p2p-interface.md)
|
+| Sharding (outdated) | - Core
- [Beacon Chain changes](specs/sharding/beacon-chain.md)
- Additions
- [P2P networking](specs/sharding/p2p-interface.md)
|
+| Custody Game (outdated) | - Core
- [Beacon Chain changes](specs/custody_game/beacon-chain.md)
- Additions
- [Honest validator guide changes](specs/custody_game/validator.md)
| Dependent on sharding |
+| Data Availability Sampling (outdated) | - Core
- [Core types and functions](specs/das/das-core.md)
- [Fork choice changes](specs/das/fork-choice.md)
- Additions
- [P2P Networking](specs/das/p2p-interface.md)
- [Sampling process](specs/das/sampling.md)
| - Dependent on sharding
- [Technical explainer](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD)
|
### Accompanying documents can be found in [specs](specs) and include:
diff --git a/configs/mainnet.yaml b/configs/mainnet.yaml
index 1c0a12d4e..929d39f8a 100644
--- a/configs/mainnet.yaml
+++ b/configs/mainnet.yaml
@@ -12,8 +12,8 @@ CONFIG_NAME: 'mainnet'
# Transition
# ---------------------------------------------------------------
-# TBD, 2**256-2**10 is a placeholder
-TERMINAL_TOTAL_DIFFICULTY: 115792089237316195423570985008687907853269984665640564039457584007913129638912
+# Estimated on Sept 15, 2022
+TERMINAL_TOTAL_DIFFICULTY: 58750000000000000000000
# By default, don't use these params
TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000
TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615
@@ -43,13 +43,13 @@ ALTAIR_FORK_VERSION: 0x01000000
ALTAIR_FORK_EPOCH: 74240 # Oct 27, 2021, 10:56:23am UTC
# Bellatrix
BELLATRIX_FORK_VERSION: 0x02000000
-BELLATRIX_FORK_EPOCH: 18446744073709551615
+BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC
# Capella
CAPELLA_FORK_VERSION: 0x03000000
CAPELLA_FORK_EPOCH: 18446744073709551615
-# Sharding
-SHARDING_FORK_VERSION: 0x04000000
-SHARDING_FORK_EPOCH: 18446744073709551615
+# EIP4844
+EIP4844_FORK_VERSION: 0x04000000
+EIP4844_FORK_EPOCH: 18446744073709551615
diff --git a/configs/minimal.yaml b/configs/minimal.yaml
index e619ee931..5dde4b749 100644
--- a/configs/minimal.yaml
+++ b/configs/minimal.yaml
@@ -12,7 +12,7 @@ CONFIG_NAME: 'minimal'
# Transition
# ---------------------------------------------------------------
-# TBD, 2**256-2**10 is a placeholder
+# 2**256-2**10 for testing minimal network
TERMINAL_TOTAL_DIFFICULTY: 115792089237316195423570985008687907853269984665640564039457584007913129638912
# By default, don't use these params
TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000
@@ -46,9 +46,9 @@ BELLATRIX_FORK_EPOCH: 18446744073709551615
# Capella
CAPELLA_FORK_VERSION: 0x03000001
CAPELLA_FORK_EPOCH: 18446744073709551615
-# Sharding
-SHARDING_FORK_VERSION: 0x04000001
-SHARDING_FORK_EPOCH: 18446744073709551615
+# EIP4844
+EIP4844_FORK_VERSION: 0x04000001
+EIP4844_FORK_EPOCH: 18446744073709551615
# Time parameters
diff --git a/presets/mainnet/capella.yaml b/presets/mainnet/capella.yaml
index c5dfe1d4b..f04bdd06f 100644
--- a/presets/mainnet/capella.yaml
+++ b/presets/mainnet/capella.yaml
@@ -1 +1,24 @@
-# Minimal preset - Capella
+# Mainnet preset - Capella
+
+# Misc
+# ---------------------------------------------------------------
+# 2**8 (= 256) withdrawals
+MAX_PARTIAL_WITHDRAWALS_PER_EPOCH: 256
+
+
+# State list lengths
+# ---------------------------------------------------------------
+# 2**40 (= 1,099,511,627,776) withdrawals
+WITHDRAWAL_QUEUE_LIMIT: 1099511627776
+
+
+# Max operations per block
+# ---------------------------------------------------------------
+# 2**4 (= 16)
+MAX_BLS_TO_EXECUTION_CHANGES: 16
+
+
+# Execution
+# ---------------------------------------------------------------
+# 2**4 (= 16) withdrawals
+MAX_WITHDRAWALS_PER_PAYLOAD: 16
diff --git a/presets/mainnet/eip4844.yaml b/presets/mainnet/eip4844.yaml
new file mode 100644
index 000000000..c40908fde
--- /dev/null
+++ b/presets/mainnet/eip4844.yaml
@@ -0,0 +1,8 @@
+# Mainnet preset - EIP4844
+
+# Misc
+# ---------------------------------------------------------------
+# `uint64(4096)`
+FIELD_ELEMENTS_PER_BLOB: 4096
+# `uint64(2**4)` (= 16)
+MAX_BLOBS_PER_BLOCK: 16
diff --git a/presets/minimal/capella.yaml b/presets/minimal/capella.yaml
index c5dfe1d4b..bf78685cd 100644
--- a/presets/minimal/capella.yaml
+++ b/presets/minimal/capella.yaml
@@ -1 +1,24 @@
# Minimal preset - Capella
+
+# Misc
+# ---------------------------------------------------------------
+# [customized] 16 for more interesting tests at low validator count
+MAX_PARTIAL_WITHDRAWALS_PER_EPOCH: 16
+
+
+# State list lengths
+# ---------------------------------------------------------------
+# 2**40 (= 1,099,511,627,776) withdrawals
+WITHDRAWAL_QUEUE_LIMIT: 1099511627776
+
+
+# Max operations per block
+# ---------------------------------------------------------------
+# 2**4 (= 16)
+MAX_BLS_TO_EXECUTION_CHANGES: 16
+
+
+# Execution
+# ---------------------------------------------------------------
+# [customized] NOTE(review): comment claimed "lower than MAX_PARTIAL_WITHDRAWALS_PER_EPOCH" but both are 16 here — confirm intended value
+MAX_WITHDRAWALS_PER_PAYLOAD: 16
diff --git a/presets/minimal/eip4844.yaml b/presets/minimal/eip4844.yaml
new file mode 100644
index 000000000..fbb676819
--- /dev/null
+++ b/presets/minimal/eip4844.yaml
@@ -0,0 +1,8 @@
+# Minimal preset - EIP4844
+
+# Misc
+# ---------------------------------------------------------------
+# [customized]
+FIELD_ELEMENTS_PER_BLOB: 4
+# `uint64(2**4)` (= 16)
+MAX_BLOBS_PER_BLOCK: 16
diff --git a/setup.py b/setup.py
index 57d8d8fe1..484861b85 100644
--- a/setup.py
+++ b/setup.py
@@ -45,6 +45,7 @@ PHASE0 = 'phase0'
ALTAIR = 'altair'
BELLATRIX = 'bellatrix'
CAPELLA = 'capella'
+EIP4844 = 'eip4844'
# The helper functions that are used when defining constants
@@ -230,7 +231,7 @@ def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str]) ->
if not _is_constant_id(name):
# Check for short type declarations
- if value.startswith(("uint", "Bytes", "ByteList", "Union")):
+ if value.startswith(("uint", "Bytes", "ByteList", "Union", "Vector", "List")):
custom_types[name] = value
continue
@@ -304,7 +305,7 @@ class SpecBuilder(ABC):
@classmethod
@abstractmethod
- def hardcoded_custom_type_dep_constants(cls) -> Dict[str, str]: # TODO
+ def hardcoded_custom_type_dep_constants(cls, spec_object) -> Dict[str, str]: # TODO
"""
The constants that are required for custom types.
"""
@@ -432,7 +433,7 @@ get_attesting_indices = cache_this(
return {}
@classmethod
- def hardcoded_custom_type_dep_constants(cls) -> Dict[str, str]:
+ def hardcoded_custom_type_dep_constants(cls, spec_object) -> Dict[str, str]:
return {}
@classmethod
@@ -457,6 +458,7 @@ class AltairSpecBuilder(Phase0SpecBuilder):
from typing import NewType, Union as PyUnion
from eth2spec.phase0 import {preset_name} as phase0
+from eth2spec.test.helpers.merkle import build_proof
from eth2spec.utils.ssz.ssz_typing import Path
'''
@@ -474,13 +476,19 @@ def get_generalized_index(ssz_class: Any, *path: Sequence[PyUnion[int, SSZVariab
ssz_path = Path(ssz_class)
for item in path:
ssz_path = ssz_path / item
- return GeneralizedIndex(ssz_path.gindex())'''
+ return GeneralizedIndex(ssz_path.gindex())
+
+
+def compute_merkle_proof_for_state(state: BeaconState,
+ index: GeneralizedIndex) -> Sequence[Bytes32]:
+ return build_proof(state.get_backing(), index)'''
@classmethod
def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:
constants = {
'FINALIZED_ROOT_INDEX': 'GeneralizedIndex(105)',
+ 'CURRENT_SYNC_COMMITTEE_INDEX': 'GeneralizedIndex(54)',
'NEXT_SYNC_COMMITTEE_INDEX': 'GeneralizedIndex(55)',
}
return {**super().hardcoded_ssz_dep_constants(), **constants}
@@ -547,11 +555,11 @@ EXECUTION_ENGINE = NoopExecutionEngine()"""
@classmethod
- def hardcoded_custom_type_dep_constants(cls) -> str:
+ def hardcoded_custom_type_dep_constants(cls, spec_object) -> str:
constants = {
- 'MAX_BYTES_PER_TRANSACTION': 'uint64(2**30)',
+ 'MAX_BYTES_PER_TRANSACTION': spec_object.preset_vars['MAX_BYTES_PER_TRANSACTION'].value,
}
- return {**super().hardcoded_custom_type_dep_constants(), **constants}
+ return {**super().hardcoded_custom_type_dep_constants(spec_object), **constants}
#
@@ -567,14 +575,57 @@ from eth2spec.bellatrix import {preset_name} as bellatrix
'''
+#
+# EIP4844SpecBuilder
+#
+class EIP4844SpecBuilder(BellatrixSpecBuilder):
+ fork: str = EIP4844
+
+ @classmethod
+ def imports(cls, preset_name: str):
+ return super().imports(preset_name) + f'''
+from eth2spec.utils import kzg
+from eth2spec.bellatrix import {preset_name} as bellatrix
+'''
+
+ @classmethod
+ def sundry_functions(cls) -> str:
+ return super().sundry_functions() + '''
+# TODO: for mainnet, load pre-generated trusted setup file to reduce building time.
+# TESTING_FIELD_ELEMENTS_PER_BLOB is hardcoded copy from minimal presets
+TESTING_FIELD_ELEMENTS_PER_BLOB = 4
+TESTING_SECRET = 1337
+TESTING_KZG_SETUP_G1 = kzg.generate_setup(bls.G1, TESTING_SECRET, TESTING_FIELD_ELEMENTS_PER_BLOB)
+TESTING_KZG_SETUP_G2 = kzg.generate_setup(bls.G2, TESTING_SECRET, TESTING_FIELD_ELEMENTS_PER_BLOB)
+TESTING_KZG_SETUP_LAGRANGE = kzg.get_lagrange(TESTING_KZG_SETUP_G1)
+
+KZG_SETUP_G1 = [bls.G1_to_bytes48(p) for p in TESTING_KZG_SETUP_G1]
+KZG_SETUP_G2 = [bls.G2_to_bytes96(p) for p in TESTING_KZG_SETUP_G2]
+KZG_SETUP_LAGRANGE = TESTING_KZG_SETUP_LAGRANGE
+ROOTS_OF_UNITY = kzg.compute_roots_of_unity(TESTING_FIELD_ELEMENTS_PER_BLOB)
+
+
+def retrieve_blobs_sidecar(slot: Slot, beacon_block_root: Root) -> BlobsSidecar:
+ pass'''
+
+ @classmethod
+ def hardcoded_custom_type_dep_constants(cls, spec_object) -> str:
+ constants = {
+ 'FIELD_ELEMENTS_PER_BLOB': spec_object.preset_vars['FIELD_ELEMENTS_PER_BLOB'].value,
+ 'MAX_BLOBS_PER_BLOCK': spec_object.preset_vars['MAX_BLOBS_PER_BLOCK'].value,
+ }
+ return {**super().hardcoded_custom_type_dep_constants(spec_object), **constants}
+
+
+
spec_builders = {
builder.fork: builder
- for builder in (Phase0SpecBuilder, AltairSpecBuilder, BellatrixSpecBuilder, CapellaSpecBuilder)
+ for builder in (Phase0SpecBuilder, AltairSpecBuilder, BellatrixSpecBuilder, CapellaSpecBuilder, EIP4844SpecBuilder)
}
def is_spec_defined_type(value: str) -> bool:
- return value.startswith('ByteList') or value.startswith('Union')
+ return value.startswith(('ByteList', 'Union', 'Vector', 'List'))
def objects_to_spec(preset_name: str,
@@ -611,7 +662,11 @@ def objects_to_spec(preset_name: str,
protocols_spec = '\n\n\n'.join(format_protocol(k, v) for k, v in spec_object.protocols.items())
for k in list(spec_object.functions):
- if "ceillog2" in k or "floorlog2" in k:
+ if k in [
+ "ceillog2",
+ "floorlog2",
+ "compute_merkle_proof_for_state",
+ ]:
del spec_object.functions[k]
functions = builder.implement_optimizations(spec_object.functions)
functions_spec = '\n\n\n'.join(functions.values())
@@ -652,7 +707,7 @@ def objects_to_spec(preset_name: str,
ordered_class_objects_spec = '\n\n\n'.join(ordered_class_objects.values())
ssz_dep_constants = '\n'.join(map(lambda x: '%s = %s' % (x, builder.hardcoded_ssz_dep_constants()[x]), builder.hardcoded_ssz_dep_constants()))
ssz_dep_constants_verification = '\n'.join(map(lambda x: 'assert %s == %s' % (x, spec_object.ssz_dep_constants[x]), builder.hardcoded_ssz_dep_constants()))
- custom_type_dep_constants = '\n'.join(map(lambda x: '%s = %s' % (x, builder.hardcoded_custom_type_dep_constants()[x]), builder.hardcoded_custom_type_dep_constants()))
+ custom_type_dep_constants = '\n'.join(map(lambda x: '%s = %s' % (x, builder.hardcoded_custom_type_dep_constants(spec_object)[x]), builder.hardcoded_custom_type_dep_constants(spec_object)))
spec = (
builder.imports(preset_name)
+ builder.preparations()
@@ -772,7 +827,7 @@ def parse_config_vars(conf: Dict[str, str]) -> Dict[str, str]:
for k, v in conf.items():
if isinstance(v, str) and (v.startswith("0x") or k == 'PRESET_BASE' or k == 'CONFIG_NAME'):
# Represent byte data with string, to avoid misinterpretation as big-endian int.
- # Everything is either byte data or an integer, with PRESET_BASE as one exception.
+ # Everything except PRESET_BASE and CONFIG_NAME is either byte data or an integer.
out[k] = f"'{v}'"
else:
out[k] = str(int(v))
@@ -869,28 +924,32 @@ class PySpecCommand(Command):
if len(self.md_doc_paths) == 0:
print("no paths were specified, using default markdown file paths for pyspec"
" build (spec fork: %s)" % self.spec_fork)
- if self.spec_fork in (PHASE0, ALTAIR, BELLATRIX, CAPELLA):
+ if self.spec_fork in (PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844):
self.md_doc_paths = """
specs/phase0/beacon-chain.md
specs/phase0/fork-choice.md
specs/phase0/validator.md
specs/phase0/weak-subjectivity.md
"""
- if self.spec_fork in (ALTAIR, BELLATRIX, CAPELLA):
+ if self.spec_fork in (ALTAIR, BELLATRIX, CAPELLA, EIP4844):
self.md_doc_paths += """
+ specs/altair/light-client/full-node.md
+ specs/altair/light-client/light-client.md
+ specs/altair/light-client/p2p-interface.md
+ specs/altair/light-client/sync-protocol.md
specs/altair/beacon-chain.md
specs/altair/bls.md
specs/altair/fork.md
specs/altair/validator.md
specs/altair/p2p-interface.md
- specs/altair/sync-protocol.md
"""
- if self.spec_fork in (BELLATRIX, CAPELLA):
+ if self.spec_fork in (BELLATRIX, CAPELLA, EIP4844):
self.md_doc_paths += """
specs/bellatrix/beacon-chain.md
specs/bellatrix/fork.md
specs/bellatrix/fork-choice.md
specs/bellatrix/validator.md
+ specs/bellatrix/p2p-interface.md
sync/optimistic.md
"""
if self.spec_fork == CAPELLA:
@@ -901,6 +960,14 @@ class PySpecCommand(Command):
specs/capella/validator.md
specs/capella/p2p-interface.md
"""
+ if self.spec_fork == EIP4844:
+ self.md_doc_paths += """
+ specs/eip4844/beacon-chain.md
+ specs/eip4844/fork.md
+ specs/eip4844/polynomial-commitments.md
+ specs/eip4844/p2p-interface.md
+ specs/eip4844/validator.md
+ """
if len(self.md_doc_paths) == 0:
raise Exception('no markdown files specified, and spec fork "%s" is unknown', self.spec_fork)
@@ -1041,7 +1108,7 @@ setup(
extras_require={
"test": ["pytest>=4.4", "pytest-cov", "pytest-xdist"],
"lint": ["flake8==3.7.7", "mypy==0.812", "pylint==2.12.2"],
- "generator": ["python-snappy==0.5.4"],
+ "generator": ["python-snappy==0.5.4", "filelock"],
},
install_requires=[
"eth-utils>=1.3.0,<2",
diff --git a/specs/altair/beacon-chain.md b/specs/altair/beacon-chain.md
index a14ddb4f6..79ab8d022 100644
--- a/specs/altair/beacon-chain.md
+++ b/specs/altair/beacon-chain.md
@@ -682,7 +682,7 @@ def process_sync_committee_updates(state: BeaconState) -> None:
This helper function is only for initializing the state for pure Altair testnets and tests.
-*Note*: The function `initialize_beacon_state_from_eth1` is modified: (1) using `ALTAIR_FORK_VERSION` as the current fork version, (2) utilizing the Altair `BeaconBlockBody` when constructing the initial `latest_block_header`, and (3) adding initial sync committees.
+*Note*: The function `initialize_beacon_state_from_eth1` is modified: (1) using `ALTAIR_FORK_VERSION` as the previous and current fork version, (2) utilizing the Altair `BeaconBlockBody` when constructing the initial `latest_block_header`, and (3) adding initial sync committees.
```python
def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
diff --git a/specs/altair/fork.md b/specs/altair/fork.md
index 6228022b8..bf8499a21 100644
--- a/specs/altair/fork.md
+++ b/specs/altair/fork.md
@@ -7,6 +7,9 @@
- [Introduction](#introduction)
- [Configuration](#configuration)
+- [Helper functions](#helper-functions)
+ - [Misc](#misc)
+ - [`compute_fork_version`](#compute_fork_version)
- [Fork to Altair](#fork-to-altair)
- [Fork trigger](#fork-trigger)
- [Upgrading the state](#upgrading-the-state)
@@ -26,6 +29,22 @@ Warning: this configuration is not definitive.
| `ALTAIR_FORK_VERSION` | `Version('0x01000000')` |
| `ALTAIR_FORK_EPOCH` | `Epoch(74240)` (Oct 27, 2021, 10:56:23am UTC) |
+## Helper functions
+
+### Misc
+
+#### `compute_fork_version`
+
+```python
+def compute_fork_version(epoch: Epoch) -> Version:
+ """
+ Return the fork version at the given ``epoch``.
+ """
+ if epoch >= ALTAIR_FORK_EPOCH:
+ return ALTAIR_FORK_VERSION
+ return GENESIS_FORK_VERSION
+```
+
## Fork to Altair
### Fork trigger
diff --git a/specs/altair/light-client/full-node.md b/specs/altair/light-client/full-node.md
new file mode 100644
index 000000000..09810a037
--- /dev/null
+++ b/specs/altair/light-client/full-node.md
@@ -0,0 +1,165 @@
+# Altair Light Client -- Full Node
+
+**Notice**: This document is a work-in-progress for researchers and implementers.
+
+## Table of contents
+
+
+
+
+
+- [Introduction](#introduction)
+- [Helper functions](#helper-functions)
+ - [`compute_merkle_proof_for_state`](#compute_merkle_proof_for_state)
+- [Deriving light client data](#deriving-light-client-data)
+ - [`create_light_client_bootstrap`](#create_light_client_bootstrap)
+ - [`create_light_client_update`](#create_light_client_update)
+ - [`create_light_client_finality_update`](#create_light_client_finality_update)
+ - [`create_light_client_optimistic_update`](#create_light_client_optimistic_update)
+
+
+
+
+## Introduction
+
+This document provides helper functions to enable full nodes to serve light client data. Full nodes SHOULD implement the described functionality to enable light clients to sync with the network.
+
+## Helper functions
+
+### `compute_merkle_proof_for_state`
+
+```python
+def compute_merkle_proof_for_state(state: BeaconState,
+ index: GeneralizedIndex) -> Sequence[Bytes32]:
+ ...
+```
+
+## Deriving light client data
+
+Full nodes are expected to derive light client data from historic blocks and states and provide it to other clients.
+
+### `create_light_client_bootstrap`
+
+```python
+def create_light_client_bootstrap(state: BeaconState) -> LightClientBootstrap:
+ assert compute_epoch_at_slot(state.slot) >= ALTAIR_FORK_EPOCH
+ assert state.slot == state.latest_block_header.slot
+
+ return LightClientBootstrap(
+ header=BeaconBlockHeader(
+ slot=state.latest_block_header.slot,
+ proposer_index=state.latest_block_header.proposer_index,
+ parent_root=state.latest_block_header.parent_root,
+ state_root=hash_tree_root(state),
+ body_root=state.latest_block_header.body_root,
+ ),
+ current_sync_committee=state.current_sync_committee,
+ current_sync_committee_branch=compute_merkle_proof_for_state(state, CURRENT_SYNC_COMMITTEE_INDEX)
+ )
+```
+
+Full nodes SHOULD provide `LightClientBootstrap` for all finalized epoch boundary blocks in the epoch range `[max(ALTAIR_FORK_EPOCH, current_epoch - MIN_EPOCHS_FOR_BLOCK_REQUESTS), current_epoch]` where `current_epoch` is defined by the current wall-clock time. Full nodes MAY also provide `LightClientBootstrap` for other blocks.
+
+Blocks are considered to be epoch boundary blocks if their block root can occur as part of a valid `Checkpoint`, i.e., if their slot is the initial slot of an epoch, or if all following slots through the initial slot of the next epoch are empty (no block proposed / orphaned).
+
+`LightClientBootstrap` is computed from the block's immediate post state (without applying empty slots).
+
+### `create_light_client_update`
+
+To form a `LightClientUpdate`, the following historical states and blocks are needed:
+- `state`: the post state of any block with a post-Altair parent block
+- `block`: the corresponding block
+- `attested_state`: the post state of the block referred to by `block.parent_root`
+- `finalized_block`: the block referred to by `attested_state.finalized_checkpoint.root`, if locally available (may be unavailable, e.g., when using checkpoint sync, or if it was pruned locally)
+
+```python
+def create_light_client_update(state: BeaconState,
+ block: SignedBeaconBlock,
+ attested_state: BeaconState,
+ finalized_block: Optional[SignedBeaconBlock]) -> LightClientUpdate:
+ assert compute_epoch_at_slot(attested_state.slot) >= ALTAIR_FORK_EPOCH
+ assert sum(block.message.body.sync_aggregate.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS
+
+ assert state.slot == state.latest_block_header.slot
+ header = state.latest_block_header.copy()
+ header.state_root = hash_tree_root(state)
+ assert hash_tree_root(header) == hash_tree_root(block.message)
+ update_signature_period = compute_sync_committee_period(compute_epoch_at_slot(block.message.slot))
+
+ assert attested_state.slot == attested_state.latest_block_header.slot
+ attested_header = attested_state.latest_block_header.copy()
+ attested_header.state_root = hash_tree_root(attested_state)
+ assert hash_tree_root(attested_header) == block.message.parent_root
+ update_attested_period = compute_sync_committee_period(compute_epoch_at_slot(attested_header.slot))
+
+ # `next_sync_committee` is only useful if the message is signed by the current sync committee
+ if update_attested_period == update_signature_period:
+ next_sync_committee = attested_state.next_sync_committee
+ next_sync_committee_branch = compute_merkle_proof_for_state(attested_state, NEXT_SYNC_COMMITTEE_INDEX)
+ else:
+ next_sync_committee = SyncCommittee()
+ next_sync_committee_branch = [Bytes32() for _ in range(floorlog2(NEXT_SYNC_COMMITTEE_INDEX))]
+
+ # Indicate finality whenever possible
+ if finalized_block is not None:
+ if finalized_block.message.slot != GENESIS_SLOT:
+ finalized_header = BeaconBlockHeader(
+ slot=finalized_block.message.slot,
+ proposer_index=finalized_block.message.proposer_index,
+ parent_root=finalized_block.message.parent_root,
+ state_root=finalized_block.message.state_root,
+ body_root=hash_tree_root(finalized_block.message.body),
+ )
+ assert hash_tree_root(finalized_header) == attested_state.finalized_checkpoint.root
+ else:
+ assert attested_state.finalized_checkpoint.root == Bytes32()
+ finalized_header = BeaconBlockHeader()
+ finality_branch = compute_merkle_proof_for_state(attested_state, FINALIZED_ROOT_INDEX)
+ else:
+ finalized_header = BeaconBlockHeader()
+ finality_branch = [Bytes32() for _ in range(floorlog2(FINALIZED_ROOT_INDEX))]
+
+ return LightClientUpdate(
+ attested_header=attested_header,
+ next_sync_committee=next_sync_committee,
+ next_sync_committee_branch=next_sync_committee_branch,
+ finalized_header=finalized_header,
+ finality_branch=finality_branch,
+ sync_aggregate=block.message.body.sync_aggregate,
+ signature_slot=block.message.slot,
+ )
+```
+
+Full nodes SHOULD provide the best derivable `LightClientUpdate` (according to `is_better_update`) for each sync committee period covering any epochs in range `[max(ALTAIR_FORK_EPOCH, current_epoch - MIN_EPOCHS_FOR_BLOCK_REQUESTS), current_epoch]` where `current_epoch` is defined by the current wall-clock time. Full nodes MAY also provide `LightClientUpdate` for other sync committee periods.
+
+- `LightClientUpdate` are assigned to sync committee periods based on their `attested_header.slot`
+- `LightClientUpdate` are only considered if `compute_sync_committee_period(compute_epoch_at_slot(update.attested_header.slot)) == compute_sync_committee_period(compute_epoch_at_slot(update.signature_slot))`
+- Only `LightClientUpdate` with `next_sync_committee` as selected by fork choice are provided, regardless of ranking by `is_better_update`. To uniquely identify a non-finalized sync committee fork, all of `period`, `current_sync_committee` and `next_sync_committee` need to be incorporated, as sync committees may reappear over time.
+
+### `create_light_client_finality_update`
+
+```python
+def create_light_client_finality_update(update: LightClientUpdate) -> LightClientFinalityUpdate:
+ return LightClientFinalityUpdate(
+ attested_header=update.attested_header,
+ finalized_header=update.finalized_header,
+ finality_branch=update.finality_branch,
+ sync_aggregate=update.sync_aggregate,
+ signature_slot=update.signature_slot,
+ )
+```
+
+Full nodes SHOULD provide the `LightClientFinalityUpdate` with the highest `attested_header.slot` (if multiple, highest `signature_slot`) as selected by fork choice, and SHOULD support a push mechanism to deliver new `LightClientFinalityUpdate` whenever `finalized_header` changes.
+
+### `create_light_client_optimistic_update`
+
+```python
+def create_light_client_optimistic_update(update: LightClientUpdate) -> LightClientOptimisticUpdate:
+ return LightClientOptimisticUpdate(
+ attested_header=update.attested_header,
+ sync_aggregate=update.sync_aggregate,
+ signature_slot=update.signature_slot,
+ )
+```
+
+Full nodes SHOULD provide the `LightClientOptimisticUpdate` with the highest `attested_header.slot` (if multiple, highest `signature_slot`) as selected by fork choice, and SHOULD support a push mechanism to deliver new `LightClientOptimisticUpdate` whenever `attested_header` changes.
diff --git a/specs/altair/light-client/light-client.md b/specs/altair/light-client/light-client.md
new file mode 100644
index 000000000..318950437
--- /dev/null
+++ b/specs/altair/light-client/light-client.md
@@ -0,0 +1,30 @@
+# Altair Light Client -- Light Client
+
+**Notice**: This document is a work-in-progress for researchers and implementers.
+
+## Table of contents
+
+
+
+
+
+- [Introduction](#introduction)
+- [Light client sync process](#light-client-sync-process)
+
+
+
+
+## Introduction
+
+This document explains how light clients MAY obtain light client data to sync with the network.
+
+## Light client sync process
+
+1. The light client MUST be configured out-of-band with a spec/preset (including fork schedule), with `genesis_state` (including `genesis_time` and `genesis_validators_root`), and with a trusted block root. The trusted block SHOULD be within the weak subjectivity period, and its root SHOULD be from a finalized `Checkpoint`.
+2. The local clock is initialized based on the configured `genesis_time`, and the current fork digest is determined to browse for and connect to relevant light client data providers.
+3. The light client fetches a [`LightClientBootstrap`](./sync-protocol.md#lightclientbootstrap) object for the configured trusted block root. The `bootstrap` object is passed to [`initialize_light_client_store`](./sync-protocol.md#initialize_light_client_store) to obtain a local [`LightClientStore`](./sync-protocol.md#lightclientstore).
+4. The light client tracks the sync committee periods `finalized_period` from `store.finalized_header.slot`, `optimistic_period` from `store.optimistic_header.slot`, and `current_period` from `current_slot` based on the local clock.
+ 1. When `finalized_period == optimistic_period` and [`is_next_sync_committee_known`](./sync-protocol.md#is_next_sync_committee_known) indicates `False`, the light client fetches a [`LightClientUpdate`](./sync-protocol.md#lightclientupdate) for `finalized_period`. If `finalized_period == current_period`, this fetch SHOULD be scheduled at a random time before `current_period` advances.
+ 2. When `finalized_period + 1 < current_period`, the light client fetches a `LightClientUpdate` for each sync committee period in range `[finalized_period + 1, current_period)` (current period excluded)
+ 3. When `finalized_period + 1 >= current_period`, the light client keeps observing [`LightClientFinalityUpdate`](./sync-protocol.md#lightclientfinalityupdate) and [`LightClientOptimisticUpdate`](./sync-protocol.md#lightclientoptimisticupdate). Received objects are passed to [`process_light_client_finality_update`](./sync-protocol.md#process_light_client_finality_update) and [`process_light_client_optimistic_update`](./sync-protocol.md#process_light_client_optimistic_update). This ensures that `finalized_header` and `optimistic_header` reflect the latest blocks.
+5. [`process_light_client_store_force_update`](./sync-protocol.md#process_light_client_store_force_update) MAY be called based on use case dependent heuristics if light client sync appears stuck. If available, falling back to an alternative syncing mechanism to cover the affected sync committee period is preferred.
diff --git a/specs/altair/light-client/p2p-interface.md b/specs/altair/light-client/p2p-interface.md
new file mode 100644
index 000000000..5c2b27b22
--- /dev/null
+++ b/specs/altair/light-client/p2p-interface.md
@@ -0,0 +1,257 @@
+# Altair Light Client -- Networking
+
+**Notice**: This document is a work-in-progress for researchers and implementers.
+
+## Table of contents
+
+
+
+
+
+- [Networking](#networking)
+ - [Configuration](#configuration)
+ - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
+ - [Topics and messages](#topics-and-messages)
+ - [Global topics](#global-topics)
+ - [`light_client_finality_update`](#light_client_finality_update)
+ - [`light_client_optimistic_update`](#light_client_optimistic_update)
+ - [The Req/Resp domain](#the-reqresp-domain)
+ - [Messages](#messages)
+ - [GetLightClientBootstrap](#getlightclientbootstrap)
+ - [LightClientUpdatesByRange](#lightclientupdatesbyrange)
+ - [GetLightClientFinalityUpdate](#getlightclientfinalityupdate)
+ - [GetLightClientOptimisticUpdate](#getlightclientoptimisticupdate)
+- [Light clients](#light-clients)
+- [Validator assignments](#validator-assignments)
+ - [Beacon chain responsibilities](#beacon-chain-responsibilities)
+ - [Sync committee](#sync-committee)
+
+
+
+
+## Networking
+
+This section extends the [networking specification for Altair](../p2p-interface.md) with additional messages, topics and data to the Req-Resp and Gossip domains.
+
+### Configuration
+
+| Name | Value | Description |
+| - | - | - |
+| `MAX_REQUEST_LIGHT_CLIENT_UPDATES` | `2**7` (= 128) | Maximum number of `LightClientUpdate` instances in a single request |
+
+### The gossip domain: gossipsub
+
+Gossip meshes are added to allow light clients to stay in sync with the network.
+
+#### Topics and messages
+
+New global topics are added to provide light clients with the latest updates.
+
+| name | Message Type |
+| - | - |
+| `light_client_finality_update` | `LightClientFinalityUpdate` |
+| `light_client_optimistic_update` | `LightClientOptimisticUpdate` |
+
+##### Global topics
+
+###### `light_client_finality_update`
+
+This topic is used to propagate the latest `LightClientFinalityUpdate` to light clients, allowing them to keep track of the latest `finalized_header`.
+
+The following validations MUST pass before forwarding the `finality_update` on the network.
+- _[IGNORE]_ No other `finality_update` with a lower or equal `finalized_header.slot` was already forwarded on the network
+- _[IGNORE]_ The `finality_update` is received after the block at `signature_slot` was given enough time to propagate through the network -- i.e. validate that one-third of `finality_update.signature_slot` has transpired (`SECONDS_PER_SLOT / INTERVALS_PER_SLOT` seconds after the start of the slot, with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance)
+
+For full nodes, the following validations MUST additionally pass before forwarding the `finality_update` on the network.
+- _[IGNORE]_ The received `finality_update` matches the locally computed one exactly (as defined in [`create_light_client_finality_update`](./full-node.md#create_light_client_finality_update))
+
+For light clients, the following validations MUST additionally pass before forwarding the `finality_update` on the network.
+- _[REJECT]_ The `finality_update` is valid -- i.e. validate that `process_light_client_finality_update` does not indicate errors
+- _[IGNORE]_ The `finality_update` advances the `finalized_header` of the local `LightClientStore` -- i.e. validate that processing `finality_update` increases `store.finalized_header.slot`
+
+Light clients SHOULD call `process_light_client_finality_update` even if the message is ignored.
+
+###### `light_client_optimistic_update`
+
+This topic is used to propagate the latest `LightClientOptimisticUpdate` to light clients, allowing them to keep track of the latest `optimistic_header`.
+
+The following validations MUST pass before forwarding the `optimistic_update` on the network.
+- _[IGNORE]_ No other `optimistic_update` with a lower or equal `attested_header.slot` was already forwarded on the network
+- _[IGNORE]_ The `optimistic_update` is received after the block at `signature_slot` was given enough time to propagate through the network -- i.e. validate that one-third of `optimistic_update.signature_slot` has transpired (`SECONDS_PER_SLOT / INTERVALS_PER_SLOT` seconds after the start of the slot, with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance)
+
+For full nodes, the following validations MUST additionally pass before forwarding the `optimistic_update` on the network.
+- _[IGNORE]_ The received `optimistic_update` matches the locally computed one exactly (as defined in [`create_light_client_optimistic_update`](./full-node.md#create_light_client_optimistic_update))
+
+For light clients, the following validations MUST additionally pass before forwarding the `optimistic_update` on the network.
+- _[REJECT]_ The `optimistic_update` is valid -- i.e. validate that `process_light_client_optimistic_update` does not indicate errors
+- _[IGNORE]_ The `optimistic_update` either matches corresponding fields of the most recently forwarded `LightClientFinalityUpdate` (if any), or it advances the `optimistic_header` of the local `LightClientStore` -- i.e. validate that processing `optimistic_update` increases `store.optimistic_header.slot`
+
+Light clients SHOULD call `process_light_client_optimistic_update` even if the message is ignored.
+
+### The Req/Resp domain
+
+#### Messages
+
+##### GetLightClientBootstrap
+
+**Protocol ID:** `/eth2/beacon_chain/req/light_client_bootstrap/1/`
+
+Request Content:
+
+```
+(
+ Root
+)
+```
+
+Response Content:
+
+```
+(
+ LightClientBootstrap
+)
+```
+
+Requests the `LightClientBootstrap` structure corresponding to a given post-Altair beacon block root.
+
+The request MUST be encoded as an SSZ-field.
+
+Peers SHOULD provide results as defined in [`create_light_client_bootstrap`](./full-node.md#create_light_client_bootstrap). To fulfill a request, the requested block's post state needs to be known.
+
+When a `LightClientBootstrap` instance cannot be produced for a given block root, peers SHOULD respond with error code `3: ResourceUnavailable`.
+
+A `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(bootstrap.header.slot))` is used to select the fork namespace of the Response type.
+
+Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
+
+[0]: # (eth2spec: skip)
+
+| `fork_version` | Response SSZ type |
+| ------------------------------- | ------------------------------------ |
+| `GENESIS_FORK_VERSION` | n/a |
+| `ALTAIR_FORK_VERSION` and later | `altair.LightClientBootstrap` |
+
+##### LightClientUpdatesByRange
+
+**Protocol ID:** `/eth2/beacon_chain/req/light_client_updates_by_range/1/`
+
+Request Content:
+```
+(
+ start_period: uint64
+ count: uint64
+)
+```
+
+Response Content:
+```
+(
+ List[LightClientUpdate, MAX_REQUEST_LIGHT_CLIENT_UPDATES]
+)
+```
+
+Requests the `LightClientUpdate` instances in the sync committee period range `[start_period, start_period + count)`, leading up to the current head sync committee period as selected by fork choice.
+
+The request MUST be encoded as an SSZ-container.
+
+The response MUST consist of zero or more `response_chunk`. Each _successful_ `response_chunk` MUST contain a single `LightClientUpdate` payload.
+
+Peers SHOULD provide results as defined in [`create_light_client_update`](./full-node.md#create_light_client_update). They MUST respond with at least the earliest known result within the requested range, and MUST send results in consecutive order (by period). The response MUST NOT contain more than `min(MAX_REQUEST_LIGHT_CLIENT_UPDATES, count)` results.
+
+For each `response_chunk`, a `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(update.attested_header.slot))` is used to select the fork namespace of the Response type. Note that this `fork_version` may be different from the one used to verify the `update.sync_aggregate`, which is based on `update.signature_slot`.
+
+Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
+
+[0]: # (eth2spec: skip)
+
+| `fork_version` | Response chunk SSZ type |
+| ------------------------------- | ------------------------------------ |
+| `GENESIS_FORK_VERSION` | n/a |
+| `ALTAIR_FORK_VERSION` and later | `altair.LightClientUpdate` |
+
+##### GetLightClientFinalityUpdate
+
+**Protocol ID:** `/eth2/beacon_chain/req/light_client_finality_update/1/`
+
+No Request Content.
+
+Response Content:
+
+```
+(
+ LightClientFinalityUpdate
+)
+```
+
+Requests the latest `LightClientFinalityUpdate` known by a peer.
+
+Peers SHOULD provide results as defined in [`create_light_client_finality_update`](./full-node.md#create_light_client_finality_update).
+
+When no `LightClientFinalityUpdate` is available, peers SHOULD respond with error code `3: ResourceUnavailable`.
+
+A `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(finality_update.attested_header.slot))` is used to select the fork namespace of the Response type. Note that this `fork_version` may be different from the one used to verify the `finality_update.sync_aggregate`, which is based on `finality_update.signature_slot`.
+
+Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
+
+[0]: # (eth2spec: skip)
+
+| `fork_version` | Response SSZ type |
+| ------------------------------- | ------------------------------------ |
+| `GENESIS_FORK_VERSION` | n/a |
+| `ALTAIR_FORK_VERSION` and later | `altair.LightClientFinalityUpdate` |
+
+##### GetLightClientOptimisticUpdate
+
+**Protocol ID:** `/eth2/beacon_chain/req/light_client_optimistic_update/1/`
+
+No Request Content.
+
+Response Content:
+
+```
+(
+ LightClientOptimisticUpdate
+)
+```
+
+Requests the latest `LightClientOptimisticUpdate` known by a peer.
+
+Peers SHOULD provide results as defined in [`create_light_client_optimistic_update`](./full-node.md#create_light_client_optimistic_update).
+
+When no `LightClientOptimisticUpdate` is available, peers SHOULD respond with error code `3: ResourceUnavailable`.
+
+A `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(optimistic_update.attested_header.slot))` is used to select the fork namespace of the Response type. Note that this `fork_version` may be different from the one used to verify the `optimistic_update.sync_aggregate`, which is based on `optimistic_update.signature_slot`.
+
+Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
+
+[0]: # (eth2spec: skip)
+
+| `fork_version` | Response SSZ type |
+| ------------------------------- | ------------------------------------ |
+| `GENESIS_FORK_VERSION` | n/a |
+| `ALTAIR_FORK_VERSION` and later | `altair.LightClientOptimisticUpdate` |
+
+## Light clients
+
+Light clients using libp2p to stay in sync with the network SHOULD subscribe to the [`light_client_finality_update`](#light_client_finality_update) and [`light_client_optimistic_update`](#light_client_optimistic_update) pubsub topics and validate all received messages while the [light client sync process](./light-client.md#light-client-sync-process) supports processing `LightClientFinalityUpdate` and `LightClientOptimisticUpdate` structures.
+
+Light clients MAY also collect historic light client data and make it available to other peers. If they do, they SHOULD advertise supported message endpoints in [the Req/Resp domain](#the-reqresp-domain), and MAY also update the contents of their [`Status`](../../phase0/p2p-interface.md#status) message to reflect the locally available light client data.
+
+If only limited light client data is locally available, the light client SHOULD use data based on `genesis_block` and `GENESIS_SLOT` in its `Status` message. Hybrid peers that also implement full node functionality MUST only incorporate data based on their full node sync progress into their `Status` message.
+
+## Validator assignments
+
+This section extends the [honest validator specification](../validator.md) with additional responsibilities to enable light clients to sync with the network.
+
+### Beacon chain responsibilities
+
+All full nodes SHOULD subscribe to and provide stability on the [`light_client_finality_update`](#light_client_finality_update) and [`light_client_optimistic_update`](#light_client_optimistic_update) pubsub topics by validating all received messages.
+
+### Sync committee
+
+Whenever fork choice selects a new head block with a sync aggregate participation `>= MIN_SYNC_COMMITTEE_PARTICIPANTS` and a post-Altair parent block, full nodes with at least one validator assigned to the current sync committee at the block's `slot` SHOULD broadcast derived light client data as follows:
+
+- If `finalized_header.slot` increased, a `LightClientFinalityUpdate` SHOULD be broadcasted to the pubsub topic `light_client_finality_update` if no matching message has yet been forwarded as part of gossip validation.
+- If `attested_header.slot` increased, a `LightClientOptimisticUpdate` SHOULD be broadcasted to the pubsub topic `light_client_optimistic_update` if no matching message has yet been forwarded as part of gossip validation.
+
+These messages SHOULD be broadcasted after one-third of `slot` has transpired (`SECONDS_PER_SLOT / INTERVALS_PER_SLOT` seconds after the start of the slot). To ensure that the corresponding block was given enough time to propagate through the network, they SHOULD NOT be sent earlier. Note that this is different from how other messages are handled, e.g., attestations, which may be sent early.
diff --git a/specs/altair/light-client/sync-protocol.md b/specs/altair/light-client/sync-protocol.md
new file mode 100644
index 000000000..39a0b3d11
--- /dev/null
+++ b/specs/altair/light-client/sync-protocol.md
@@ -0,0 +1,496 @@
+# Altair Light Client -- Sync Protocol
+
+**Notice**: This document is a work-in-progress for researchers and implementers.
+
+## Table of contents
+
+
+
+
+
+- [Introduction](#introduction)
+- [Constants](#constants)
+- [Preset](#preset)
+ - [Misc](#misc)
+- [Containers](#containers)
+ - [`LightClientBootstrap`](#lightclientbootstrap)
+ - [`LightClientUpdate`](#lightclientupdate)
+ - [`LightClientFinalityUpdate`](#lightclientfinalityupdate)
+ - [`LightClientOptimisticUpdate`](#lightclientoptimisticupdate)
+ - [`LightClientStore`](#lightclientstore)
+- [Helper functions](#helper-functions)
+ - [`is_sync_committee_update`](#is_sync_committee_update)
+ - [`is_finality_update`](#is_finality_update)
+ - [`is_better_update`](#is_better_update)
+ - [`is_next_sync_committee_known`](#is_next_sync_committee_known)
+ - [`get_safety_threshold`](#get_safety_threshold)
+ - [`get_subtree_index`](#get_subtree_index)
+ - [`compute_sync_committee_period_at_slot`](#compute_sync_committee_period_at_slot)
+- [Light client initialization](#light-client-initialization)
+ - [`initialize_light_client_store`](#initialize_light_client_store)
+- [Light client state updates](#light-client-state-updates)
+ - [`validate_light_client_update`](#validate_light_client_update)
+ - [`apply_light_client_update`](#apply_light_client_update)
+ - [`process_light_client_store_force_update`](#process_light_client_store_force_update)
+ - [`process_light_client_update`](#process_light_client_update)
+ - [`process_light_client_finality_update`](#process_light_client_finality_update)
+ - [`process_light_client_optimistic_update`](#process_light_client_optimistic_update)
+
+
+
+
+## Introduction
+
+The beacon chain is designed to be light client friendly for constrained environments to
+access Ethereum with reasonable safety and liveness.
+Such environments include resource-constrained devices (e.g. phones for trust-minimized wallets)
+and metered VMs (e.g. blockchain VMs for cross-chain bridges).
+
+This document suggests a minimal light client design for the beacon chain that
+uses sync committees introduced in [this beacon chain extension](./beacon-chain.md).
+
+Additional documents describe how the light client sync protocol can be used:
+- [Full node](./full-node.md)
+- [Light client](./light-client.md)
+- [Networking](./p2p-interface.md)
+
+## Constants
+
+| Name | Value |
+| - | - |
+| `FINALIZED_ROOT_INDEX` | `get_generalized_index(BeaconState, 'finalized_checkpoint', 'root')` (= 105) |
+| `CURRENT_SYNC_COMMITTEE_INDEX` | `get_generalized_index(BeaconState, 'current_sync_committee')` (= 54) |
+| `NEXT_SYNC_COMMITTEE_INDEX` | `get_generalized_index(BeaconState, 'next_sync_committee')` (= 55) |
+
+## Preset
+
+### Misc
+
+| Name | Value | Unit | Duration |
+| - | - | - | - |
+| `MIN_SYNC_COMMITTEE_PARTICIPANTS` | `1` | validators | |
+| `UPDATE_TIMEOUT` | `SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD` | slots | ~27.3 hours |
+
+## Containers
+
+### `LightClientBootstrap`
+
+```python
+class LightClientBootstrap(Container):
+ # The requested beacon block header
+ header: BeaconBlockHeader
+ # Current sync committee corresponding to `header`
+ current_sync_committee: SyncCommittee
+ current_sync_committee_branch: Vector[Bytes32, floorlog2(CURRENT_SYNC_COMMITTEE_INDEX)]
+```
+
+### `LightClientUpdate`
+
+```python
+class LightClientUpdate(Container):
+ # The beacon block header that is attested to by the sync committee
+ attested_header: BeaconBlockHeader
+ # Next sync committee corresponding to `attested_header`
+ next_sync_committee: SyncCommittee
+ next_sync_committee_branch: Vector[Bytes32, floorlog2(NEXT_SYNC_COMMITTEE_INDEX)]
+ # The finalized beacon block header attested to by Merkle branch
+ finalized_header: BeaconBlockHeader
+ finality_branch: Vector[Bytes32, floorlog2(FINALIZED_ROOT_INDEX)]
+ # Sync committee aggregate signature
+ sync_aggregate: SyncAggregate
+ # Slot at which the aggregate signature was created (untrusted)
+ signature_slot: Slot
+```
+
+### `LightClientFinalityUpdate`
+
+```python
+class LightClientFinalityUpdate(Container):
+ # The beacon block header that is attested to by the sync committee
+ attested_header: BeaconBlockHeader
+ # The finalized beacon block header attested to by Merkle branch
+ finalized_header: BeaconBlockHeader
+ finality_branch: Vector[Bytes32, floorlog2(FINALIZED_ROOT_INDEX)]
+ # Sync committee aggregate signature
+ sync_aggregate: SyncAggregate
+ # Slot at which the aggregate signature was created (untrusted)
+ signature_slot: Slot
+```
+
+### `LightClientOptimisticUpdate`
+
+```python
+class LightClientOptimisticUpdate(Container):
+ # The beacon block header that is attested to by the sync committee
+ attested_header: BeaconBlockHeader
+ # Sync committee aggregate signature
+ sync_aggregate: SyncAggregate
+ # Slot at which the aggregate signature was created (untrusted)
+ signature_slot: Slot
+```
+
+### `LightClientStore`
+
+```python
+@dataclass
+class LightClientStore(object):
+ # Beacon block header that is finalized
+ finalized_header: BeaconBlockHeader
+ # Sync committees corresponding to the header
+ current_sync_committee: SyncCommittee
+ next_sync_committee: SyncCommittee
+ # Best available header to switch finalized head to if we see nothing else
+ best_valid_update: Optional[LightClientUpdate]
+ # Most recent available reasonably-safe header
+ optimistic_header: BeaconBlockHeader
+ # Max number of active participants in a sync committee (used to calculate safety threshold)
+ previous_max_active_participants: uint64
+ current_max_active_participants: uint64
+```
+
+## Helper functions
+
+### `is_sync_committee_update`
+
+```python
+def is_sync_committee_update(update: LightClientUpdate) -> bool:
+ return update.next_sync_committee_branch != [Bytes32() for _ in range(floorlog2(NEXT_SYNC_COMMITTEE_INDEX))]
+```
+
+### `is_finality_update`
+
+```python
+def is_finality_update(update: LightClientUpdate) -> bool:
+ return update.finality_branch != [Bytes32() for _ in range(floorlog2(FINALIZED_ROOT_INDEX))]
+```
+
+### `is_better_update`
+
+```python
+def is_better_update(new_update: LightClientUpdate, old_update: LightClientUpdate) -> bool:
+ # Compare supermajority (> 2/3) sync committee participation
+ max_active_participants = len(new_update.sync_aggregate.sync_committee_bits)
+ new_num_active_participants = sum(new_update.sync_aggregate.sync_committee_bits)
+ old_num_active_participants = sum(old_update.sync_aggregate.sync_committee_bits)
+ new_has_supermajority = new_num_active_participants * 3 >= max_active_participants * 2
+ old_has_supermajority = old_num_active_participants * 3 >= max_active_participants * 2
+ if new_has_supermajority != old_has_supermajority:
+ return new_has_supermajority > old_has_supermajority
+ if not new_has_supermajority and new_num_active_participants != old_num_active_participants:
+ return new_num_active_participants > old_num_active_participants
+
+ # Compare presence of relevant sync committee
+ new_has_relevant_sync_committee = is_sync_committee_update(new_update) and (
+ compute_sync_committee_period_at_slot(new_update.attested_header.slot)
+ == compute_sync_committee_period_at_slot(new_update.signature_slot)
+ )
+ old_has_relevant_sync_committee = is_sync_committee_update(old_update) and (
+ compute_sync_committee_period_at_slot(old_update.attested_header.slot)
+ == compute_sync_committee_period_at_slot(old_update.signature_slot)
+ )
+ if new_has_relevant_sync_committee != old_has_relevant_sync_committee:
+ return new_has_relevant_sync_committee
+
+ # Compare indication of any finality
+ new_has_finality = is_finality_update(new_update)
+ old_has_finality = is_finality_update(old_update)
+ if new_has_finality != old_has_finality:
+ return new_has_finality
+
+ # Compare sync committee finality
+ if new_has_finality:
+ new_has_sync_committee_finality = (
+ compute_sync_committee_period_at_slot(new_update.finalized_header.slot)
+ == compute_sync_committee_period_at_slot(new_update.attested_header.slot)
+ )
+ old_has_sync_committee_finality = (
+ compute_sync_committee_period_at_slot(old_update.finalized_header.slot)
+ == compute_sync_committee_period_at_slot(old_update.attested_header.slot)
+ )
+ if new_has_sync_committee_finality != old_has_sync_committee_finality:
+ return new_has_sync_committee_finality
+
+ # Tiebreaker 1: Sync committee participation beyond supermajority
+ if new_num_active_participants != old_num_active_participants:
+ return new_num_active_participants > old_num_active_participants
+
+ # Tiebreaker 2: Prefer older data (fewer changes to best)
+ if new_update.attested_header.slot != old_update.attested_header.slot:
+ return new_update.attested_header.slot < old_update.attested_header.slot
+ return new_update.signature_slot < old_update.signature_slot
+```
+
+### `is_next_sync_committee_known`
+
+```python
+def is_next_sync_committee_known(store: LightClientStore) -> bool:
+ return store.next_sync_committee != SyncCommittee()
+```
+
+### `get_safety_threshold`
+
+```python
+def get_safety_threshold(store: LightClientStore) -> uint64:
+ return max(
+ store.previous_max_active_participants,
+ store.current_max_active_participants,
+ ) // 2
+```
+
+### `get_subtree_index`
+
+```python
+def get_subtree_index(generalized_index: GeneralizedIndex) -> uint64:
+ return uint64(generalized_index % 2**(floorlog2(generalized_index)))
+```
+
+### `compute_sync_committee_period_at_slot`
+
+```python
+def compute_sync_committee_period_at_slot(slot: Slot) -> uint64:
+ return compute_sync_committee_period(compute_epoch_at_slot(slot))
+```
+
+## Light client initialization
+
+A light client maintains its state in a `store` object of type `LightClientStore`. `initialize_light_client_store` initializes a new `store` with a received `LightClientBootstrap` derived from a given `trusted_block_root`.
+
+### `initialize_light_client_store`
+
+```python
+def initialize_light_client_store(trusted_block_root: Root,
+ bootstrap: LightClientBootstrap) -> LightClientStore:
+ assert hash_tree_root(bootstrap.header) == trusted_block_root
+
+ assert is_valid_merkle_branch(
+ leaf=hash_tree_root(bootstrap.current_sync_committee),
+ branch=bootstrap.current_sync_committee_branch,
+ depth=floorlog2(CURRENT_SYNC_COMMITTEE_INDEX),
+ index=get_subtree_index(CURRENT_SYNC_COMMITTEE_INDEX),
+ root=bootstrap.header.state_root,
+ )
+
+ return LightClientStore(
+ finalized_header=bootstrap.header,
+ current_sync_committee=bootstrap.current_sync_committee,
+ next_sync_committee=SyncCommittee(),
+ best_valid_update=None,
+ optimistic_header=bootstrap.header,
+ previous_max_active_participants=0,
+ current_max_active_participants=0,
+ )
+```
+
+## Light client state updates
+
+- A light client receives objects of type `LightClientUpdate`, `LightClientFinalityUpdate` and `LightClientOptimisticUpdate`:
+ - **`update: LightClientUpdate`**: Every `update` triggers `process_light_client_update(store, update, current_slot, genesis_validators_root)` where `current_slot` is the current slot based on a local clock.
+ - **`finality_update: LightClientFinalityUpdate`**: Every `finality_update` triggers `process_light_client_finality_update(store, finality_update, current_slot, genesis_validators_root)`.
+ - **`optimistic_update: LightClientOptimisticUpdate`**: Every `optimistic_update` triggers `process_light_client_optimistic_update(store, optimistic_update, current_slot, genesis_validators_root)`.
+- `process_light_client_store_force_update` MAY be called based on use case dependent heuristics if light client sync appears stuck.
+
+### `validate_light_client_update`
+
+```python
+def validate_light_client_update(store: LightClientStore,
+ update: LightClientUpdate,
+ current_slot: Slot,
+ genesis_validators_root: Root) -> None:
+ # Verify sync committee has sufficient participants
+ sync_aggregate = update.sync_aggregate
+ assert sum(sync_aggregate.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS
+
+ # Verify update does not skip a sync committee period
+ assert current_slot >= update.signature_slot > update.attested_header.slot >= update.finalized_header.slot
+ store_period = compute_sync_committee_period_at_slot(store.finalized_header.slot)
+ update_signature_period = compute_sync_committee_period_at_slot(update.signature_slot)
+ if is_next_sync_committee_known(store):
+ assert update_signature_period in (store_period, store_period + 1)
+ else:
+ assert update_signature_period == store_period
+
+ # Verify update is relevant
+ update_attested_period = compute_sync_committee_period_at_slot(update.attested_header.slot)
+ update_has_next_sync_committee = not is_next_sync_committee_known(store) and (
+ is_sync_committee_update(update) and update_attested_period == store_period
+ )
+ assert (
+ update.attested_header.slot > store.finalized_header.slot
+ or update_has_next_sync_committee
+ )
+
+ # Verify that the `finality_branch`, if present, confirms `finalized_header`
+ # to match the finalized checkpoint root saved in the state of `attested_header`.
+ # Note that the genesis finalized checkpoint root is represented as a zero hash.
+ if not is_finality_update(update):
+ assert update.finalized_header == BeaconBlockHeader()
+ else:
+ if update.finalized_header.slot == GENESIS_SLOT:
+ assert update.finalized_header == BeaconBlockHeader()
+ finalized_root = Bytes32()
+ else:
+ finalized_root = hash_tree_root(update.finalized_header)
+ assert is_valid_merkle_branch(
+ leaf=finalized_root,
+ branch=update.finality_branch,
+ depth=floorlog2(FINALIZED_ROOT_INDEX),
+ index=get_subtree_index(FINALIZED_ROOT_INDEX),
+ root=update.attested_header.state_root,
+ )
+
+ # Verify that the `next_sync_committee`, if present, actually is the next sync committee saved in the
+ # state of the `attested_header`
+ if not is_sync_committee_update(update):
+ assert update.next_sync_committee == SyncCommittee()
+ else:
+ if update_attested_period == store_period and is_next_sync_committee_known(store):
+ assert update.next_sync_committee == store.next_sync_committee
+ assert is_valid_merkle_branch(
+ leaf=hash_tree_root(update.next_sync_committee),
+ branch=update.next_sync_committee_branch,
+ depth=floorlog2(NEXT_SYNC_COMMITTEE_INDEX),
+ index=get_subtree_index(NEXT_SYNC_COMMITTEE_INDEX),
+ root=update.attested_header.state_root,
+ )
+
+ # Verify sync committee aggregate signature
+ if update_signature_period == store_period:
+ sync_committee = store.current_sync_committee
+ else:
+ sync_committee = store.next_sync_committee
+ participant_pubkeys = [
+ pubkey for (bit, pubkey) in zip(sync_aggregate.sync_committee_bits, sync_committee.pubkeys)
+ if bit
+ ]
+ fork_version = compute_fork_version(compute_epoch_at_slot(update.signature_slot))
+ domain = compute_domain(DOMAIN_SYNC_COMMITTEE, fork_version, genesis_validators_root)
+ signing_root = compute_signing_root(update.attested_header, domain)
+ assert bls.FastAggregateVerify(participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature)
+```
+
+### `apply_light_client_update`
+
+```python
+def apply_light_client_update(store: LightClientStore, update: LightClientUpdate) -> None:
+ store_period = compute_sync_committee_period_at_slot(store.finalized_header.slot)
+ update_finalized_period = compute_sync_committee_period_at_slot(update.finalized_header.slot)
+ if not is_next_sync_committee_known(store):
+ assert update_finalized_period == store_period
+ store.next_sync_committee = update.next_sync_committee
+ elif update_finalized_period == store_period + 1:
+ store.current_sync_committee = store.next_sync_committee
+ store.next_sync_committee = update.next_sync_committee
+ store.previous_max_active_participants = store.current_max_active_participants
+ store.current_max_active_participants = 0
+ if update.finalized_header.slot > store.finalized_header.slot:
+ store.finalized_header = update.finalized_header
+ if store.finalized_header.slot > store.optimistic_header.slot:
+ store.optimistic_header = store.finalized_header
+```
+
+### `process_light_client_store_force_update`
+
+```python
+def process_light_client_store_force_update(store: LightClientStore, current_slot: Slot) -> None:
+ if (
+ current_slot > store.finalized_header.slot + UPDATE_TIMEOUT
+ and store.best_valid_update is not None
+ ):
+ # Forced best update when the update timeout has elapsed.
+ # Because the apply logic waits for `finalized_header.slot` to indicate sync committee finality,
+ # the `attested_header` may be treated as `finalized_header` in extended periods of non-finality
+ # to guarantee progression into later sync committee periods according to `is_better_update`.
+ if store.best_valid_update.finalized_header.slot <= store.finalized_header.slot:
+ store.best_valid_update.finalized_header = store.best_valid_update.attested_header
+ apply_light_client_update(store, store.best_valid_update)
+ store.best_valid_update = None
+```
+
+### `process_light_client_update`
+
+```python
+def process_light_client_update(store: LightClientStore,
+ update: LightClientUpdate,
+ current_slot: Slot,
+ genesis_validators_root: Root) -> None:
+ validate_light_client_update(store, update, current_slot, genesis_validators_root)
+
+ sync_committee_bits = update.sync_aggregate.sync_committee_bits
+
+ # Update the best update in case we have to force-update to it if the timeout elapses
+ if (
+ store.best_valid_update is None
+ or is_better_update(update, store.best_valid_update)
+ ):
+ store.best_valid_update = update
+
+ # Track the maximum number of active participants in the committee signatures
+ store.current_max_active_participants = max(
+ store.current_max_active_participants,
+ sum(sync_committee_bits),
+ )
+
+ # Update the optimistic header
+ if (
+ sum(sync_committee_bits) > get_safety_threshold(store)
+ and update.attested_header.slot > store.optimistic_header.slot
+ ):
+ store.optimistic_header = update.attested_header
+
+ # Update finalized header
+ update_has_finalized_next_sync_committee = (
+ not is_next_sync_committee_known(store)
+ and is_sync_committee_update(update) and is_finality_update(update) and (
+ compute_sync_committee_period_at_slot(update.finalized_header.slot)
+ == compute_sync_committee_period_at_slot(update.attested_header.slot)
+ )
+ )
+ if (
+ sum(sync_committee_bits) * 3 >= len(sync_committee_bits) * 2
+ and (
+ update.finalized_header.slot > store.finalized_header.slot
+ or update_has_finalized_next_sync_committee
+ )
+ ):
+ # Normal update through 2/3 threshold
+ apply_light_client_update(store, update)
+ store.best_valid_update = None
+```
+
+### `process_light_client_finality_update`
+
+```python
+def process_light_client_finality_update(store: LightClientStore,
+ finality_update: LightClientFinalityUpdate,
+ current_slot: Slot,
+ genesis_validators_root: Root) -> None:
+ update = LightClientUpdate(
+ attested_header=finality_update.attested_header,
+ next_sync_committee=SyncCommittee(),
+ next_sync_committee_branch=[Bytes32() for _ in range(floorlog2(NEXT_SYNC_COMMITTEE_INDEX))],
+ finalized_header=finality_update.finalized_header,
+ finality_branch=finality_update.finality_branch,
+ sync_aggregate=finality_update.sync_aggregate,
+ signature_slot=finality_update.signature_slot,
+ )
+ process_light_client_update(store, update, current_slot, genesis_validators_root)
+```
+
+### `process_light_client_optimistic_update`
+
+```python
+def process_light_client_optimistic_update(store: LightClientStore,
+ optimistic_update: LightClientOptimisticUpdate,
+ current_slot: Slot,
+ genesis_validators_root: Root) -> None:
+ update = LightClientUpdate(
+ attested_header=optimistic_update.attested_header,
+ next_sync_committee=SyncCommittee(),
+ next_sync_committee_branch=[Bytes32() for _ in range(floorlog2(NEXT_SYNC_COMMITTEE_INDEX))],
+ finalized_header=BeaconBlockHeader(),
+ finality_branch=[Bytes32() for _ in range(floorlog2(FINALIZED_ROOT_INDEX))],
+ sync_aggregate=optimistic_update.sync_aggregate,
+ signature_slot=optimistic_update.signature_slot,
+ )
+ process_light_client_update(store, update, current_slot, genesis_validators_root)
+```
diff --git a/specs/altair/sync-protocol.md b/specs/altair/sync-protocol.md
deleted file mode 100644
index 8751ba6e8..000000000
--- a/specs/altair/sync-protocol.md
+++ /dev/null
@@ -1,268 +0,0 @@
-# Altair -- Minimal Light Client
-
-**Notice**: This document is a work-in-progress for researchers and implementers.
-
-## Table of contents
-
-
-
-
-
-- [Introduction](#introduction)
-- [Constants](#constants)
-- [Preset](#preset)
- - [Misc](#misc)
-- [Containers](#containers)
- - [`LightClientUpdate`](#lightclientupdate)
- - [`LightClientStore`](#lightclientstore)
-- [Helper functions](#helper-functions)
- - [`is_finality_update`](#is_finality_update)
- - [`get_subtree_index`](#get_subtree_index)
- - [`get_active_header`](#get_active_header)
- - [`get_safety_threshold`](#get_safety_threshold)
-- [Light client state updates](#light-client-state-updates)
- - [`process_slot_for_light_client_store`](#process_slot_for_light_client_store)
- - [`validate_light_client_update`](#validate_light_client_update)
- - [`apply_light_client_update`](#apply_light_client_update)
- - [`process_light_client_update`](#process_light_client_update)
-
-
-
-
-## Introduction
-
-The beacon chain is designed to be light client friendly for constrained environments to
-access Ethereum with reasonable safety and liveness.
-Such environments include resource-constrained devices (e.g. phones for trust-minimised wallets)
-and metered VMs (e.g. blockchain VMs for cross-chain bridges).
-
-This document suggests a minimal light client design for the beacon chain that
-uses sync committees introduced in [this beacon chain extension](./beacon-chain.md).
-
-## Constants
-
-| Name | Value |
-| - | - |
-| `FINALIZED_ROOT_INDEX` | `get_generalized_index(BeaconState, 'finalized_checkpoint', 'root')` (= 105) |
-| `NEXT_SYNC_COMMITTEE_INDEX` | `get_generalized_index(BeaconState, 'next_sync_committee')` (= 55) |
-
-## Preset
-
-### Misc
-
-| Name | Value | Unit | Duration |
-| - | - | - | - |
-| `MIN_SYNC_COMMITTEE_PARTICIPANTS` | `1` | validators |
-| `UPDATE_TIMEOUT` | `SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD` | slots | ~27.3 hours |
-
-## Containers
-
-### `LightClientUpdate`
-
-```python
-class LightClientUpdate(Container):
- # The beacon block header that is attested to by the sync committee
- attested_header: BeaconBlockHeader
- # Next sync committee corresponding to the active header
- next_sync_committee: SyncCommittee
- next_sync_committee_branch: Vector[Bytes32, floorlog2(NEXT_SYNC_COMMITTEE_INDEX)]
- # The finalized beacon block header attested to by Merkle branch
- finalized_header: BeaconBlockHeader
- finality_branch: Vector[Bytes32, floorlog2(FINALIZED_ROOT_INDEX)]
- # Sync committee aggregate signature
- sync_aggregate: SyncAggregate
- # Fork version for the aggregate signature
- fork_version: Version
-```
-
-### `LightClientStore`
-
-```python
-@dataclass
-class LightClientStore(object):
- # Beacon block header that is finalized
- finalized_header: BeaconBlockHeader
- # Sync committees corresponding to the header
- current_sync_committee: SyncCommittee
- next_sync_committee: SyncCommittee
- # Best available header to switch finalized head to if we see nothing else
- best_valid_update: Optional[LightClientUpdate]
- # Most recent available reasonably-safe header
- optimistic_header: BeaconBlockHeader
- # Max number of active participants in a sync committee (used to calculate safety threshold)
- previous_max_active_participants: uint64
- current_max_active_participants: uint64
-```
-
-## Helper functions
-
-### `is_finality_update`
-
-```python
-def is_finality_update(update: LightClientUpdate) -> bool:
- return update.finalized_header != BeaconBlockHeader()
-```
-
-### `get_subtree_index`
-
-```python
-def get_subtree_index(generalized_index: GeneralizedIndex) -> uint64:
- return uint64(generalized_index % 2**(floorlog2(generalized_index)))
-```
-
-### `get_active_header`
-
-```python
-def get_active_header(update: LightClientUpdate) -> BeaconBlockHeader:
- # The "active header" is the header that the update is trying to convince us
- # to accept. If a finalized header is present, it's the finalized header,
- # otherwise it's the attested header
- if is_finality_update(update):
- return update.finalized_header
- else:
- return update.attested_header
-```
-
-### `get_safety_threshold`
-
-```python
-def get_safety_threshold(store: LightClientStore) -> uint64:
- return max(
- store.previous_max_active_participants,
- store.current_max_active_participants,
- ) // 2
-```
-
-## Light client state updates
-
-A light client maintains its state in a `store` object of type `LightClientStore` and receives `update` objects of type `LightClientUpdate`. Every `update` triggers `process_light_client_update(store, update, current_slot, genesis_validators_root)` where `current_slot` is the current slot based on a local clock. `process_slot_for_light_client_store` is triggered every time the current slot increments.
-
-#### `process_slot_for_light_client_store`
-
-```python
-def process_slot_for_light_client_store(store: LightClientStore, current_slot: Slot) -> None:
- if current_slot % UPDATE_TIMEOUT == 0:
- store.previous_max_active_participants = store.current_max_active_participants
- store.current_max_active_participants = 0
- if (
- current_slot > store.finalized_header.slot + UPDATE_TIMEOUT
- and store.best_valid_update is not None
- ):
- # Forced best update when the update timeout has elapsed
- apply_light_client_update(store, store.best_valid_update)
- store.best_valid_update = None
-```
-
-#### `validate_light_client_update`
-
-```python
-def validate_light_client_update(store: LightClientStore,
- update: LightClientUpdate,
- current_slot: Slot,
- genesis_validators_root: Root) -> None:
- # Verify update slot is larger than slot of current best finalized header
- active_header = get_active_header(update)
- assert current_slot >= active_header.slot > store.finalized_header.slot
-
- # Verify update does not skip a sync committee period
- finalized_period = compute_sync_committee_period(compute_epoch_at_slot(store.finalized_header.slot))
- update_period = compute_sync_committee_period(compute_epoch_at_slot(active_header.slot))
- assert update_period in (finalized_period, finalized_period + 1)
-
- # Verify that the `finalized_header`, if present, actually is the finalized header saved in the
- # state of the `attested header`
- if not is_finality_update(update):
- assert update.finality_branch == [Bytes32() for _ in range(floorlog2(FINALIZED_ROOT_INDEX))]
- else:
- assert is_valid_merkle_branch(
- leaf=hash_tree_root(update.finalized_header),
- branch=update.finality_branch,
- depth=floorlog2(FINALIZED_ROOT_INDEX),
- index=get_subtree_index(FINALIZED_ROOT_INDEX),
- root=update.attested_header.state_root,
- )
-
- # Verify update next sync committee if the update period incremented
- if update_period == finalized_period:
- sync_committee = store.current_sync_committee
- assert update.next_sync_committee_branch == [Bytes32() for _ in range(floorlog2(NEXT_SYNC_COMMITTEE_INDEX))]
- else:
- sync_committee = store.next_sync_committee
- assert is_valid_merkle_branch(
- leaf=hash_tree_root(update.next_sync_committee),
- branch=update.next_sync_committee_branch,
- depth=floorlog2(NEXT_SYNC_COMMITTEE_INDEX),
- index=get_subtree_index(NEXT_SYNC_COMMITTEE_INDEX),
- root=active_header.state_root,
- )
-
- sync_aggregate = update.sync_aggregate
-
- # Verify sync committee has sufficient participants
- assert sum(sync_aggregate.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS
-
- # Verify sync committee aggregate signature
- participant_pubkeys = [
- pubkey for (bit, pubkey) in zip(sync_aggregate.sync_committee_bits, sync_committee.pubkeys)
- if bit
- ]
- domain = compute_domain(DOMAIN_SYNC_COMMITTEE, update.fork_version, genesis_validators_root)
- signing_root = compute_signing_root(update.attested_header, domain)
- assert bls.FastAggregateVerify(participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature)
-```
-
-#### `apply_light_client_update`
-
-```python
-def apply_light_client_update(store: LightClientStore, update: LightClientUpdate) -> None:
- active_header = get_active_header(update)
- finalized_period = compute_sync_committee_period(compute_epoch_at_slot(store.finalized_header.slot))
- update_period = compute_sync_committee_period(compute_epoch_at_slot(active_header.slot))
- if update_period == finalized_period + 1:
- store.current_sync_committee = store.next_sync_committee
- store.next_sync_committee = update.next_sync_committee
- store.finalized_header = active_header
- if store.finalized_header.slot > store.optimistic_header.slot:
- store.optimistic_header = store.finalized_header
-```
-
-#### `process_light_client_update`
-
-```python
-def process_light_client_update(store: LightClientStore,
- update: LightClientUpdate,
- current_slot: Slot,
- genesis_validators_root: Root) -> None:
- validate_light_client_update(store, update, current_slot, genesis_validators_root)
-
- sync_committee_bits = update.sync_aggregate.sync_committee_bits
-
- # Update the best update in case we have to force-update to it if the timeout elapses
- if (
- store.best_valid_update is None
- or sum(sync_committee_bits) > sum(store.best_valid_update.sync_aggregate.sync_committee_bits)
- ):
- store.best_valid_update = update
-
- # Track the maximum number of active participants in the committee signatures
- store.current_max_active_participants = max(
- store.current_max_active_participants,
- sum(sync_committee_bits),
- )
-
- # Update the optimistic header
- if (
- sum(sync_committee_bits) > get_safety_threshold(store)
- and update.attested_header.slot > store.optimistic_header.slot
- ):
- store.optimistic_header = update.attested_header
-
- # Update finalized header
- if (
- sum(sync_committee_bits) * 3 >= len(sync_committee_bits) * 2
- and is_finality_update(update)
- ):
- # Normal update through 2/3 threshold
- apply_light_client_update(store, update)
- store.best_valid_update = None
-```
diff --git a/specs/altair/validator.md b/specs/altair/validator.md
index c59aa29f7..626535374 100644
--- a/specs/altair/validator.md
+++ b/specs/altair/validator.md
@@ -54,7 +54,7 @@ It builds on the [previous document for the behavior of an "honest validator" fr
This previous document is referred to below as the "Phase 0 document".
Altair introduces a new type of committee: the sync committee. Sync committees are responsible for signing each block of the canonical chain and there exists an efficient algorithm for light clients to sync the chain using the output of the sync committees.
-See the [sync protocol](./sync-protocol.md) for further details on the light client sync.
+See the [sync protocol](./light-client/sync-protocol.md) for further details on the light client sync.
Under this network upgrade, validators track their participation in this new committee type and produce the relevant signatures as required.
Block proposers incorporate the (aggregated) sync committee signatures into each block they produce.
@@ -265,7 +265,7 @@ This process occurs each slot.
##### Prepare sync committee message
-If a validator is in the current sync committee (i.e. `is_assigned_to_sync_committee()` above returns `True`), then for every `slot` in the current sync committee period, the validator should prepare a `SyncCommitteeMessage` for the previous slot (`slot - 1`) according to the logic in `get_sync_committee_message` as soon as they have determined the head block of `slot - 1`. This means that when assigned to `slot` a `SyncCommitteeMessage` is prepared and broadcast in `slot-1 ` instead of `slot`.
+If a validator is in the current sync committee (i.e. `is_assigned_to_sync_committee()` above returns `True`), then for every `slot` in the current sync committee period, the validator should prepare a `SyncCommitteeMessage` for the previous slot (`slot - 1`) according to the logic in `get_sync_committee_message` as soon as they have determined the head block of `slot - 1`. This means that when assigned to `slot` a `SyncCommitteeMessage` is prepared and broadcast in `slot - 1` instead of `slot`.
This logic is triggered upon the same conditions as when producing an attestation.
Meaning, a sync committee member should produce and broadcast a `SyncCommitteeMessage` either when (a) the validator has received a valid block from the expected block proposer for the current `slot` or (b) one-third of the slot has transpired (`SECONDS_PER_SLOT / INTERVALS_PER_SLOT` seconds after the start of the slot) -- whichever comes first.
diff --git a/specs/bellatrix/beacon-chain.md b/specs/bellatrix/beacon-chain.md
index b37a8ab71..6d39c1ae8 100644
--- a/specs/bellatrix/beacon-chain.md
+++ b/specs/bellatrix/beacon-chain.md
@@ -91,7 +91,7 @@ Bellatrix updates a few configuration values to move penalty parameters to their
| Name | Value |
| - | - |
-| `TERMINAL_TOTAL_DIFFICULTY` | **TBD** |
+| `TERMINAL_TOTAL_DIFFICULTY` | `58750000000000000000000` (Estimated: Sept 15, 2022) |
| `TERMINAL_BLOCK_HASH` | `Hash32()` |
| `TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH` | `FAR_FUTURE_EPOCH` |
@@ -298,8 +298,6 @@ def slash_validator(state: BeaconState,
increase_balance(state, whistleblower_index, Gwei(whistleblower_reward - proposer_reward))
```
-
-
## Beacon chain state transition function
### Execution engine
@@ -399,7 +397,7 @@ def process_slashings(state: BeaconState) -> None:
*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure Bellatrix testing only.
Modifications include:
-1. Use `BELLATRIX_FORK_VERSION` as the current fork version.
+1. Use `BELLATRIX_FORK_VERSION` as the previous and current fork version.
2. Utilize the Bellatrix `BeaconBlockBody` when constructing the initial `latest_block_header`.
3. Initialize `latest_execution_payload_header`.
If `execution_payload_header == ExecutionPayloadHeader()`, then the Merge has not yet occurred.
diff --git a/specs/bellatrix/fork-choice.md b/specs/bellatrix/fork-choice.md
index 60a54da9c..312768e44 100644
--- a/specs/bellatrix/fork-choice.md
+++ b/specs/bellatrix/fork-choice.md
@@ -165,7 +165,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
assert block.parent_root in store.block_states
# Make a copy of the state to avoid mutability issues
pre_state = copy(store.block_states[block.parent_root])
- # Blocks cannot be in the future. If they are, their consideration must be delayed until the are in the past.
+ # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past.
assert get_current_slot(store) >= block.slot
# Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor)
diff --git a/specs/bellatrix/fork.md b/specs/bellatrix/fork.md
index 8cb34099b..19700e383 100644
--- a/specs/bellatrix/fork.md
+++ b/specs/bellatrix/fork.md
@@ -9,6 +9,9 @@
- [Introduction](#introduction)
- [Configuration](#configuration)
+- [Helper functions](#helper-functions)
+ - [Misc](#misc)
+ - [Modified `compute_fork_version`](#modified-compute_fork_version)
- [Fork to Bellatrix](#fork-to-bellatrix)
- [Fork trigger](#fork-trigger)
- [Upgrading the state](#upgrading-the-state)
@@ -26,7 +29,25 @@ Warning: this configuration is not definitive.
| Name | Value |
| - | - |
| `BELLATRIX_FORK_VERSION` | `Version('0x02000000')` |
-| `BELLATRIX_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** |
+| `BELLATRIX_FORK_EPOCH` | `Epoch(144896)` (Sept 6, 2022, 11:34:47am UTC) |
+
+## Helper functions
+
+### Misc
+
+#### Modified `compute_fork_version`
+
+```python
+def compute_fork_version(epoch: Epoch) -> Version:
+ """
+ Return the fork version at the given ``epoch``.
+ """
+ if epoch >= BELLATRIX_FORK_EPOCH:
+ return BELLATRIX_FORK_VERSION
+ if epoch >= ALTAIR_FORK_EPOCH:
+ return ALTAIR_FORK_VERSION
+ return GENESIS_FORK_VERSION
+```
## Fork to Bellatrix
diff --git a/specs/capella/beacon-chain.md b/specs/capella/beacon-chain.md
index f940b3a27..40592a9bd 100644
--- a/specs/capella/beacon-chain.md
+++ b/specs/capella/beacon-chain.md
@@ -11,6 +11,7 @@
- [Constants](#constants)
- [Domain types](#domain-types)
- [Preset](#preset)
+ - [Misc](#misc)
- [State list lengths](#state-list-lengths)
- [Max operations per block](#max-operations-per-block)
- [Execution](#execution)
@@ -30,15 +31,19 @@
- [Beacon state mutators](#beacon-state-mutators)
- [`withdraw`](#withdraw)
- [Predicates](#predicates)
+ - [`has_eth1_withdrawal_credential`](#has_eth1_withdrawal_credential)
- [`is_fully_withdrawable_validator`](#is_fully_withdrawable_validator)
+ - [`is_partially_withdrawable_validator`](#is_partially_withdrawable_validator)
- [Beacon chain state transition function](#beacon-chain-state-transition-function)
- [Epoch processing](#epoch-processing)
- - [Withdrawals](#withdrawals)
+ - [Full withdrawals](#full-withdrawals)
+ - [Partial withdrawals](#partial-withdrawals)
- [Block processing](#block-processing)
- [New `process_withdrawals`](#new-process_withdrawals)
- [Modified `process_execution_payload`](#modified-process_execution_payload)
- [Modified `process_operations`](#modified-process_operations)
- [New `process_bls_to_execution_change`](#new-process_bls_to_execution_change)
+- [Testing](#testing)
@@ -48,7 +53,8 @@
Capella is a consensus-layer upgrade containing a number of features related
to validator withdrawals. Including:
* Automatic withdrawals of `withdrawable` validators
-* Partial withdrawals during block proposal
+* Partial withdrawals sweep for validators with 0x01 withdrawal
+ credentials and balances in excess of `MAX_EFFECTIVE_BALANCE`
* Operation to change from `BLS_WITHDRAWAL_PREFIX` to
`ETH1_ADDRESS_WITHDRAWAL_PREFIX` versioned withdrawal credentials to enable withdrawals for a validator
@@ -70,11 +76,17 @@ We define the following Python custom types for type hinting and readability:
## Preset
+### Misc
+
+| Name | Value |
+| - | - |
+| `MAX_PARTIAL_WITHDRAWALS_PER_EPOCH` | `uint64(2**8)` (= 256) |
+
### State list lengths
| Name | Value | Unit | Duration |
| - | - | :-: | :-: |
-| `WITHDRAWALS_QUEUE_LIMIT` | `uint64(2**40)` (= 1,099,511,627,776) | withdrawals enqueued in state|
+| `WITHDRAWAL_QUEUE_LIMIT` | `uint64(2**40)` (= 1,099,511,627,776) | withdrawals enqueued in state|
### Max operations per block
@@ -245,8 +257,9 @@ class BeaconState(Container):
# Execution
latest_execution_payload_header: ExecutionPayloadHeader
# Withdrawals
- withdrawal_index: WithdrawalIndex
- withdrawals_queue: List[Withdrawal, WITHDRAWALS_QUEUE_LIMIT] # [New in Capella]
+ withdrawal_queue: List[Withdrawal, WITHDRAWAL_QUEUE_LIMIT] # [New in Capella]
+ next_withdrawal_index: WithdrawalIndex # [New in Capella]
+ next_partial_withdrawal_validator_index: ValidatorIndex # [New in Capella]
```
## Helpers
@@ -256,21 +269,31 @@ class BeaconState(Container):
#### `withdraw`
```python
-def withdraw_balance(state: BeaconState, index: ValidatorIndex, amount: Gwei) -> None:
+def withdraw_balance(state: BeaconState, validator_index: ValidatorIndex, amount: Gwei) -> None:
# Decrease the validator's balance
- decrease_balance(state, index, amount)
+ decrease_balance(state, validator_index, amount)
# Create a corresponding withdrawal receipt
withdrawal = Withdrawal(
- index=state.withdrawal_index,
- address=state.validators[index].withdrawal_credentials[12:],
+ index=state.next_withdrawal_index,
+ address=ExecutionAddress(state.validators[validator_index].withdrawal_credentials[12:]),
amount=amount,
)
- state.withdrawal_index = WithdrawalIndex(state.withdrawal_index + 1)
- state.withdrawals_queue.append(withdrawal)
+ state.next_withdrawal_index = WithdrawalIndex(state.next_withdrawal_index + 1)
+ state.withdrawal_queue.append(withdrawal)
```
### Predicates
+#### `has_eth1_withdrawal_credential`
+
+```python
+def has_eth1_withdrawal_credential(validator: Validator) -> bool:
+ """
+ Check if ``validator`` has an 0x01 prefixed "eth1" withdrawal credential
+ """
+ return validator.withdrawal_credentials[:1] == ETH1_ADDRESS_WITHDRAWAL_PREFIX
+```
+
#### `is_fully_withdrawable_validator`
```python
@@ -278,8 +301,22 @@ def is_fully_withdrawable_validator(validator: Validator, epoch: Epoch) -> bool:
"""
Check if ``validator`` is fully withdrawable.
"""
- is_eth1_withdrawal_prefix = validator.withdrawal_credentials[:1] == ETH1_ADDRESS_WITHDRAWAL_PREFIX
- return is_eth1_withdrawal_prefix and validator.withdrawable_epoch <= epoch < validator.fully_withdrawn_epoch
+ return (
+ has_eth1_withdrawal_credential(validator)
+ and validator.withdrawable_epoch <= epoch < validator.fully_withdrawn_epoch
+ )
+```
+
+#### `is_partially_withdrawable_validator`
+
+```python
+def is_partially_withdrawable_validator(validator: Validator, balance: Gwei) -> bool:
+ """
+ Check if ``validator`` is partially withdrawable.
+ """
+ has_max_effective_balance = validator.effective_balance == MAX_EFFECTIVE_BALANCE
+ has_excess_balance = balance > MAX_EFFECTIVE_BALANCE
+ return has_eth1_withdrawal_credential(validator) and has_max_effective_balance and has_excess_balance
```
## Beacon chain state transition function
@@ -301,9 +338,11 @@ def process_epoch(state: BeaconState) -> None:
process_participation_flag_updates(state)
process_sync_committee_updates(state)
process_full_withdrawals(state) # [New in Capella]
+ process_partial_withdrawals(state) # [New in Capella]
+
```
-#### Withdrawals
+#### Full withdrawals
*Note*: The function `process_full_withdrawals` is new.
@@ -317,6 +356,31 @@ def process_full_withdrawals(state: BeaconState) -> None:
validator.fully_withdrawn_epoch = current_epoch
```
+#### Partial withdrawals
+
+*Note*: The function `process_partial_withdrawals` is new.
+
+```python
+def process_partial_withdrawals(state: BeaconState) -> None:
+ partial_withdrawals_count = 0
+ # Begin where we left off last time
+ validator_index = state.next_partial_withdrawal_validator_index
+ for _ in range(len(state.validators)):
+ balance = state.balances[validator_index]
+ validator = state.validators[validator_index]
+ if is_partially_withdrawable_validator(validator, balance):
+ withdraw_balance(state, validator_index, balance - MAX_EFFECTIVE_BALANCE)
+ partial_withdrawals_count += 1
+
+ # Iterate to next validator to check for partial withdrawal
+ validator_index = ValidatorIndex((validator_index + 1) % len(state.validators))
+ # Exit if performed maximum allowable withdrawals
+ if partial_withdrawals_count == MAX_PARTIAL_WITHDRAWALS_PER_EPOCH:
+ break
+
+ state.next_partial_withdrawal_validator_index = validator_index
+```
+
### Block processing
```python
@@ -335,15 +399,15 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:
```python
def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
- num_withdrawals = min(MAX_WITHDRAWALS_PER_PAYLOAD, len(state.withdrawals_queue))
- dequeued_withdrawals = state.withdrawals_queue[:num_withdrawals]
+ num_withdrawals = min(MAX_WITHDRAWALS_PER_PAYLOAD, len(state.withdrawal_queue))
+ dequeued_withdrawals = state.withdrawal_queue[:num_withdrawals]
assert len(dequeued_withdrawals) == len(payload.withdrawals)
for dequeued_withdrawal, withdrawal in zip(dequeued_withdrawals, payload.withdrawals):
assert dequeued_withdrawal == withdrawal
# Remove dequeued withdrawals from state
- state.withdrawals_queue = state.withdrawals_queue[num_withdrawals:]
+ state.withdrawal_queue = state.withdrawal_queue[num_withdrawals:]
```
#### Modified `process_execution_payload`
@@ -426,3 +490,58 @@ def process_bls_to_execution_change(state: BeaconState,
+ address_change.to_execution_address
)
```
+
+## Testing
+
+*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure Capella testing only.
+Modifications include:
+1. Use `CAPELLA_FORK_VERSION` as the previous and current fork version.
+2. Utilize the Capella `BeaconBlockBody` when constructing the initial `latest_block_header`.
+
+```python
+def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
+ eth1_timestamp: uint64,
+ deposits: Sequence[Deposit],
+ execution_payload_header: ExecutionPayloadHeader=ExecutionPayloadHeader()
+ ) -> BeaconState:
+ fork = Fork(
+ previous_version=CAPELLA_FORK_VERSION, # [Modified in Capella] for testing only
+ current_version=CAPELLA_FORK_VERSION, # [Modified in Capella]
+ epoch=GENESIS_EPOCH,
+ )
+ state = BeaconState(
+ genesis_time=eth1_timestamp + GENESIS_DELAY,
+ fork=fork,
+ eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))),
+ latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
+ randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy
+ )
+
+ # Process deposits
+ leaves = list(map(lambda deposit: deposit.data, deposits))
+ for index, deposit in enumerate(deposits):
+ deposit_data_list = List[DepositData, 2**DEPOSIT_CONTRACT_TREE_DEPTH](*leaves[:index + 1])
+ state.eth1_data.deposit_root = hash_tree_root(deposit_data_list)
+ process_deposit(state, deposit)
+
+ # Process activations
+ for index, validator in enumerate(state.validators):
+ balance = state.balances[index]
+ validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
+ if validator.effective_balance == MAX_EFFECTIVE_BALANCE:
+ validator.activation_eligibility_epoch = GENESIS_EPOCH
+ validator.activation_epoch = GENESIS_EPOCH
+
+ # Set genesis validators root for domain separation and chain versioning
+ state.genesis_validators_root = hash_tree_root(state.validators)
+
+ # Fill in sync committees
+ # Note: A duplicate committee is assigned for the current and next committee at genesis
+ state.current_sync_committee = get_next_sync_committee(state)
+ state.next_sync_committee = get_next_sync_committee(state)
+
+ # Initialize the execution payload header
+ state.latest_execution_payload_header = execution_payload_header
+
+ return state
+```
diff --git a/specs/capella/fork.md b/specs/capella/fork.md
index 5f015a4ff..c22387ee7 100644
--- a/specs/capella/fork.md
+++ b/specs/capella/fork.md
@@ -7,6 +7,9 @@
- [Introduction](#introduction)
- [Configuration](#configuration)
+- [Helper functions](#helper-functions)
+ - [Misc](#misc)
+ - [Modified `compute_fork_version`](#modified-compute_fork_version)
- [Fork to Capella](#fork-to-capella)
- [Fork trigger](#fork-trigger)
- [Upgrading the state](#upgrading-the-state)
@@ -27,6 +30,26 @@ Warning: this configuration is not definitive.
| `CAPELLA_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** |
+## Helper functions
+
+### Misc
+
+#### Modified `compute_fork_version`
+
+```python
+def compute_fork_version(epoch: Epoch) -> Version:
+ """
+ Return the fork version at the given ``epoch``.
+ """
+ if epoch >= CAPELLA_FORK_EPOCH:
+ return CAPELLA_FORK_VERSION
+ if epoch >= BELLATRIX_FORK_EPOCH:
+ return BELLATRIX_FORK_VERSION
+ if epoch >= ALTAIR_FORK_EPOCH:
+ return ALTAIR_FORK_VERSION
+ return GENESIS_FORK_VERSION
+```
+
## Fork to Capella
### Fork trigger
@@ -89,8 +112,9 @@ def upgrade_to_capella(pre: bellatrix.BeaconState) -> BeaconState:
# Execution-layer
latest_execution_payload_header=pre.latest_execution_payload_header,
# Withdrawals
- withdrawal_index=WithdrawalIndex(0),
- withdrawals_queue=[],
+ withdrawal_queue=[],
+ next_withdrawal_index=WithdrawalIndex(0),
+ next_partial_withdrawal_validator_index=ValidatorIndex(0),
)
for pre_validator in pre.validators:
diff --git a/specs/capella/validator.md b/specs/capella/validator.md
index 8c6c860a3..85dbd7e00 100644
--- a/specs/capella/validator.md
+++ b/specs/capella/validator.md
@@ -61,8 +61,8 @@ helper `get_expected_withdrawals`) and passed into the `ExecutionEngine` within
```python
def get_expected_withdrawals(state: BeaconState) -> Sequence[Withdrawal]:
- num_withdrawals = min(MAX_WITHDRAWALS_PER_PAYLOAD, len(state.withdrawals_queue))
- return state.withdrawals_queue[:num_withdrawals]
+ num_withdrawals = min(MAX_WITHDRAWALS_PER_PAYLOAD, len(state.withdrawal_queue))
+ return state.withdrawal_queue[:num_withdrawals]
```
*Note*: The only change made to `prepare_execution_payload` is to call
diff --git a/specs/eip4844/beacon-chain.md b/specs/eip4844/beacon-chain.md
index 8c84a2862..a1385d9e2 100644
--- a/specs/eip4844/beacon-chain.md
+++ b/specs/eip4844/beacon-chain.md
@@ -11,23 +11,22 @@
- [Introduction](#introduction)
- [Custom types](#custom-types)
- [Constants](#constants)
+ - [Blob](#blob)
- [Domain types](#domain-types)
- [Preset](#preset)
- - [Trusted setup](#trusted-setup)
+ - [Execution](#execution)
- [Configuration](#configuration)
- [Containers](#containers)
- [Extended containers](#extended-containers)
- [`BeaconBlockBody`](#beaconblockbody)
- [Helper functions](#helper-functions)
- - [KZG core](#kzg-core)
- - [`blob_to_kzg`](#blob_to_kzg)
- - [`kzg_to_versioned_hash`](#kzg_to_versioned_hash)
- [Misc](#misc)
+ - [`kzg_commitment_to_versioned_hash`](#kzg_commitment_to_versioned_hash)
- [`tx_peek_blob_versioned_hashes`](#tx_peek_blob_versioned_hashes)
- - [`verify_kzgs_against_transactions`](#verify_kzgs_against_transactions)
+ - [`verify_kzg_commitments_against_transactions`](#verify_kzg_commitments_against_transactions)
- [Beacon chain state transition function](#beacon-chain-state-transition-function)
- [Block processing](#block-processing)
- - [Blob KZGs](#blob-kzgs)
+ - [Blob KZG commitments](#blob-kzg-commitments)
- [Testing](#testing)
@@ -41,18 +40,19 @@ This upgrade adds blobs to the beacon chain as part of EIP-4844.
| Name | SSZ equivalent | Description |
| - | - | - |
-| `BLSFieldElement` | `uint256` | `x < BLS_MODULUS` |
| `Blob` | `Vector[BLSFieldElement, FIELD_ELEMENTS_PER_BLOB]` | |
| `VersionedHash` | `Bytes32` | |
| `KZGCommitment` | `Bytes48` | Same as BLS standard "is valid pubkey" check but also allows `0x00..00` for point-at-infinity |
## Constants
+### Blob
+
| Name | Value |
| - | - |
| `BLOB_TX_TYPE` | `uint8(0x05)` |
-| `FIELD_ELEMENTS_PER_BLOB` | `4096` |
-| `BLS_MODULUS` | `52435875175126190479447740508185965837690552500527637822603658699938581184513` |
+| `FIELD_ELEMENTS_PER_BLOB` | `uint64(4096)` |
+| `VERSIONED_HASH_VERSION_KZG` | `Bytes1(0x01)` |
### Domain types
@@ -62,15 +62,11 @@ This upgrade adds blobs to the beacon chain as part of EIP-4844.
## Preset
-### Trusted setup
-
-The trusted setup is part of the preset: during testing a `minimal` insecure variant may be used,
-but reusing the `mainnet` settings in public networks is a critical security requirement.
+### Execution
| Name | Value |
| - | - |
-| `KZG_SETUP_G2` | `Vector[G2Point, FIELD_ELEMENTS_PER_BLOB]`, contents TBD |
-| `KZG_SETUP_LAGRANGE` | `Vector[KZGCommitment, FIELD_ELEMENTS_PER_BLOB]`, contents TBD |
+| `MAX_BLOBS_PER_BLOCK` | `uint64(2**4)` (= 16) |
## Configuration
@@ -97,61 +93,51 @@ class BeaconBlockBody(Container):
sync_aggregate: SyncAggregate
# Execution
execution_payload: ExecutionPayload
- blob_kzgs: List[KZGCommitment, MAX_BLOBS_PER_BLOCK] # [New in EIP-4844]
+ blob_kzg_commitments: List[KZGCommitment, MAX_BLOBS_PER_BLOCK] # [New in EIP-4844]
```
## Helper functions
-### KZG core
-
-KZG core functions. These are also defined in EIP-4844 execution specs.
-
-#### `blob_to_kzg`
-
-```python
-def blob_to_kzg(blob: Blob) -> KZGCommitment:
- computed_kzg = bls.Z1
- for value, point_kzg in zip(blob, KZG_SETUP_LAGRANGE):
- assert value < BLS_MODULUS
- computed_kzg = bls.add(
- computed_kzg,
- bls.multiply(point_kzg, value)
- )
- return computed_kzg
-```
-
-#### `kzg_to_versioned_hash`
-
-```python
-def kzg_to_versioned_hash(kzg: KZGCommitment) -> VersionedHash:
- return BLOB_COMMITMENT_VERSION_KZG + hash(kzg)[1:]
-```
-
### Misc
+#### `kzg_commitment_to_versioned_hash`
+
+```python
+def kzg_commitment_to_versioned_hash(kzg_commitment: KZGCommitment) -> VersionedHash:
+ return VERSIONED_HASH_VERSION_KZG + hash(kzg_commitment)[1:]
+```
+
#### `tx_peek_blob_versioned_hashes`
This function retrieves the hashes from the `SignedBlobTransaction` as defined in EIP-4844, using SSZ offsets.
Offsets are little-endian `uint32` values, as defined in the [SSZ specification](../../ssz/simple-serialize.md).
+See [the full details of `blob_versioned_hashes` offset calculation](https://gist.github.com/protolambda/23bd106b66f6d4bb854ce46044aa3ca3).
```python
def tx_peek_blob_versioned_hashes(opaque_tx: Transaction) -> Sequence[VersionedHash]:
assert opaque_tx[0] == BLOB_TX_TYPE
message_offset = 1 + uint32.decode_bytes(opaque_tx[1:5])
# field offset: 32 + 8 + 32 + 32 + 8 + 4 + 32 + 4 + 4 = 156
- blob_versioned_hashes_offset = uint32.decode_bytes(opaque_tx[message_offset+156:message_offset+160])
- return [VersionedHash(opaque_tx[x:x+32]) for x in range(blob_versioned_hashes_offset, len(opaque_tx), 32)]
+ blob_versioned_hashes_offset = (
+ message_offset
+ + uint32.decode_bytes(opaque_tx[(message_offset + 156):(message_offset + 160)])
+ )
+ return [
+ VersionedHash(opaque_tx[x:(x + 32)])
+ for x in range(blob_versioned_hashes_offset, len(opaque_tx), 32)
+ ]
```
-#### `verify_kzgs_against_transactions`
+#### `verify_kzg_commitments_against_transactions`
```python
-def verify_kzgs_against_transactions(transactions: Sequence[Transaction], blob_kzgs: Sequence[KZGCommitment]) -> bool:
- all_versioned_hashes = []
- for tx in transactions:
- if tx[0] == BLOB_TX_TYPE:
- all_versioned_hashes.extend(tx_peek_blob_versioned_hashes(tx))
- return all_versioned_hashes == [kzg_to_versioned_hash(kzg) for kzg in blob_kzgs]
+def verify_kzg_commitments_against_transactions(transactions: Sequence[Transaction],
+ kzg_commitments: Sequence[KZGCommitment]) -> bool:
+ all_versioned_hashes = []
+ for tx in transactions:
+ if tx[0] == BLOB_TX_TYPE:
+ all_versioned_hashes += tx_peek_blob_versioned_hashes(tx)
+ return all_versioned_hashes == [kzg_commitment_to_versioned_hash(commitment) for commitment in kzg_commitments]
```
## Beacon chain state transition function
@@ -167,14 +153,14 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:
process_eth1_data(state, block.body)
process_operations(state, block.body)
process_sync_aggregate(state, block.body.sync_aggregate)
- process_blob_kzgs(state, block.body) # [New in EIP-4844]
+ process_blob_kzg_commitments(state, block.body) # [New in EIP-4844]
```
-#### Blob KZGs
+#### Blob KZG commitments
```python
-def process_blob_kzgs(state: BeaconState, body: BeaconBlockBody):
- assert verify_kzgs_against_transactions(body.execution_payload.transactions, body.blob_kzgs)
+def process_blob_kzg_commitments(state: BeaconState, body: BeaconBlockBody):
+ assert verify_kzg_commitments_against_transactions(body.execution_payload.transactions, body.blob_kzg_commitments)
```
## Testing
@@ -182,9 +168,53 @@ def process_blob_kzgs(state: BeaconState, body: BeaconBlockBody):
*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure EIP-4844 testing only.
The `BeaconState` initialization is unchanged, except for the use of the updated `eip4844.BeaconBlockBody` type
-when initializing the first body-root:
+when initializing the first body-root.
```python
-state.latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
-```
+def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
+ eth1_timestamp: uint64,
+ deposits: Sequence[Deposit],
+ execution_payload_header: ExecutionPayloadHeader=ExecutionPayloadHeader()
+ ) -> BeaconState:
+ fork = Fork(
+ previous_version=EIP4844_FORK_VERSION, # [Modified in EIP-4844] for testing only
+ current_version=EIP4844_FORK_VERSION, # [Modified in EIP-4844]
+ epoch=GENESIS_EPOCH,
+ )
+ state = BeaconState(
+ genesis_time=eth1_timestamp + GENESIS_DELAY,
+ fork=fork,
+ eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))),
+ latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
+ randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy
+ )
+ # Process deposits
+ leaves = list(map(lambda deposit: deposit.data, deposits))
+ for index, deposit in enumerate(deposits):
+ deposit_data_list = List[DepositData, 2**DEPOSIT_CONTRACT_TREE_DEPTH](*leaves[:index + 1])
+ state.eth1_data.deposit_root = hash_tree_root(deposit_data_list)
+ process_deposit(state, deposit)
+
+ # Process activations
+ for index, validator in enumerate(state.validators):
+ balance = state.balances[index]
+ validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
+ if validator.effective_balance == MAX_EFFECTIVE_BALANCE:
+ validator.activation_eligibility_epoch = GENESIS_EPOCH
+ validator.activation_epoch = GENESIS_EPOCH
+
+ # Set genesis validators root for domain separation and chain versioning
+ state.genesis_validators_root = hash_tree_root(state.validators)
+
+ # Fill in sync committees
+ # Note: A duplicate committee is assigned for the current and next committee at genesis
+ state.current_sync_committee = get_next_sync_committee(state)
+ state.next_sync_committee = get_next_sync_committee(state)
+
+ # Initialize the execution payload header
+ # If empty, will initialize a chain that has not yet gone through the Merge transition
+ state.latest_execution_payload_header = execution_payload_header
+
+ return state
+```
diff --git a/specs/eip4844/fork.md b/specs/eip4844/fork.md
index ad1b00b79..eaabba916 100644
--- a/specs/eip4844/fork.md
+++ b/specs/eip4844/fork.md
@@ -9,6 +9,9 @@
- [Introduction](#introduction)
- [Configuration](#configuration)
+- [Helper functions](#helper-functions)
+ - [Misc](#misc)
+ - [Modified `compute_fork_version`](#modified-compute_fork_version)
- [Fork to EIP-4844](#fork-to-eip-4844)
- [Fork trigger](#fork-trigger)
- [Upgrading the state](#upgrading-the-state)
@@ -25,9 +28,29 @@ Warning: this configuration is not definitive.
| Name | Value |
| - | - |
-| `EIP4844_FORK_VERSION` | `Version('0x03000000')` |
+| `EIP4844_FORK_VERSION` | `Version('0x04000000')` |
| `EIP4844_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** |
+## Helper functions
+
+### Misc
+
+#### Modified `compute_fork_version`
+
+```python
+def compute_fork_version(epoch: Epoch) -> Version:
+ """
+ Return the fork version at the given ``epoch``.
+ """
+ if epoch >= EIP4844_FORK_EPOCH:
+ return EIP4844_FORK_VERSION
+ if epoch >= BELLATRIX_FORK_EPOCH:
+ return BELLATRIX_FORK_VERSION
+ if epoch >= ALTAIR_FORK_EPOCH:
+ return ALTAIR_FORK_VERSION
+ return GENESIS_FORK_VERSION
+```
+
## Fork to EIP-4844
### Fork trigger
@@ -39,5 +62,54 @@ Note that for the pure EIP-4844 networks, we don't apply `upgrade_to_eip4844` si
### Upgrading the state
-The `eip4844.BeaconState` format is equal to the `bellatrix.BeaconState` format, no upgrade has to be performed.
+Since the `eip4844.BeaconState` format is equal to the `bellatrix.BeaconState` format, we only have to update `BeaconState.fork`.
+```python
+def upgrade_to_eip4844(pre: bellatrix.BeaconState) -> BeaconState:
+    # TODO: if Capella gets scheduled, sync this with the Capella.BeaconState changes
+ epoch = bellatrix.get_current_epoch(pre)
+ post = BeaconState(
+ # Versioning
+ genesis_time=pre.genesis_time,
+ genesis_validators_root=pre.genesis_validators_root,
+ slot=pre.slot,
+ fork=Fork(
+ previous_version=pre.fork.current_version,
+ current_version=EIP4844_FORK_VERSION, # [Modified in EIP4844]
+ epoch=epoch,
+ ),
+ # History
+ latest_block_header=pre.latest_block_header,
+ block_roots=pre.block_roots,
+ state_roots=pre.state_roots,
+ historical_roots=pre.historical_roots,
+ # Eth1
+ eth1_data=pre.eth1_data,
+ eth1_data_votes=pre.eth1_data_votes,
+ eth1_deposit_index=pre.eth1_deposit_index,
+ # Registry
+ validators=pre.validators,
+ balances=pre.balances,
+ # Randomness
+ randao_mixes=pre.randao_mixes,
+ # Slashings
+ slashings=pre.slashings,
+ # Participation
+ previous_epoch_participation=pre.previous_epoch_participation,
+ current_epoch_participation=pre.current_epoch_participation,
+ # Finality
+ justification_bits=pre.justification_bits,
+ previous_justified_checkpoint=pre.previous_justified_checkpoint,
+ current_justified_checkpoint=pre.current_justified_checkpoint,
+ finalized_checkpoint=pre.finalized_checkpoint,
+ # Inactivity
+ inactivity_scores=pre.inactivity_scores,
+ # Sync
+ current_sync_committee=pre.current_sync_committee,
+ next_sync_committee=pre.next_sync_committee,
+ # Execution-layer
+ latest_execution_payload_header=pre.latest_execution_payload_header,
+ )
+
+ return post
+```
diff --git a/specs/eip4844/p2p-interface.md b/specs/eip4844/p2p-interface.md
index ff2a11e25..9bd206127 100644
--- a/specs/eip4844/p2p-interface.md
+++ b/specs/eip4844/p2p-interface.md
@@ -10,7 +10,6 @@ The specification of these changes continues in the same format as the network s
- - [Preset](#preset)
- [Configuration](#configuration)
- [Containers](#containers)
- [`BlobsSidecar`](#blobssidecar)
@@ -32,13 +31,6 @@ The specification of these changes continues in the same format as the network s
-
-## Preset
-
-| Name | Value |
-| - | - |
-| `MAX_BLOBS_PER_BLOCK` | `uint64(2**4)` (= 16) |
-
## Configuration
| Name | Value | Description |
@@ -46,8 +38,6 @@ The specification of these changes continues in the same format as the network s
| `MAX_REQUEST_BLOBS_SIDECARS` | `2**7` (= 128) | Maximum number of blobs sidecars in a single request |
| `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` | `2**13` (= 8192, ~1.2 months) | The minimum epoch range over which a node must serve blobs sidecars |
-
-
## Containers
### `BlobsSidecar`
@@ -57,6 +47,7 @@ class BlobsSidecar(Container):
beacon_block_root: Root
beacon_block_slot: Slot
blobs: List[Blob, MAX_BLOBS_PER_BLOCK]
+ kzg_aggregated_proof: KZGProof
```
### `SignedBlobsSidecar`
@@ -67,7 +58,6 @@ class SignedBlobsSidecar(Container):
signature: BLSSignature
```
-
## The gossip domain: gossipsub
Some gossip meshes are upgraded in the fork of EIP4844 to support upgraded types.
@@ -102,9 +92,9 @@ In addition to the gossip validations for this topic from prior specifications,
the following validations MUST pass before forwarding the `signed_beacon_block` on the network.
Alias `block = signed_beacon_block.message`, `execution_payload = block.body.execution_payload`.
- _[REJECT]_ The KZG commitments of the blobs are all correctly encoded compressed BLS G1 Points.
- -- i.e. `all(bls.KeyValidate(commitment) for commitment in block.body.blob_kzgs)`
+ -- i.e. `all(bls.KeyValidate(commitment) for commitment in block.body.blob_kzg_commitments)`
- _[REJECT]_ The KZG commitments correspond to the versioned hashes in the transactions list.
- -- i.e. `verify_kzgs_against_transactions(block.body.execution_payload.transactions, block.body.blob_kzgs)`
+ -- i.e. `verify_kzg_commitments_against_transactions(block.body.execution_payload.transactions, block.body.blob_kzg_commitments)`
##### `blobs_sidecar`
@@ -112,21 +102,20 @@ This topic is used to propagate data blobs included in any given beacon block.
The following validations MUST pass before forwarding the `signed_blobs_sidecar` on the network;
Alias `sidecar = signed_blobs_sidecar.message`.
-- _[IGNORE]_ the `sidecar.beacon_block_slot` is for the current slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. `blobs_sidecar.beacon_block_slot == current_slot`.
+- _[IGNORE]_ the `sidecar.beacon_block_slot` is for the current slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. `sidecar.beacon_block_slot == current_slot`.
- _[REJECT]_ the `sidecar.blobs` are all well formatted, i.e. the `BLSFieldElement` in valid range (`x < BLS_MODULUS`).
+- _[REJECT]_ The KZG proof is a correctly encoded compressed BLS G1 Point -- i.e. `bls.KeyValidate(sidecar.kzg_aggregated_proof)`
- _[REJECT]_ the beacon proposer signature, `signed_blobs_sidecar.signature`, is valid -- i.e.
-```python
-domain = get_domain(state, DOMAIN_BLOBS_SIDECAR, blobs_sidecar.beacon_block_slot // SLOTS_PER_EPOCH)
-signing_root = compute_signing_root(blobs_sidecar, domain)
-assert bls.Verify(proposer_pubkey, signing_root, signed_blob_header.signature)
-```
- where `proposer_pubkey` is the pubkey of the beacon block proposer of `blobs_sidecar.beacon_block_slot`
+ - Let `domain = get_domain(state, DOMAIN_BLOBS_SIDECAR, sidecar.beacon_block_slot // SLOTS_PER_EPOCH)`
+ - Let `signing_root = compute_signing_root(sidecar, domain)`
+  - Verify `bls.Verify(proposer_pubkey, signing_root, signed_blobs_sidecar.signature) is True`,
+ where `proposer_pubkey` is the pubkey of the beacon block proposer of `sidecar.beacon_block_slot`
- _[IGNORE]_ The sidecar is the first sidecar with valid signature received for the `(proposer_index, sidecar.beacon_block_slot)` combination,
- where `proposer_index` is the validator index of the beacon block proposer of `blobs_sidecar.beacon_block_slot`
+ where `proposer_index` is the validator index of the beacon block proposer of `sidecar.beacon_block_slot`
Note that a sidecar may be propagated before or after the corresponding beacon block.
-Once both sidecar and beacon block are received, `verify_blobs_sidecar` can unlock the data-availability fork-choice dependency.
+Once both sidecar and beacon block are received, `validate_blobs_sidecar` can unlock the data-availability fork-choice dependency.
### Transitioning the gossip
@@ -197,14 +186,14 @@ The response is unsigned, i.e. `BlobsSidecarsByRange`, as the signature of the b
may not be available beyond the initial distribution via gossip.
Before consuming the next response chunk, the response reader SHOULD verify the blobs sidecar is well-formatted and
-correct w.r.t. the expected KZG commitments through `verify_blobs_sidecar`.
+correct w.r.t. the expected KZG commitments through `validate_blobs_sidecar`.
`BlobsSidecarsByRange` is primarily used to sync blobs that may have been missed on gossip.
The request MUST be encoded as an SSZ-container.
The response MUST consist of zero or more `response_chunk`.
-Each _successful_ `response_chunk` MUST contain a single `SignedBlobsSidecar` payload.
+Each _successful_ `response_chunk` MUST contain a single `BlobsSidecar` payload.
Clients MUST keep a record of signed blobs sidecars seen on the epoch range
`[max(GENESIS_EPOCH, current_epoch - MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS), current_epoch]`
@@ -245,8 +234,6 @@ Clients MUST respond with blobs sidecars that are consistent from a single chain
After the initial blobs sidecar, clients MAY stop in the process of responding
if their fork choice changes the view of the chain in the context of the request.
-
-
# Design decision rationale
## Why are blobs relayed as a sidecar, separate from beacon blocks?
@@ -257,4 +244,3 @@ thus avoiding all blobs being downloaded by all beacon nodes on the network.
Such sharding design may introduce an updated `BlobsSidecar` to identify the shard,
but does not affect the `BeaconBlock` structure.
-
diff --git a/specs/eip4844/polynomial-commitments.md b/specs/eip4844/polynomial-commitments.md
new file mode 100644
index 000000000..f66e3eb2e
--- /dev/null
+++ b/specs/eip4844/polynomial-commitments.md
@@ -0,0 +1,196 @@
+# EIP-4844 -- Polynomial Commitments
+
+## Table of contents
+
+
+
+
+
+- [Introduction](#introduction)
+- [Custom types](#custom-types)
+- [Constants](#constants)
+- [Preset](#preset)
+ - [Trusted setup](#trusted-setup)
+- [Helper functions](#helper-functions)
+ - [BLS12-381 helpers](#bls12-381-helpers)
+ - [`bls_modular_inverse`](#bls_modular_inverse)
+ - [`div`](#div)
+ - [`lincomb`](#lincomb)
+ - [`matrix_lincomb`](#matrix_lincomb)
+ - [KZG](#kzg)
+ - [`blob_to_kzg_commitment`](#blob_to_kzg_commitment)
+ - [`verify_kzg_proof`](#verify_kzg_proof)
+ - [`compute_kzg_proof`](#compute_kzg_proof)
+ - [Polynomials](#polynomials)
+ - [`evaluate_polynomial_in_evaluation_form`](#evaluate_polynomial_in_evaluation_form)
+
+
+
+
+
+## Introduction
+
+This document specifies basic polynomial operations and KZG polynomial commitment operations as they are needed for the EIP-4844 specification. The implementations are not optimized for performance, but readability. All practical implementations should optimize the polynomial operations.
+
+## Custom types
+
+| Name | SSZ equivalent | Description |
+| - | - | - |
+| `G1Point` | `Bytes48` | |
+| `G2Point` | `Bytes96` | |
+| `BLSFieldElement` | `uint256` | `x < BLS_MODULUS` |
+| `KZGCommitment` | `Bytes48` | Same as BLS standard "is valid pubkey" check but also allows `0x00..00` for point-at-infinity |
+| `KZGProof` | `Bytes48` | Same as for `KZGCommitment` |
+
+## Constants
+
+| Name | Value | Notes |
+| - | - | - |
+| `BLS_MODULUS` | `52435875175126190479447740508185965837690552500527637822603658699938581184513` | Scalar field modulus of BLS12-381 |
+| `ROOTS_OF_UNITY` | `Vector[BLSFieldElement, FIELD_ELEMENTS_PER_BLOB]` | Roots of unity of order FIELD_ELEMENTS_PER_BLOB over the BLS12-381 field |
+
+## Preset
+
+### Trusted setup
+
+The trusted setup is part of the preset: during testing a `minimal` insecure variant may be used,
+but reusing the `mainnet` settings in public networks is a critical security requirement.
+
+| Name | Value |
+| - | - |
+| `KZG_SETUP_G1` | `Vector[G1Point, FIELD_ELEMENTS_PER_BLOB]`, contents TBD |
+| `KZG_SETUP_G2` | `Vector[G2Point, FIELD_ELEMENTS_PER_BLOB]`, contents TBD |
+| `KZG_SETUP_LAGRANGE` | `Vector[KZGCommitment, FIELD_ELEMENTS_PER_BLOB]`, contents TBD |
+
+## Helper functions
+
+### BLS12-381 helpers
+
+#### `bls_modular_inverse`
+
+```python
+def bls_modular_inverse(x: BLSFieldElement) -> BLSFieldElement:
+ """
+ Compute the modular inverse of x
+ i.e. return y such that x * y % BLS_MODULUS == 1 and return 0 for x == 0
+ """
+ return pow(x, -1, BLS_MODULUS) if x != 0 else 0
+```
+
+#### `div`
+
+```python
+def div(x: BLSFieldElement, y: BLSFieldElement) -> BLSFieldElement:
+ """Divide two field elements: `x` by `y`"""
+ return (int(x) * int(bls_modular_inverse(y))) % BLS_MODULUS
+```
+
+#### `lincomb`
+
+```python
+def lincomb(points: Sequence[KZGCommitment], scalars: Sequence[BLSFieldElement]) -> KZGCommitment:
+ """
+ BLS multiscalar multiplication. This function can be optimized using Pippenger's algorithm and variants.
+ """
+ assert len(points) == len(scalars)
+ result = bls.Z1
+ for x, a in zip(points, scalars):
+ result = bls.add(result, bls.multiply(bls.bytes48_to_G1(x), a))
+ return KZGCommitment(bls.G1_to_bytes48(result))
+```
+
+#### `matrix_lincomb`
+
+```python
+def matrix_lincomb(vectors: Sequence[Sequence[BLSFieldElement]],
+ scalars: Sequence[BLSFieldElement]) -> Sequence[BLSFieldElement]:
+ """
+ Given a list of ``vectors``, interpret it as a 2D matrix and compute the linear combination
+ of each column with `scalars`: return the resulting vector.
+ """
+ result = [0] * len(vectors[0])
+ for v, s in zip(vectors, scalars):
+ for i, x in enumerate(v):
+ result[i] = (result[i] + int(s) * int(x)) % BLS_MODULUS
+ return [BLSFieldElement(x) for x in result]
+```
+
+### KZG
+
+KZG core functions. These are also defined in EIP-4844 execution specs.
+
+#### `blob_to_kzg_commitment`
+
+```python
+def blob_to_kzg_commitment(blob: Blob) -> KZGCommitment:
+ return lincomb(KZG_SETUP_LAGRANGE, blob)
+```
+
+#### `verify_kzg_proof`
+
+```python
+def verify_kzg_proof(polynomial_kzg: KZGCommitment,
+ z: BLSFieldElement,
+ y: BLSFieldElement,
+ kzg_proof: KZGProof) -> bool:
+ """
+ Verify KZG proof that ``p(z) == y`` where ``p(z)`` is the polynomial represented by ``polynomial_kzg``.
+ """
+ # Verify: P - y = Q * (X - z)
+ X_minus_z = bls.add(bls.bytes96_to_G2(KZG_SETUP_G2[1]), bls.multiply(bls.G2, BLS_MODULUS - z))
+ P_minus_y = bls.add(bls.bytes48_to_G1(polynomial_kzg), bls.multiply(bls.G1, BLS_MODULUS - y))
+ return bls.pairing_check([
+ [P_minus_y, bls.neg(bls.G2)],
+ [bls.bytes48_to_G1(kzg_proof), X_minus_z]
+ ])
+```
+
+#### `compute_kzg_proof`
+
+```python
+def compute_kzg_proof(polynomial: Sequence[BLSFieldElement], z: BLSFieldElement) -> KZGProof:
+ """Compute KZG proof at point `z` with `polynomial` being in evaluation form"""
+
+ # To avoid SSZ overflow/underflow, convert element into int
+ polynomial = [int(i) for i in polynomial]
+ z = int(z)
+
+ # Shift our polynomial first (in evaluation form we can't handle the division remainder)
+ y = evaluate_polynomial_in_evaluation_form(polynomial, z)
+ polynomial_shifted = [(p - int(y)) % BLS_MODULUS for p in polynomial]
+
+ # Make sure we won't divide by zero during division
+ assert z not in ROOTS_OF_UNITY
+ denominator_poly = [(x - z) % BLS_MODULUS for x in ROOTS_OF_UNITY]
+
+ # Calculate quotient polynomial by doing point-by-point division
+ quotient_polynomial = [div(a, b) for a, b in zip(polynomial_shifted, denominator_poly)]
+ return KZGProof(lincomb(KZG_SETUP_LAGRANGE, quotient_polynomial))
+```
+
+### Polynomials
+
+#### `evaluate_polynomial_in_evaluation_form`
+
+```python
+def evaluate_polynomial_in_evaluation_form(polynomial: Sequence[BLSFieldElement],
+ z: BLSFieldElement) -> BLSFieldElement:
+ """
+ Evaluate a polynomial (in evaluation form) at an arbitrary point `z`
+ Uses the barycentric formula:
+    f(z) = (z**WIDTH - 1) / WIDTH * sum_(i=0)^WIDTH (f(DOMAIN[i]) * DOMAIN[i]) / (z - DOMAIN[i])
+ """
+ width = len(polynomial)
+ assert width == FIELD_ELEMENTS_PER_BLOB
+ inverse_width = bls_modular_inverse(width)
+
+ # Make sure we won't divide by zero during division
+ assert z not in ROOTS_OF_UNITY
+
+ result = 0
+ for i in range(width):
+ result += div(int(polynomial[i]) * int(ROOTS_OF_UNITY[i]), (z - ROOTS_OF_UNITY[i]))
+ result = result * (pow(z, width, BLS_MODULUS) - 1) * inverse_width % BLS_MODULUS
+ return result
+```
+
diff --git a/specs/eip4844/validator.md b/specs/eip4844/validator.md
index 2083934c5..7d4f0b351 100644
--- a/specs/eip4844/validator.md
+++ b/specs/eip4844/validator.md
@@ -10,13 +10,22 @@
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
+- [Custom types](#custom-types)
+- [Containers](#containers)
+ - [`BlobsAndCommitments`](#blobsandcommitments)
+ - [`PolynomialAndCommitment`](#polynomialandcommitment)
- [Helpers](#helpers)
- [`is_data_available`](#is_data_available)
- - [`verify_blobs_sidecar`](#verify_blobs_sidecar)
+ - [`hash_to_bls_field`](#hash_to_bls_field)
+ - [`compute_powers`](#compute_powers)
+ - [`compute_aggregated_poly_and_commitment`](#compute_aggregated_poly_and_commitment)
+ - [`validate_blobs_sidecar`](#validate_blobs_sidecar)
+ - [`compute_proof_from_blobs`](#compute_proof_from_blobs)
+ - [`get_blobs_and_kzg_commitments`](#get_blobs_and_kzg_commitments)
- [Beacon chain responsibilities](#beacon-chain-responsibilities)
- [Block proposal](#block-proposal)
- [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
- - [Blob commitments](#blob-commitments)
+ - [Blob KZG commitments](#blob-kzg-commitments)
- [Beacon Block publishing time](#beacon-block-publishing-time)
@@ -34,36 +43,150 @@ All behaviors and definitions defined in this document, and documents it extends
All terminology, constants, functions, and protocol mechanics defined in the updated [Beacon Chain doc of EIP4844](./beacon-chain.md) are requisite for this document and used throughout.
Please see related Beacon Chain doc before continuing and use them as a reference throughout.
+## Custom types
+
+| Name | SSZ equivalent | Description |
+| - | - | - |
+| `Polynomial` | `List[BLSFieldElement, FIELD_ELEMENTS_PER_BLOB]` | a polynomial in evaluation form |
+
+## Containers
+
+### `BlobsAndCommitments`
+
+```python
+class BlobsAndCommitments(Container):
+ blobs: List[Blob, MAX_BLOBS_PER_BLOCK]
+ kzg_commitments: List[KZGCommitment, MAX_BLOBS_PER_BLOCK]
+```
+
+### `PolynomialAndCommitment`
+
+```python
+class PolynomialAndCommitment(Container):
+ polynomial: Polynomial
+ kzg_commitment: KZGCommitment
+```
+
+
## Helpers
### `is_data_available`
The implementation of `is_data_available` is meant to change with later sharding upgrades.
Initially, it requires every verifying actor to retrieve the matching `BlobsSidecar`,
-and verify the sidecar with `verify_blobs`.
+and validate the sidecar with `validate_blobs_sidecar`.
Without the sidecar the block may be processed further optimistically,
but MUST NOT be considered valid until a valid `BlobsSidecar` has been downloaded.
```python
-def is_data_available(slot: Slot, beacon_block_root: Root, kzgs: Sequence[KZGCommitment]):
- sidecar = retrieve_blobs_sidecar(slot, beacon_block_root) # implementation dependent, raises an exception if not available
- verify_blobs_sidecar(slot, beacon_block_root, kzgs, sidecar)
+def is_data_available(slot: Slot, beacon_block_root: Root, blob_kzg_commitments: Sequence[KZGCommitment]) -> bool:
+ # `retrieve_blobs_sidecar` is implementation dependent, raises an exception if not available.
+ sidecar = retrieve_blobs_sidecar(slot, beacon_block_root)
+ validate_blobs_sidecar(slot, beacon_block_root, blob_kzg_commitments, sidecar)
+
+ return True
```
-### `verify_blobs_sidecar`
+### `hash_to_bls_field`
```python
-def verify_blobs_sidecar(slot: Slot, beacon_block_root: Root,
- expected_kzgs: Sequence[KZGCommitment], blobs_sidecar: BlobsSidecar):
+def hash_to_bls_field(x: Container) -> BLSFieldElement:
+ """
+ This function is used to generate Fiat-Shamir challenges. The output is not uniform over the BLS field.
+ """
+ return int.from_bytes(hash_tree_root(x), "little") % BLS_MODULUS
+```
+
+### `compute_powers`
+```python
+def compute_powers(x: BLSFieldElement, n: uint64) -> Sequence[BLSFieldElement]:
+ """
+    Return the powers of ``x`` from 0 to ``n - 1``, i.e. ``[1, x, x**2, ..., x**(n-1)]``.
+ """
+ current_power = 1
+ powers = []
+ for _ in range(n):
+ powers.append(BLSFieldElement(current_power))
+ current_power = current_power * int(x) % BLS_MODULUS
+ return powers
+```
+
+### `compute_aggregated_poly_and_commitment`
+
+```python
+def compute_aggregated_poly_and_commitment(
+ blobs: Sequence[BLSFieldElement],
+ kzg_commitments: Sequence[KZGCommitment]) -> Tuple[Polynomial, KZGCommitment]:
+ """
+ Return the aggregated polynomial and aggregated KZG commitment.
+ """
+ # Generate random linear combination challenges
+ r = hash_to_bls_field(BlobsAndCommitments(blobs=blobs, kzg_commitments=kzg_commitments))
+ r_powers = compute_powers(r, len(kzg_commitments))
+
+ # Create aggregated polynomial in evaluation form
+ aggregated_poly = Polynomial(matrix_lincomb(blobs, r_powers))
+
+ # Compute commitment to aggregated polynomial
+ aggregated_poly_commitment = KZGCommitment(lincomb(kzg_commitments, r_powers))
+
+ return aggregated_poly, aggregated_poly_commitment
+```
+
+### `validate_blobs_sidecar`
+
+```python
+def validate_blobs_sidecar(slot: Slot,
+ beacon_block_root: Root,
+ expected_kzg_commitments: Sequence[KZGCommitment],
+ blobs_sidecar: BlobsSidecar) -> None:
assert slot == blobs_sidecar.beacon_block_slot
assert beacon_block_root == blobs_sidecar.beacon_block_root
blobs = blobs_sidecar.blobs
- assert len(expected_kzgs) == len(blobs)
- for kzg, blob in zip(expected_kzgs, blobs):
- assert blob_to_kzg(blob) == kzg
+ kzg_aggregated_proof = blobs_sidecar.kzg_aggregated_proof
+ assert len(expected_kzg_commitments) == len(blobs)
+
+ aggregated_poly, aggregated_poly_commitment = compute_aggregated_poly_and_commitment(
+ blobs,
+ expected_kzg_commitments,
+ )
+
+ # Generate challenge `x` and evaluate the aggregated polynomial at `x`
+ x = hash_to_bls_field(
+ PolynomialAndCommitment(polynomial=aggregated_poly, kzg_commitment=aggregated_poly_commitment)
+ )
+ # Evaluate aggregated polynomial at `x` (evaluation function checks for div-by-zero)
+ y = evaluate_polynomial_in_evaluation_form(aggregated_poly, x)
+
+ # Verify aggregated proof
+ assert verify_kzg_proof(aggregated_poly_commitment, x, y, kzg_aggregated_proof)
```
+### `compute_proof_from_blobs`
+
+```python
+def compute_proof_from_blobs(blobs: Sequence[BLSFieldElement]) -> KZGProof:
+ commitments = [blob_to_kzg_commitment(blob) for blob in blobs]
+ aggregated_poly, aggregated_poly_commitment = compute_aggregated_poly_and_commitment(blobs, commitments)
+ x = hash_to_bls_field(PolynomialAndCommitment(
+ polynomial=aggregated_poly,
+ kzg_commitment=aggregated_poly_commitment,
+ ))
+ return compute_kzg_proof(aggregated_poly, x)
+```
+
+### `get_blobs_and_kzg_commitments`
+
+The interface to retrieve blobs and corresponding kzg commitments.
+
+Note: This API is *unstable*. `get_blobs_and_kzg_commitments` and `get_payload` may be unified.
+Implementers may also retrieve blobs individually per transaction.
+
+```python
+def get_blobs_and_kzg_commitments(payload_id: PayloadId) -> Tuple[Sequence[BLSFieldElement], Sequence[KZGCommitment]]:
+ ...
+```
## Beacon chain responsibilities
@@ -74,54 +197,51 @@ Namely, the blob handling and the addition of `BlobsSidecar`.
#### Constructing the `BeaconBlockBody`
-##### Blob commitments
+##### Blob KZG commitments
-After retrieving the execution payload from the execution engine as specified in Bellatrix,
-the blobs are retrieved and processed:
+1. After retrieving the execution payload from the execution engine as specified in Bellatrix,
+use the `payload_id` to retrieve `blobs` and `blob_kzg_commitments` via `get_blobs_and_kzg_commitments(payload_id)`.
+2. Validate `blobs` and `blob_kzg_commitments`:
```python
-# execution_payload = execution_engine.get_payload(payload_id)
-# block.body.execution_payload = execution_payload
-# ...
+def validate_blobs_and_kzg_commitments(execution_payload: ExecutionPayload,
+ blobs: Sequence[Blob],
+ blob_kzg_commitments: Sequence[KZGCommitment]) -> None:
+ # Optionally sanity-check that the KZG commitments match the versioned hashes in the transactions
+ assert verify_kzg_commitments_against_transactions(execution_payload.transactions, blob_kzg_commitments)
-kzgs, blobs = get_blobs(payload_id)
-
-# Optionally sanity-check that the KZG commitments match the versioned hashes in the transactions
-assert verify_kzgs_against_transactions(execution_payload.transactions, kzgs)
-
-# Optionally sanity-check that the KZG commitments match the blobs (as produced by the execution engine)
-assert len(kzgs) == len(blobs) and [blob_to_kzg(blob) == kzg for blob, kzg in zip(blobs, kzgs)]
-
-# Update the block body
-block.body.blob_kzgs = kzgs
+ # Optionally sanity-check that the KZG commitments match the blobs (as produced by the execution engine)
+ assert len(blob_kzg_commitments) == len(blobs)
+ assert all(blob_to_kzg_commitment(blob) == commitment for blob, commitment in zip(blobs, blob_kzg_commitments))
```
-The `blobs` should be held with the block in preparation of publishing.
-Without the `blobs`, the published block will effectively be ignored by honest validators.
+3. If valid, set `block.body.blob_kzg_commitments = blob_kzg_commitments`.
-Note: This API is *unstable*. `get_blobs` and `get_payload` may be unified.
-Implementers may also retrieve blobs individually per transaction.
+Note that the `blobs` should be held with the block in preparation of publishing.
+Without the `blobs`, the published block will effectively be ignored by honest validators.
### Beacon Block publishing time
Before publishing a prepared beacon block proposal, the corresponding blobs are packaged into a sidecar object for distribution to the network:
```python
-blobs_sidecar = BlobsSidecar(
- beacon_block_root=hash_tree_root(beacon_block)
- beacon_block_slot=beacon_block.slot
- shard=0,
- blobs=blobs,
-)
+def get_blobs_sidecar(block: BeaconBlock, blobs: Sequence[Blob]) -> BlobsSidecar:
+ return BlobsSidecar(
+ beacon_block_root=hash_tree_root(block),
+ beacon_block_slot=block.slot,
+ blobs=blobs,
+ kzg_aggregated_proof=compute_proof_from_blobs(blobs),
+ )
```
And then signed:
```python
-domain = get_domain(state, DOMAIN_BLOBS_SIDECAR, blobs_sidecar.beacon_block_slot / SLOTS_PER_EPOCH)
-signing_root = compute_signing_root(blobs_sidecar, domain)
-signature = bls.Sign(privkey, signing_root)
-signed_blobs_sidecar = SignedBlobsSidecar(message=blobs_sidecar, signature=signature)
+def get_signed_blobs_sidecar(state: BeaconState, blobs_sidecar: BlobsSidecar, privkey: int) -> SignedBlobsSidecar:
+ domain = get_domain(state, DOMAIN_BLOBS_SIDECAR, blobs_sidecar.beacon_block_slot // SLOTS_PER_EPOCH)
+ signing_root = compute_signing_root(blobs_sidecar, domain)
+ signature = bls.Sign(privkey, signing_root)
+ return SignedBlobsSidecar(message=blobs_sidecar, signature=signature)
```
This `signed_blobs_sidecar` is then published to the global `blobs_sidecar` topic as soon as the `beacon_block` is published.
@@ -131,4 +251,3 @@ The validator MUST hold on to blobs for `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS`
to ensure the data-availability of these blobs throughout the network.
After `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` nodes MAY prune the blobs and/or stop serving them.
-
diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md
index 1593e07fe..661ad613b 100644
--- a/specs/phase0/fork-choice.md
+++ b/specs/phase0/fork-choice.md
@@ -406,7 +406,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
assert block.parent_root in store.block_states
# Make a copy of the state to avoid mutability issues
pre_state = copy(store.block_states[block.parent_root])
- # Blocks cannot be in the future. If they are, their consideration must be delayed until the are in the past.
+ # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past.
assert get_current_slot(store) >= block.slot
# Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor)
diff --git a/specs/phase0/validator.md b/specs/phase0/validator.md
index 1b7124e92..54b344791 100644
--- a/specs/phase0/validator.md
+++ b/specs/phase0/validator.md
@@ -260,7 +260,7 @@ A validator should plan for future assignments by noting their assigned attestat
slot and joining the committee index attestation subnet related to their committee assignment.
Specifically a validator should:
-* Call `get_committee_assignment(state, next_epoch, validator_index)` when checking for next epoch assignments.
+* Call `_, committee_index, _ = get_committee_assignment(state, next_epoch, validator_index)` when checking for next epoch assignments.
* Calculate the committees per slot for the next epoch: `committees_per_slot = get_committee_count_per_slot(state, next_epoch)`
* Calculate the subnet index: `subnet_id = compute_subnet_for_attestation(committees_per_slot, slot, committee_index)`
* Find peers of the pubsub topic `beacon_attestation_{subnet_id}`.
@@ -510,7 +510,7 @@ Finally, the validator broadcasts `attestation` to the associated attestation su
The `subnet_id` for the `attestation` is calculated with:
- Let `committees_per_slot = get_committee_count_per_slot(state, attestation.data.target.epoch)`.
-- Let `subnet_id = compute_subnet_for_attestation(committees_per_slot, attestation.data.slot, attestation.data.committee_index)`.
+- Let `subnet_id = compute_subnet_for_attestation(committees_per_slot, attestation.data.slot, attestation.data.index)`.
```python
def compute_subnet_for_attestation(committees_per_slot: uint64, slot: Slot, committee_index: CommitteeIndex) -> uint64:
diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md
index ede369b95..7d6df51aa 100644
--- a/specs/sharding/beacon-chain.md
+++ b/specs/sharding/beacon-chain.md
@@ -10,60 +10,37 @@
- [Introduction](#introduction)
- [Glossary](#glossary)
-- [Custom types](#custom-types)
- [Constants](#constants)
- [Misc](#misc)
- [Domain types](#domain-types)
- - [Shard Work Status](#shard-work-status)
- - [Misc](#misc-1)
- - [Participation flag indices](#participation-flag-indices)
- - [Incentivization weights](#incentivization-weights)
- [Preset](#preset)
- - [Misc](#misc-2)
+ - [Misc](#misc-1)
+ - [Time parameters](#time-parameters)
- [Shard blob samples](#shard-blob-samples)
- - [Precomputed size verification points](#precomputed-size-verification-points)
- - [Gwei values](#gwei-values)
- [Configuration](#configuration)
-- [Updated containers](#updated-containers)
- - [`AttestationData`](#attestationdata)
- - [`BeaconBlockBody`](#beaconblockbody)
- - [`BeaconState`](#beaconstate)
-- [New containers](#new-containers)
- - [`Builder`](#builder)
- - [`DataCommitment`](#datacommitment)
- - [`AttestedDataCommitment`](#attesteddatacommitment)
- - [`ShardBlobBody`](#shardblobbody)
- - [`ShardBlobBodySummary`](#shardblobbodysummary)
- - [`ShardBlob`](#shardblob)
- - [`ShardBlobHeader`](#shardblobheader)
- - [`SignedShardBlob`](#signedshardblob)
- - [`SignedShardBlobHeader`](#signedshardblobheader)
- - [`PendingShardHeader`](#pendingshardheader)
- - [`ShardBlobReference`](#shardblobreference)
- - [`ShardProposerSlashing`](#shardproposerslashing)
- - [`ShardWork`](#shardwork)
+ - [Time parameters](#time-parameters-1)
+- [Containers](#containers)
+ - [New Containers](#new-containers)
+ - [`BuilderBlockBid`](#builderblockbid)
+ - [`BuilderBlockBidWithRecipientAddress`](#builderblockbidwithrecipientaddress)
+ - [`ShardedCommitmentsContainer`](#shardedcommitmentscontainer)
+ - [`ShardSample`](#shardsample)
+ - [Extended Containers](#extended-containers)
+ - [`BeaconState`](#beaconstate)
+ - [`BuilderBlockData`](#builderblockdata)
+ - [`BeaconBlockBody`](#beaconblockbody)
- [Helper functions](#helper-functions)
- - [Misc](#misc-3)
- - [`next_power_of_two`](#next_power_of_two)
- - [`compute_previous_slot`](#compute_previous_slot)
- - [`compute_updated_sample_price`](#compute_updated_sample_price)
- - [`compute_committee_source_epoch`](#compute_committee_source_epoch)
- - [`batch_apply_participation_flag`](#batch_apply_participation_flag)
- - [Beacon state accessors](#beacon-state-accessors)
- - [Updated `get_committee_count_per_slot`](#updated-get_committee_count_per_slot)
- - [`get_active_shard_count`](#get_active_shard_count)
- - [`get_shard_proposer_index`](#get_shard_proposer_index)
- - [`get_start_shard`](#get_start_shard)
- - [`compute_shard_from_committee_index`](#compute_shard_from_committee_index)
- - [`compute_committee_index_from_shard`](#compute_committee_index_from_shard)
- [Block processing](#block-processing)
- - [Operations](#operations)
- - [Extended Attestation processing](#extended-attestation-processing)
- - [`process_shard_header`](#process_shard_header)
- - [`process_shard_proposer_slashing`](#process_shard_proposer_slashing)
- - [Epoch transition](#epoch-transition)
- - [`process_pending_shard_confirmations`](#process_pending_shard_confirmations)
- - [`reset_pending_shard_work`](#reset_pending_shard_work)
+ - [`is_builder_block_slot`](#is_builder_block_slot)
+ - [Beacon state accessors](#beacon-state-accessors)
+ - [`get_active_shard_count`](#get_active_shard_count)
+- [Beacon chain state transition function](#beacon-chain-state-transition-function)
+ - [Block processing](#block-processing-1)
+ - [`process_block`](#process_block)
+ - [Block header](#block-header)
+ - [Builder Block Bid](#builder-block-bid)
+ - [Sharded data](#sharded-data)
+ - [Execution payload](#execution-payload)
@@ -72,26 +49,14 @@
## Introduction
This document describes the extensions made to the Phase 0 design of The Beacon Chain to support data sharding,
-based on the ideas [here](https://hackmd.io/G-Iy5jqyT7CXWEz8Ssos8g) and more broadly [here](https://arxiv.org/abs/1809.09044),
+based on the ideas [here](https://notes.ethereum.org/@dankrad/new_sharding) and more broadly [here](https://arxiv.org/abs/1809.09044),
using KZG10 commitments to commit to data to remove any need for fraud proofs (and hence, safety-critical synchrony assumptions) in the design.
### Glossary
- **Data**: A list of KZG points, to translate a byte string into
- **Blob**: Data with commitments and meta-data, like a flattened bundle of L2 transactions.
-- **Builder**: Independent actor that builds blobs and bids for proposal slots via fee-paying blob-headers, responsible for availability.
-- **Shard proposer**: Validator taking bids from blob builders for shard data opportunity, co-signs with builder to propose the blob.
-## Custom types
-
-We define the following Python custom types for type hinting and readability:
-
-| Name | SSZ equivalent | Description |
-| - | - | - |
-| `Shard` | `uint64` | A shard number |
-| `BLSCommitment` | `Bytes48` | A G1 curve point |
-| `BLSPoint` | `uint256` | A number `x` in the range `0 <= x < MODULUS` |
-| `BuilderIndex` | `uint64` | Builder registry index |
## Constants
@@ -101,48 +66,13 @@ The following values are (non-configurable) constants used throughout the specif
| Name | Value | Notes |
| - | - | - |
-| `PRIMITIVE_ROOT_OF_UNITY` | `7` | Primitive root of unity of the BLS12_381 (inner) modulus |
-| `DATA_AVAILABILITY_INVERSE_CODING_RATE` | `2**1` (= 2) | Factor by which samples are extended for data availability encoding |
-| `POINTS_PER_SAMPLE` | `uint64(2**3)` (= 8) | 31 * 8 = 248 bytes |
-| `MODULUS` | `0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001` (curve order of BLS12_381) |
+| `FIELD_ELEMENTS_PER_SAMPLE` | `uint64(2**4)` (= 16) | 31 * 16 = 496 bytes |
### Domain types
| Name | Value |
| - | - |
-| `DOMAIN_SHARD_BLOB` | `DomainType('0x80000000')` |
-
-### Shard Work Status
-
-| Name | Value | Notes |
-| - | - | - |
-| `SHARD_WORK_UNCONFIRMED` | `0` | Unconfirmed, nullified after confirmation time elapses |
-| `SHARD_WORK_CONFIRMED` | `1` | Confirmed, reduced to just the commitment |
-| `SHARD_WORK_PENDING` | `2` | Pending, a list of competing headers |
-
-### Misc
-
-TODO: `PARTICIPATION_FLAG_WEIGHTS` backwards-compatibility is difficult, depends on usage.
-
-| Name | Value |
-| - | - |
-| `PARTICIPATION_FLAG_WEIGHTS` | `[TIMELY_SOURCE_WEIGHT, TIMELY_TARGET_WEIGHT, TIMELY_HEAD_WEIGHT, TIMELY_SHARD_WEIGHT]` |
-
-### Participation flag indices
-
-| Name | Value |
-| - | - |
-| `TIMELY_SHARD_FLAG_INDEX` | `3` |
-
-### Incentivization weights
-
-TODO: determine weight for shard attestations
-
-| Name | Value |
-| - | - |
-| `TIMELY_SHARD_WEIGHT` | `uint64(8)` |
-
-TODO: `WEIGHT_DENOMINATOR` needs to be adjusted, but this breaks a lot of Altair code.
+| `DOMAIN_SHARD_SAMPLE` | `DomainType('0x10000000')` |
## Preset
@@ -150,341 +80,140 @@ TODO: `WEIGHT_DENOMINATOR` needs to be adjusted, but this breaks a lot of Altair
| Name | Value | Notes |
| - | - | - |
-| `MAX_SHARDS` | `uint64(2**10)` (= 1,024) | Theoretical max shard count (used to determine data structure sizes) |
-| `INITIAL_ACTIVE_SHARDS` | `uint64(2**6)` (= 64) | Initial shard count |
-| `SAMPLE_PRICE_ADJUSTMENT_COEFFICIENT` | `uint64(2**3)` (= 8) | Sample price may decrease/increase by at most exp(1 / this value) *per epoch* |
-| `MAX_SHARD_PROPOSER_SLASHINGS` | `2**4` (= 16) | Maximum amount of shard proposer slashing operations per block |
-| `MAX_SHARD_HEADERS_PER_SHARD` | `4` | |
-| `SHARD_STATE_MEMORY_SLOTS` | `uint64(2**8)` (= 256) | Number of slots for which shard commitments and confirmation status is directly available in the state |
-| `BLOB_BUILDER_REGISTRY_LIMIT` | `uint64(2**40)` (= 1,099,511,627,776) | shard blob builders |
+| `MAX_SHARDS` | `uint64(2**12)` (= 4,096) | Theoretical max shard count (used to determine data structure sizes) |
+| `ACTIVE_SHARDS` | `uint64(2**8)` (= 256) | Initial shard count |
+| `MAX_PROPOSER_BLOCKS_BETWEEN_BUILDER_BLOCKS` | `uint64(2**4)` (= 16) | TODO: Need to define what happens if there were more blocks without builder blocks |
+
+### Time parameters
+
+With the introduction of builder blocks the number of slots per epoch is doubled (it counts beacon blocks and builder blocks).
+
+| Name | Value | Unit | Duration |
+| - | - | :-: | :-: |
+| `SLOTS_PER_EPOCH` | `uint64(2**6)` (= 64) | slots | 8:32 minutes |
### Shard blob samples
| Name | Value | Notes |
| - | - | - |
-| `MAX_SAMPLES_PER_BLOB` | `uint64(2**11)` (= 2,048) | 248 * 2,048 = 507,904 bytes |
-| `TARGET_SAMPLES_PER_BLOB` | `uint64(2**10)` (= 1,024) | 248 * 1,024 = 253,952 bytes |
-
-### Precomputed size verification points
-
-| Name | Value |
-| - | - |
-| `G1_SETUP` | Type `List[G1]`. The G1-side trusted setup `[G, G*s, G*s**2....]`; note that the first point is the generator. |
-| `G2_SETUP` | Type `List[G2]`. The G2-side trusted setup `[G, G*s, G*s**2....]` |
-| `ROOT_OF_UNITY` | `pow(PRIMITIVE_ROOT_OF_UNITY, (MODULUS - 1) // int(MAX_SAMPLES_PER_BLOB * POINTS_PER_SAMPLE), MODULUS)` |
-
-### Gwei values
-
-| Name | Value | Unit | Description |
-| - | - | - | - |
-| `MAX_SAMPLE_PRICE` | `Gwei(2**33)` (= 8,589,934,592) | Gwei | Max sample charged for a TARGET-sized shard blob |
-| `MIN_SAMPLE_PRICE` | `Gwei(2**3)` (= 8) | Gwei | Min sample price charged for a TARGET-sized shard blob |
+| `SAMPLES_PER_BLOB` | `uint64(2**9)` (= 512) | 496 * 512 = 253,952 bytes |
## Configuration
Note: Some preset variables may become run-time configurable for testnets, but default to a preset while the spec is unstable.
-E.g. `INITIAL_ACTIVE_SHARDS`, `MAX_SAMPLES_PER_BLOB` and `TARGET_SAMPLES_PER_BLOB`.
+E.g. `ACTIVE_SHARDS` and `SAMPLES_PER_BLOB`.
-## Updated containers
+### Time parameters
-The following containers have updated definitions to support Sharding.
+| Name | Value | Unit | Duration |
+| - | - | :-: | :-: |
+| `SECONDS_PER_SLOT` | `uint64(8)` | seconds | 8 seconds |
-### `AttestationData`
+## Containers
+
+### New Containers
+
+#### `BuilderBlockBid`
```python
-class AttestationData(Container):
+class BuilderBlockBid(Container):
slot: Slot
- index: CommitteeIndex
- # LMD GHOST vote
- beacon_block_root: Root
- # FFG vote
- source: Checkpoint
- target: Checkpoint
- # Hash-tree-root of ShardBlob
- shard_blob_root: Root # [New in Sharding]
+ parent_block_root: Root
+
+ execution_payload_root: Root
+
+ sharded_data_commitment_root: Root # Root of the sharded data (only data, not beacon/builder block commitments)
+
+ sharded_data_commitment_count: uint64 # Count of sharded data commitments
+
+ bid: Gwei # Block builder bid paid to proposer
+
+ validator_index: ValidatorIndex # Validator index for this bid
+
+ # Block builders use an Eth1 address -- need signature as
+ # block bid and data gas base fees will be charged to this address
+ signature_y_parity: bool
+ signature_r: uint256
+ signature_s: uint256
```
-### `BeaconBlockBody`
+#### `BuilderBlockBidWithRecipientAddress`
```python
-class BeaconBlockBody(bellatrix.BeaconBlockBody): # [extends Bellatrix block body]
- shard_proposer_slashings: List[ShardProposerSlashing, MAX_SHARD_PROPOSER_SLASHINGS]
- shard_headers: List[SignedShardBlobHeader, MAX_SHARDS * MAX_SHARD_HEADERS_PER_SHARD]
+class BuilderBlockBidWithRecipientAddress(Container):
+ builder_block_bid: Union[None, BuilderBlockBid]
+ recipient_address: ExecutionAddress # Address to receive the block builder bid
```
-### `BeaconState`
+#### `ShardedCommitmentsContainer`
+
+```python
+class ShardedCommitmentsContainer(Container):
+ sharded_commitments: List[KZGCommitment, 2 * MAX_SHARDS]
+
+ # Aggregate degree proof for all sharded_commitments
+ degree_proof: KZGCommitment
+
+ # The sizes of the blocks encoded in the commitments (last builder and all beacon blocks since)
+ included_block_sizes: List[uint64, MAX_PROPOSER_BLOCKS_BETWEEN_BUILDER_BLOCKS + 1]
+
+ # Number of commitments that are for sharded data (no blocks)
+ included_sharded_data_commitments: uint64
+
+ # Random evaluation of beacon blocks + execution payload (this helps with quick verification)
+ block_verification_kzg_proof: KZGCommitment
+```
+
+#### `ShardSample`
+
+```python
+class ShardSample(Container):
+ slot: Slot
+ row: uint64
+ column: uint64
+ data: Vector[BLSFieldElement, FIELD_ELEMENTS_PER_SAMPLE]
+ proof: KZGCommitment
+ builder: ValidatorIndex
+ signature: BLSSignature
+```
+
+### Extended Containers
+
+#### `BeaconState`
```python
class BeaconState(bellatrix.BeaconState):
- # Blob builder registry.
- blob_builders: List[Builder, BLOB_BUILDER_REGISTRY_LIMIT]
- blob_builder_balances: List[Gwei, BLOB_BUILDER_REGISTRY_LIMIT]
- # A ring buffer of the latest slots, with information per active shard.
- shard_buffer: Vector[List[ShardWork, MAX_SHARDS], SHARD_STATE_MEMORY_SLOTS]
- shard_sample_price: uint64
+ blocks_since_builder_block: List[BeaconBlock, MAX_PROPOSER_BLOCKS_BETWEEN_BUILDER_BLOCKS]
```
-## New containers
-
-### `Builder`
+#### `BuilderBlockData`
```python
-class Builder(Container):
- pubkey: BLSPubkey
- # TODO: fields for either an expiry mechanism (refunding execution account with remaining balance)
- # and/or a builder-transaction mechanism.
-```
+class BuilderBlockData(Container):
+ execution_payload: ExecutionPayload
+ sharded_commitments_container: ShardedCommitmentsContainer
+```
-### `DataCommitment`
+#### `BeaconBlockBody`
```python
-class DataCommitment(Container):
- # KZG10 commitment to the data
- point: BLSCommitment
- # Length of the data in samples
- samples_count: uint64
-```
-
-### `AttestedDataCommitment`
-
-```python
-class AttestedDataCommitment(Container):
- # KZG10 commitment to the data, and length
- commitment: DataCommitment
- # hash_tree_root of the ShardBlobHeader (stored so that attestations can be checked against it)
- root: Root
- # The proposer who included the shard-header
- includer_index: ValidatorIndex
-```
-
-### `ShardBlobBody`
-
-Unsigned shard data, bundled by a shard-builder.
-Unique, signing different bodies as shard proposer for the same `(slot, shard)` is slashable.
-
-```python
-class ShardBlobBody(Container):
- # The actual data commitment
- commitment: DataCommitment
- # Proof that the degree < commitment.samples_count * POINTS_PER_SAMPLE
- degree_proof: BLSCommitment
- # The actual data. Should match the commitment and degree proof.
- data: List[BLSPoint, POINTS_PER_SAMPLE * MAX_SAMPLES_PER_BLOB]
- # fee payment fields (EIP 1559 like)
- # TODO: express in MWei instead?
- max_priority_fee_per_sample: Gwei
- max_fee_per_sample: Gwei
-```
-
-### `ShardBlobBodySummary`
-
-Summary version of the `ShardBlobBody`, omitting the data payload, while preserving the data-commitments.
-
-The commitments are not further collapsed to a single hash,
-to avoid an extra network roundtrip between proposer and builder, to include the header on-chain more quickly.
-
-```python
-class ShardBlobBodySummary(Container):
- # The actual data commitment
- commitment: DataCommitment
- # Proof that the degree < commitment.samples_count * POINTS_PER_SAMPLE
- degree_proof: BLSCommitment
- # Hash-tree-root as summary of the data field
- data_root: Root
- # fee payment fields (EIP 1559 like)
- # TODO: express in MWei instead?
- max_priority_fee_per_sample: Gwei
- max_fee_per_sample: Gwei
-```
-
-### `ShardBlob`
-
-`ShardBlobBody` wrapped with the header data that is unique to the shard blob proposal.
-
-```python
-class ShardBlob(Container):
- slot: Slot
- shard: Shard
- # Builder of the data, pays data-fee to proposer
- builder_index: BuilderIndex
- # Proposer of the shard-blob
- proposer_index: ValidatorIndex
- # Blob contents
- body: ShardBlobBody
-```
-
-### `ShardBlobHeader`
-
-Header version of `ShardBlob`.
-
-```python
-class ShardBlobHeader(Container):
- slot: Slot
- shard: Shard
- # Builder of the data, pays data-fee to proposer
- builder_index: BuilderIndex
- # Proposer of the shard-blob
- proposer_index: ValidatorIndex
- # Blob contents, without the full data
- body_summary: ShardBlobBodySummary
-```
-
-### `SignedShardBlob`
-
-Full blob data, signed by the shard builder (ensuring fee payment) and shard proposer (ensuring a single proposal).
-
-```python
-class SignedShardBlob(Container):
- message: ShardBlob
- signature: BLSSignature
-```
-
-### `SignedShardBlobHeader`
-
-Header of the blob, the signature is equally applicable to `SignedShardBlob`.
-Shard proposers can accept `SignedShardBlobHeader` as a data-transaction by co-signing the header.
-
-```python
-class SignedShardBlobHeader(Container):
- message: ShardBlobHeader
- # Signature by builder.
- # Once accepted by proposer, the signatures is the aggregate of both.
- signature: BLSSignature
-```
-
-### `PendingShardHeader`
-
-```python
-class PendingShardHeader(Container):
- # The commitment that is attested
- attested: AttestedDataCommitment
- # Who voted for the header
- votes: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]
- # Sum of effective balances of votes
- weight: Gwei
- # When the header was last updated, as reference for weight accuracy
- update_slot: Slot
-```
-
-### `ShardBlobReference`
-
-Reference version of `ShardBlobHeader`, substituting the body for just a hash-tree-root.
-
-```python
-class ShardBlobReference(Container):
- slot: Slot
- shard: Shard
- # Builder of the data
- builder_index: BuilderIndex
- # Proposer of the shard-blob
- proposer_index: ValidatorIndex
- # Blob hash-tree-root for slashing reference
- body_root: Root
-```
-
-### `ShardProposerSlashing`
-
-```python
-class ShardProposerSlashing(Container):
- slot: Slot
- shard: Shard
- proposer_index: ValidatorIndex
- builder_index_1: BuilderIndex
- builder_index_2: BuilderIndex
- body_root_1: Root
- body_root_2: Root
- signature_1: BLSSignature
- signature_2: BLSSignature
-```
-
-### `ShardWork`
-
-```python
-class ShardWork(Container):
- # Upon confirmation the data is reduced to just the commitment.
- status: Union[ # See Shard Work Status enum
- None, # SHARD_WORK_UNCONFIRMED
- AttestedDataCommitment, # SHARD_WORK_CONFIRMED
- List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD] # SHARD_WORK_PENDING
- ]
+class BeaconBlockBody(altair.BeaconBlockBody):
+ payload_data: Union[BuilderBlockBid, BuilderBlockData]
```
## Helper functions
-### Misc
+### Block processing
-#### `next_power_of_two`
+#### `is_builder_block_slot`
```python
-def next_power_of_two(x: int) -> int:
- return 2 ** ((x - 1).bit_length())
-```
-
-#### `compute_previous_slot`
-
-```python
-def compute_previous_slot(slot: Slot) -> Slot:
- if slot > 0:
- return Slot(slot - 1)
- else:
- return Slot(0)
-```
-
-#### `compute_updated_sample_price`
-
-```python
-def compute_updated_sample_price(prev_price: Gwei, samples_length: uint64, active_shards: uint64) -> Gwei:
- adjustment_quotient = active_shards * SLOTS_PER_EPOCH * SAMPLE_PRICE_ADJUSTMENT_COEFFICIENT
- if samples_length > TARGET_SAMPLES_PER_BLOB:
- delta = max(1, prev_price * (samples_length - TARGET_SAMPLES_PER_BLOB) // TARGET_SAMPLES_PER_BLOB // adjustment_quotient)
- return min(prev_price + delta, MAX_SAMPLE_PRICE)
- else:
- delta = max(1, prev_price * (TARGET_SAMPLES_PER_BLOB - samples_length) // TARGET_SAMPLES_PER_BLOB // adjustment_quotient)
- return max(prev_price, MIN_SAMPLE_PRICE + delta) - delta
-```
-
-#### `compute_committee_source_epoch`
-
-```python
-def compute_committee_source_epoch(epoch: Epoch, period: uint64) -> Epoch:
- """
- Return the source epoch for computing the committee.
- """
- source_epoch = Epoch(epoch - epoch % period)
- if source_epoch >= period:
- source_epoch -= period # `period` epochs lookahead
- return source_epoch
-```
-
-#### `batch_apply_participation_flag`
-
-```python
-def batch_apply_participation_flag(state: BeaconState, bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE],
- epoch: Epoch, full_committee: Sequence[ValidatorIndex], flag_index: int):
- if epoch == get_current_epoch(state):
- epoch_participation = state.current_epoch_participation
- else:
- epoch_participation = state.previous_epoch_participation
- for bit, index in zip(bits, full_committee):
- if bit:
- epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
+def is_builder_block_slot(slot: Slot) -> bool:
+ return slot % 2 == 1
```
### Beacon state accessors
-#### Updated `get_committee_count_per_slot`
-
-```python
-def get_committee_count_per_slot(state: BeaconState, epoch: Epoch) -> uint64:
- """
- Return the number of committees in each slot for the given ``epoch``.
- """
- return max(uint64(1), min(
- get_active_shard_count(state, epoch),
- uint64(len(get_active_validator_indices(state, epoch))) // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE,
- ))
-```
-
#### `get_active_shard_count`
```python
@@ -493,396 +222,195 @@ def get_active_shard_count(state: BeaconState, epoch: Epoch) -> uint64:
Return the number of active shards.
Note that this puts an upper bound on the number of committees per slot.
"""
- return INITIAL_ACTIVE_SHARDS
-```
-
-#### `get_shard_proposer_index`
-
-```python
-def get_shard_proposer_index(state: BeaconState, slot: Slot, shard: Shard) -> ValidatorIndex:
- """
- Return the proposer's index of shard block at ``slot``.
- """
- epoch = compute_epoch_at_slot(slot)
- seed = hash(get_seed(state, epoch, DOMAIN_SHARD_BLOB) + uint_to_bytes(slot) + uint_to_bytes(shard))
- indices = get_active_validator_indices(state, epoch)
- return compute_proposer_index(state, indices, seed)
-```
-
-#### `get_start_shard`
-
-```python
-def get_start_shard(state: BeaconState, slot: Slot) -> Shard:
- """
- Return the start shard at ``slot``.
- """
- epoch = compute_epoch_at_slot(Slot(slot))
- committee_count = get_committee_count_per_slot(state, epoch)
- active_shard_count = get_active_shard_count(state, epoch)
- return committee_count * slot % active_shard_count
-```
-
-#### `compute_shard_from_committee_index`
-
-```python
-def compute_shard_from_committee_index(state: BeaconState, slot: Slot, index: CommitteeIndex) -> Shard:
- active_shards = get_active_shard_count(state, compute_epoch_at_slot(slot))
- assert index < active_shards
- return Shard((index + get_start_shard(state, slot)) % active_shards)
-```
-
-#### `compute_committee_index_from_shard`
-
-```python
-def compute_committee_index_from_shard(state: BeaconState, slot: Slot, shard: Shard) -> CommitteeIndex:
- epoch = compute_epoch_at_slot(slot)
- active_shards = get_active_shard_count(state, epoch)
- index = CommitteeIndex((active_shards + shard - get_start_shard(state, slot)) % active_shards)
- assert index < get_committee_count_per_slot(state, epoch)
- return index
+ return ACTIVE_SHARDS
```
+## Beacon chain state transition function
### Block processing
+#### `process_block`
+
```python
def process_block(state: BeaconState, block: BeaconBlock) -> None:
process_block_header(state, block)
- # is_execution_enabled is omitted, execution is enabled by default.
- process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE)
- process_randao(state, block.body)
+ verify_builder_block_bid(state, block)
+ process_sharded_data(state, block)
+ if is_execution_enabled(state, block.body):
+ process_execution_payload(state, block, EXECUTION_ENGINE)
+
+ if not is_builder_block_slot(block.slot):
+ process_randao(state, block.body)
+
process_eth1_data(state, block.body)
- process_operations(state, block.body) # [Modified in Sharding]
+ process_operations(state, block.body)
process_sync_aggregate(state, block.body.sync_aggregate)
+
+ if is_builder_block_slot(block.slot):
+ state.blocks_since_builder_block = []
+ state.blocks_since_builder_block.append(block)
```
-#### Operations
+#### Block header
```python
-def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
- # Verify that outstanding deposits are processed up to the maximum number of deposits
- assert len(body.deposits) == min(MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index)
-
- def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
- for operation in operations:
- fn(state, operation)
-
- for_ops(body.proposer_slashings, process_proposer_slashing)
- for_ops(body.attester_slashings, process_attester_slashing)
- # New shard proposer slashing processing
- for_ops(body.shard_proposer_slashings, process_shard_proposer_slashing)
-
- # Limit is dynamic: based on active shard count
- assert len(body.shard_headers) <= MAX_SHARD_HEADERS_PER_SHARD * get_active_shard_count(state, get_current_epoch(state))
- for_ops(body.shard_headers, process_shard_header)
-
- # New attestation processing
- for_ops(body.attestations, process_attestation)
- for_ops(body.deposits, process_deposit)
- for_ops(body.voluntary_exits, process_voluntary_exit)
-
- # TODO: to avoid parallel shards racing, and avoid inclusion-order problems,
- # update the fee price per slot, instead of per header.
- # state.shard_sample_price = compute_updated_sample_price(state.shard_sample_price, ?, shard_count)
-```
-
-##### Extended Attestation processing
-
-```python
-def process_attestation(state: BeaconState, attestation: Attestation) -> None:
- altair.process_attestation(state, attestation)
- process_attested_shard_work(state, attestation)
-```
-
-```python
-def process_attested_shard_work(state: BeaconState, attestation: Attestation) -> None:
- attestation_shard = compute_shard_from_committee_index(
- state,
- attestation.data.slot,
- attestation.data.index,
- )
- full_committee = get_beacon_committee(state, attestation.data.slot, attestation.data.index)
-
- buffer_index = attestation.data.slot % SHARD_STATE_MEMORY_SLOTS
- committee_work = state.shard_buffer[buffer_index][attestation_shard]
-
- # Skip attestation vote accounting if the header is not pending
- if committee_work.status.selector != SHARD_WORK_PENDING:
- # If the data was already confirmed, check if this matches, to apply the flag to the attesters.
- if committee_work.status.selector == SHARD_WORK_CONFIRMED:
- attested: AttestedDataCommitment = committee_work.status.value
- if attested.root == attestation.data.shard_blob_root:
- batch_apply_participation_flag(state, attestation.aggregation_bits,
- attestation.data.target.epoch,
- full_committee, TIMELY_SHARD_FLAG_INDEX)
- return
-
- current_headers: Sequence[PendingShardHeader] = committee_work.status.value
-
- # Find the corresponding header, abort if it cannot be found
- header_index = len(current_headers)
- for i, header in enumerate(current_headers):
- if attestation.data.shard_blob_root == header.attested.root:
- header_index = i
- break
-
- # Attestations for an unknown header do not count towards shard confirmations, but can otherwise be valid.
- if header_index == len(current_headers):
- # Note: Attestations may be re-included if headers are included late.
- return
-
- pending_header: PendingShardHeader = current_headers[header_index]
-
- # The weight may be outdated if it is not the initial weight, and from a previous epoch
- if pending_header.weight != 0 and compute_epoch_at_slot(pending_header.update_slot) < get_current_epoch(state):
- pending_header.weight = sum(state.validators[index].effective_balance for index, bit
- in zip(full_committee, pending_header.votes) if bit)
-
- pending_header.update_slot = state.slot
-
- full_committee_balance = Gwei(0)
- # Update votes bitfield in the state, update weights
- for i, bit in enumerate(attestation.aggregation_bits):
- weight = state.validators[full_committee[i]].effective_balance
- full_committee_balance += weight
- if bit:
- if not pending_header.votes[i]:
- pending_header.weight += weight
- pending_header.votes[i] = True
-
- # Check if the PendingShardHeader is eligible for expedited confirmation, requiring 2/3 of balance attesting
- if pending_header.weight * 3 >= full_committee_balance * 2:
- # participants of the winning header are remembered with participation flags
- batch_apply_participation_flag(state, pending_header.votes, attestation.data.target.epoch,
- full_committee, TIMELY_SHARD_FLAG_INDEX)
-
- if pending_header.attested.commitment == DataCommitment():
- # The committee voted to not confirm anything
- state.shard_buffer[buffer_index][attestation_shard].status.change(
- selector=SHARD_WORK_UNCONFIRMED,
- value=None,
- )
- else:
- state.shard_buffer[buffer_index][attestation_shard].status.change(
- selector=SHARD_WORK_CONFIRMED,
- value=pending_header.attested,
- )
-```
-
-##### `process_shard_header`
-
-```python
-def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeader) -> None:
- header: ShardBlobHeader = signed_header.message
- slot = header.slot
- shard = header.shard
-
- # Verify the header is not 0, and not from the future.
- assert Slot(0) < slot <= state.slot
- header_epoch = compute_epoch_at_slot(slot)
- # Verify that the header is within the processing time window
- assert header_epoch in [get_previous_epoch(state), get_current_epoch(state)]
- # Verify that the shard is valid
- shard_count = get_active_shard_count(state, header_epoch)
- assert shard < shard_count
- # Verify that a committee is able to attest this (slot, shard)
- start_shard = get_start_shard(state, slot)
- committee_index = (shard_count + shard - start_shard) % shard_count
- committees_per_slot = get_committee_count_per_slot(state, header_epoch)
- assert committee_index <= committees_per_slot
-
- # Check that this data is still pending
- committee_work = state.shard_buffer[slot % SHARD_STATE_MEMORY_SLOTS][shard]
- assert committee_work.status.selector == SHARD_WORK_PENDING
-
- # Check that this header is not yet in the pending list
- current_headers: List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD] = committee_work.status.value
- header_root = hash_tree_root(header)
- assert header_root not in [pending_header.attested.root for pending_header in current_headers]
-
- # Verify proposer matches
- assert header.proposer_index == get_shard_proposer_index(state, slot, shard)
-
- # Verify builder and proposer aggregate signature
- blob_signing_root = compute_signing_root(header, get_domain(state, DOMAIN_SHARD_BLOB))
- builder_pubkey = state.blob_builders[header.builder_index].pubkey
- proposer_pubkey = state.validators[header.proposer_index].pubkey
- assert bls.FastAggregateVerify([builder_pubkey, proposer_pubkey], blob_signing_root, signed_header.signature)
-
- # Verify the length by verifying the degree.
- body_summary = header.body_summary
- points_count = body_summary.commitment.samples_count * POINTS_PER_SAMPLE
- if points_count == 0:
- assert body_summary.degree_proof == G1_SETUP[0]
- assert (
- bls.Pairing(body_summary.degree_proof, G2_SETUP[0])
- == bls.Pairing(body_summary.commitment.point, G2_SETUP[-points_count])
+def process_block_header(state: BeaconState, block: BeaconBlock) -> None:
+ # Verify that the slots match
+ assert block.slot == state.slot
+ # Verify that the block is newer than latest block header
+ assert block.slot > state.latest_block_header.slot
+ # Verify that proposer index is the correct index
+ if not is_builder_block_slot(block.slot):
+ assert block.proposer_index == get_beacon_proposer_index(state)
+ # Verify that the parent matches
+ assert block.parent_root == hash_tree_root(state.latest_block_header)
+ # Cache current block as the new latest block
+ state.latest_block_header = BeaconBlockHeader(
+ slot=block.slot,
+ proposer_index=block.proposer_index,
+ parent_root=block.parent_root,
+ state_root=Bytes32(), # Overwritten in the next process_slot call
+ body_root=hash_tree_root(block.body),
)
- # Charge EIP 1559 fee, builder pays for opportunity, and is responsible for later availability,
- # or fail to publish at their own expense.
- samples = body_summary.commitment.samples_count
- # TODO: overflows, need bigger int type
- max_fee = body_summary.max_fee_per_sample * samples
-
- # Builder must have sufficient balance, even if max_fee is not completely utilized
- assert state.blob_builder_balances[header.builder_index] >= max_fee
-
- base_fee = state.shard_sample_price * samples
- # Base fee must be paid
- assert max_fee >= base_fee
-
- # Remaining fee goes towards proposer for prioritizing, up to a maximum
- max_priority_fee = body_summary.max_priority_fee_per_sample * samples
- priority_fee = min(max_fee - base_fee, max_priority_fee)
-
- # Burn base fee, take priority fee
- # priority_fee <= max_fee - base_fee, thus priority_fee + base_fee <= max_fee, thus sufficient balance.
- state.blob_builder_balances[header.builder_index] -= base_fee + priority_fee
- # Pay out priority fee
- increase_balance(state, header.proposer_index, priority_fee)
-
- # Initialize the pending header
- index = compute_committee_index_from_shard(state, slot, shard)
- committee_length = len(get_beacon_committee(state, slot, index))
- initial_votes = Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length)
- pending_header = PendingShardHeader(
- attested=AttestedDataCommitment(
- commitment=body_summary.commitment,
- root=header_root,
- includer_index=get_beacon_proposer_index(state),
- ),
- votes=initial_votes,
- weight=0,
- update_slot=state.slot,
- )
-
- # Include it in the pending list
- current_headers.append(pending_header)
+ # Verify proposer is not slashed
+ proposer = state.validators[block.proposer_index]
+ assert not proposer.slashed
```
-The degree proof works as follows. For a block `B` with length `l` (so `l` values in `[0...l - 1]`, seen as a polynomial `B(X)` which takes these values),
-the length proof is the commitment to the polynomial `B(X) * X**(MAX_DEGREE + 1 - l)`,
-where `MAX_DEGREE` is the maximum power of `s` available in the setup, which is `MAX_DEGREE = len(G2_SETUP) - 1`.
-The goal is to ensure that a proof can only be constructed if `deg(B) < l` (there are not hidden higher-order terms in the polynomial, which would thwart reconstruction).
-
-##### `process_shard_proposer_slashing`
+#### Builder Block Bid
```python
-def process_shard_proposer_slashing(state: BeaconState, proposer_slashing: ShardProposerSlashing) -> None:
- slot = proposer_slashing.slot
- shard = proposer_slashing.shard
- proposer_index = proposer_slashing.proposer_index
+def verify_builder_block_bid(state: BeaconState, block: BeaconBlock) -> None:
+ if is_builder_block_slot(block.slot):
+ # Get last builder block bid
+ assert state.blocks_since_builder_block[-1].body.payload_data.selector == 0
+ builder_block_bid = state.blocks_since_builder_block[-1].body.payload_data.value.builder_block_bid
+ assert builder_block_bid.slot + 1 == block.slot
- reference_1 = ShardBlobReference(slot=slot, shard=shard,
- proposer_index=proposer_index,
- builder_index=proposer_slashing.builder_index_1,
- body_root=proposer_slashing.body_root_1)
- reference_2 = ShardBlobReference(slot=slot, shard=shard,
- proposer_index=proposer_index,
- builder_index=proposer_slashing.builder_index_2,
- body_root=proposer_slashing.body_root_2)
+ assert block.body.payload_data.selector == 1 # Verify that builder block does not contain bid
- # Verify the signed messages are different
- assert reference_1 != reference_2
+ builder_block_data = block.body.payload_data.value
- # Verify the proposer is slashable
- proposer = state.validators[proposer_index]
- assert is_slashable_validator(proposer, get_current_epoch(state))
+ assert builder_block_bid.execution_payload_root == hash_tree_root(builder_block_data.execution_payload)
- # The builders are not slashed, the proposer co-signed with them
- builder_pubkey_1 = state.blob_builders[proposer_slashing.builder_index_1].pubkey
- builder_pubkey_2 = state.blob_builders[proposer_slashing.builder_index_2].pubkey
- domain = get_domain(state, DOMAIN_SHARD_PROPOSER, compute_epoch_at_slot(slot))
- signing_root_1 = compute_signing_root(reference_1, domain)
- signing_root_2 = compute_signing_root(reference_2, domain)
- assert bls.FastAggregateVerify([builder_pubkey_1, proposer.pubkey], signing_root_1, proposer_slashing.signature_1)
- assert bls.FastAggregateVerify([builder_pubkey_2, proposer.pubkey], signing_root_2, proposer_slashing.signature_2)
+ assert builder_block_bid.sharded_data_commitment_count == builder_block_data.included_sharded_data_commitments
- slash_validator(state, proposer_index)
+ assert builder_block_bid.sharded_data_commitment_root == hash_tree_root(builder_block_data.sharded_commitments[-builder_block_bid.included_sharded_data_commitments:])
+
+ assert builder_block_bid.validator_index == block.proposer_index
+
+ else:
+ assert block.body.payload_data.selector == 0
+
+ builder_block_bid = block.body.payload_data.value.builder_block_bid
+ assert builder_block_bid.slot == block.slot
+ assert builder_block_bid.parent_block_root == block.parent_root
+ # We do not check that the builder address exists or has sufficient balance here.
+ # If it does not have sufficient balance, the block proposer loses out, so it is their
+ # responsibility to check.
+
+ # Check that the builder is a slashable validator. We can probably reduce this requirement and only
+ # ensure that they have 1 ETH in their account as a DOS protection.
+ builder = state.validators[builder_block_bid.validator_index]
+ assert is_slashable_validator(builder, get_current_epoch(state))
```
-### Epoch transition
-
-This epoch transition overrides Bellatrix epoch transition:
+#### Sharded data
```python
-def process_epoch(state: BeaconState) -> None:
- # Sharding pre-processing
- process_pending_shard_confirmations(state)
- reset_pending_shard_work(state)
+def process_sharded_data(state: BeaconState, block: BeaconBlock) -> None:
+ if is_builder_block_slot(block.slot):
+ assert block.body.payload_data.selector == 1
+ sharded_commitments_container = block.body.payload_data.value.sharded_commitments_container
- # Base functionality
- process_justification_and_finalization(state)
- process_inactivity_updates(state)
- process_rewards_and_penalties(state) # Note: modified, see new TIMELY_SHARD_FLAG_INDEX
- process_registry_updates(state)
- process_slashings(state)
- process_eth1_data_reset(state)
- process_effective_balance_updates(state)
- process_slashings_reset(state)
- process_randao_mixes_reset(state)
- process_historical_roots_update(state)
- process_participation_flag_updates(state)
- process_sync_committee_updates(state)
+ # Verify not too many commitments
+ assert len(sharded_commitments_container.sharded_commitments) // 2 <= get_active_shard_count(state, get_current_epoch(state))
+
+ # Verify the degree proof
+ r = hash_to_bls_field(sharded_commitments_container.sharded_commitments, 0)
+ r_powers = compute_powers(r, len(sharded_commitments_container.sharded_commitments))
+ combined_commitment = elliptic_curve_lincomb(sharded_commitments_container.sharded_commitments, r_powers)
+
+ payload_field_elements_per_blob = SAMPLES_PER_BLOB * FIELD_ELEMENTS_PER_SAMPLE // 2
+
+ verify_degree_proof(combined_commitment, payload_field_elements_per_blob, sharded_commitments_container.degree_proof)
+
+ # Verify that the 2*N commitments lie on a degree < N polynomial
+ low_degree_check(sharded_commitments_container.sharded_commitments)
+
+ # Verify that blocks since the last builder block have been included
+ blocks_chunked = [bytes_to_field_elements(ssz_serialize(block)) for block in state.blocks_since_builder_block]
+ block_vectors = []
+
+ for block_chunked in blocks_chunked:
+ for i in range(0, len(block_chunked), payload_field_elements_per_blob):
+ block_vectors.append(block_chunked[i:i + payload_field_elements_per_blob])
+
+ number_of_blobs = len(block_vectors)
+ r = hash_to_bls_field(sharded_commitments_container.sharded_commitments[:number_of_blobs], 0)
+ x = hash_to_bls_field(sharded_commitments_container.sharded_commitments[:number_of_blobs], 1)
+
+ r_powers = compute_powers(r, number_of_blobs)
+ combined_vector = vector_lincomb(block_vectors, r_powers)
+ combined_commitment = elliptic_curve_lincomb(sharded_commitments_container.sharded_commitments[:number_of_blobs], r_powers)
+ y = evaluate_polynomial_in_evaluation_form(combined_vector, x)
+
+ verify_kzg_proof(combined_commitment, x, y, sharded_commitments_container.block_verification_kzg_proof)
+
+ # Verify that number of sharded data commitments is correctly indicated
+    assert 2 * (number_of_blobs + sharded_commitments_container.included_sharded_data_commitments) == len(sharded_commitments_container.sharded_commitments)
```
-#### `process_pending_shard_confirmations`
+#### Execution payload
```python
-def process_pending_shard_confirmations(state: BeaconState) -> None:
- # Pending header processing applies to the previous epoch.
- # Skip if `GENESIS_EPOCH` because no prior epoch to process.
- if get_current_epoch(state) == GENESIS_EPOCH:
- return
+def process_execution_payload(state: BeaconState, block: BeaconBlock, execution_engine: ExecutionEngine) -> None:
+ if is_builder_block_slot(block.slot):
+ assert block.body.payload_data.selector == 1
+ payload = block.body.payload_data.value.execution_payload
- previous_epoch = get_previous_epoch(state)
- previous_epoch_start_slot = compute_start_slot_at_epoch(previous_epoch)
+ # Verify consistency of the parent hash with respect to the previous execution payload header
+ if is_merge_transition_complete(state):
+ assert payload.parent_hash == state.latest_execution_payload_header.block_hash
+ # Verify random
+ assert payload.random == get_randao_mix(state, get_current_epoch(state))
+ # Verify timestamp
+ assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)
- # Mark stale headers as unconfirmed
- for slot in range(previous_epoch_start_slot, previous_epoch_start_slot + SLOTS_PER_EPOCH):
- buffer_index = slot % SHARD_STATE_MEMORY_SLOTS
- for shard_index in range(len(state.shard_buffer[buffer_index])):
- committee_work = state.shard_buffer[buffer_index][shard_index]
- if committee_work.status.selector == SHARD_WORK_PENDING:
- winning_header = max(committee_work.status.value, key=lambda header: header.weight)
- if winning_header.attested.commitment == DataCommitment():
- committee_work.status.change(selector=SHARD_WORK_UNCONFIRMED, value=None)
- else:
- committee_work.status.change(selector=SHARD_WORK_CONFIRMED, value=winning_header.attested)
-```
+ # Get sharded data commitments
+ sharded_commitments_container = block.body.sharded_commitments_container
+ sharded_data_commitments = sharded_commitments_container.sharded_commitments[-sharded_commitments_container.included_sharded_data_commitments:]
-#### `reset_pending_shard_work`
+ # Get all unprocessed builder block bids
+ unprocessed_builder_block_bid_with_recipient_addresses = []
+ for block in state.blocks_since_builder_block[1:]:
+ unprocessed_builder_block_bid_with_recipient_addresses.append(block.body.builder_block_bid_with_recipient_address.value)
-```python
-def reset_pending_shard_work(state: BeaconState) -> None:
- # Add dummy "empty" PendingShardHeader (default vote if no shard header is available)
- next_epoch = get_current_epoch(state) + 1
- next_epoch_start_slot = compute_start_slot_at_epoch(next_epoch)
- committees_per_slot = get_committee_count_per_slot(state, next_epoch)
- active_shards = get_active_shard_count(state, next_epoch)
+ # Verify the execution payload is valid
+ # The execution engine gets two extra payloads: One for the sharded data commitments (these are needed to verify type 3 transactions)
+ # and one for all so far unprocessed builder block bids:
+ # * The execution engine needs to transfer the balance from the bidder to the proposer.
+ # * The execution engine needs to deduct data gas fees from the bidder balances
+ assert execution_engine.execute_payload(payload,
+ sharded_data_commitments,
+ unprocessed_builder_block_bid_with_recipient_addresses)
- for slot in range(next_epoch_start_slot, next_epoch_start_slot + SLOTS_PER_EPOCH):
- buffer_index = slot % SHARD_STATE_MEMORY_SLOTS
-
- # Reset the shard work tracking
- state.shard_buffer[buffer_index] = [ShardWork() for _ in range(active_shards)]
-
- start_shard = get_start_shard(state, slot)
- for committee_index in range(committees_per_slot):
- shard = (start_shard + committee_index) % active_shards
- # a committee is available, initialize a pending shard-header list
- committee_length = len(get_beacon_committee(state, slot, CommitteeIndex(committee_index)))
- state.shard_buffer[buffer_index][shard].status.change(
- selector=SHARD_WORK_PENDING,
- value=List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD](
- PendingShardHeader(
- attested=AttestedDataCommitment(),
- votes=Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length),
- weight=0,
- update_slot=slot,
- )
- )
- )
- # a shard without committee available defaults to SHARD_WORK_UNCONFIRMED.
-```
+ # Cache execution payload header
+ state.latest_execution_payload_header = ExecutionPayloadHeader(
+ parent_hash=payload.parent_hash,
+ fee_recipient=payload.fee_recipient,
+ state_root=payload.state_root,
+ receipt_root=payload.receipt_root,
+ logs_bloom=payload.logs_bloom,
+ random=payload.random,
+ block_number=payload.block_number,
+ gas_limit=payload.gas_limit,
+ gas_used=payload.gas_used,
+ timestamp=payload.timestamp,
+ extra_data=payload.extra_data,
+ base_fee_per_gas=payload.base_fee_per_gas,
+ block_hash=payload.block_hash,
+ transactions_root=hash_tree_root(payload.transactions),
+ )
+```
\ No newline at end of file
diff --git a/specs/sharding/p2p-interface.md b/specs/sharding/p2p-interface.md
index ab32c37fa..3b627a339 100644
--- a/specs/sharding/p2p-interface.md
+++ b/specs/sharding/p2p-interface.md
@@ -13,17 +13,14 @@
- [Misc](#misc)
- [Gossip domain](#gossip-domain)
- [Topics and messages](#topics-and-messages)
- - [Shard blob subnets](#shard-blob-subnets)
- - [`shard_blob_{subnet_id}`](#shard_blob_subnet_id)
- - [Global topics](#global-topics)
- - [`shard_blob_header`](#shard_blob_header)
- - [`shard_blob_tx`](#shard_blob_tx)
- - [`shard_proposer_slashing`](#shard_proposer_slashing)
+ - [Builder block bid](#builder-block-bid)
+ - [`builder_block_bid`](#builder_block_bid)
+ - [Shard sample subnets](#shard-sample-subnets)
+ - [`shard_row_{subnet_id}`](#shard_row_subnet_id)
-
## Introduction
The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite.
@@ -33,12 +30,10 @@ The adjustments and additions for Shards are outlined in this document.
### Misc
-| Name | Value | Description |
-| ---- | ----- | ----------- |
-| `SHARD_BLOB_SUBNET_COUNT` | `64` | The number of `shard_blob_{subnet_id}` subnets used in the gossipsub protocol. |
-| `SHARD_TX_PROPAGATION_GRACE_SLOTS` | `4` | The number of slots for a late transaction to propagate |
-| `SHARD_TX_PROPAGATION_BUFFER_SLOTS` | `8` | The number of slots for an early transaction to propagate |
-
+| Name | Value | Description |
+| --------------------------- | ----- | -------------------------------------------------------------------------------- |
+| `SHARD_ROW_SUBNET_COUNT` | `512` | The number of `shard_row_{subnet_id}` subnets used in the gossipsub protocol. |
+| `SHARD_COLUMN_SUBNET_COUNT` | `512` | The number of `shard_column_{subnet_id}` subnets used in the gossipsub protocol. |
## Gossip domain
@@ -48,130 +43,49 @@ Following the same scheme as the [Phase0 gossip topics](../phase0/p2p-interface.
| Name | Message Type |
|---------------------------------|--------------------------|
-| `shard_blob_{subnet_id}` | `SignedShardBlob` |
-| `shard_blob_header` | `SignedShardBlobHeader` |
-| `shard_blob_tx` | `SignedShardBlobHeader` |
-| `shard_proposer_slashing` | `ShardProposerSlashing` |
+| `shard_row_{subnet_id}` | `SignedShardSample` |
+| `shard_column_{subnet_id}` | `SignedShardSample` |
+| `builder_block_bid` | `BuilderBlockBid` |
The [DAS network specification](./das-p2p.md) defines additional topics.
-#### Shard blob subnets
+#### Builder block bid
-Shard blob subnets are used by builders to make their blobs available after selection by shard proposers.
+##### `builder_block_bid`
-##### `shard_blob_{subnet_id}`
+- _[IGNORE]_ The `bid` is published 1 slot early or later (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) --
+ i.e. validate that `bid.slot <= current_slot + 1`
+  (a client MAY queue future bids for propagation at the appropriate slot).
+- _[IGNORE]_ The `bid` is for the current or next block
+ i.e. validate that `bid.slot >= current_slot`
+- _[IGNORE]_ The `bid` is the first valid bid for `bid.slot`, or the bid is at least 1% higher than the previous known `bid`
+- _[REJECT]_ The validator defined by `bid.validator_index` exists and is slashable.
+- _[REJECT]_ The bid signature, which is an Eth1 signature, needs to be valid and the address needs to contain enough Ether to cover the bid and the data gas base fee.
-Shard blob data, in the form of a `SignedShardBlob` is published to the `shard_blob_{subnet_id}` subnets.
+#### Shard sample subnets
-```python
-def compute_subnet_for_shard_blob(state: BeaconState, slot: Slot, shard: Shard) -> uint64:
- """
- Compute the correct subnet for a shard blob publication.
- Note, this mimics compute_subnet_for_attestation().
- """
- committee_index = compute_committee_index_from_shard(state, slot, shard)
- committees_per_slot = get_committee_count_per_slot(state, compute_epoch_at_slot(slot))
- slots_since_epoch_start = Slot(slot % SLOTS_PER_EPOCH)
- committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
+Shard sample (row/column) subnets are used by builders to make their samples available as part of their intermediate block release after selection by beacon block proposers.
- return uint64((committees_since_epoch_start + committee_index) % SHARD_BLOB_SUBNET_COUNT)
-```
+##### `shard_row_{subnet_id}`
-The following validations MUST pass before forwarding the `signed_blob`,
-on the horizontal subnet or creating samples for it. Alias `blob = signed_blob.message`.
+Shard sample data, in the form of a `SignedShardSample` is published to the `shard_row_{subnet_id}` and `shard_column_{subnet_id}` subnets.
-- _[IGNORE]_ The `blob` is published 1 slot early or later (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) --
- i.e. validate that `blob.slot <= current_slot + 1`
- (a client MAY queue future blobs for propagation at the appropriate slot).
-- _[IGNORE]_ The `blob` is new enough to still be processed --
- i.e. validate that `compute_epoch_at_slot(blob.slot) >= get_previous_epoch(state)`
-- _[REJECT]_ The shard blob is for an active shard --
- i.e. `blob.shard < get_active_shard_count(state, compute_epoch_at_slot(blob.slot))`
-- _[REJECT]_ The `blob.shard` MUST have a committee at the `blob.slot` --
- i.e. validate that `compute_committee_index_from_shard(state, blob.slot, blob.shard)` doesn't raise an error
-- _[REJECT]_ The shard blob is for the correct subnet --
- i.e. `compute_subnet_for_shard_blob(state, blob.slot, blob.shard) == subnet_id`
-- _[IGNORE]_ The blob is the first blob with valid signature received for the `(blob.proposer_index, blob.slot, blob.shard)` combination.
-- _[REJECT]_ The blob is not too large -- the data MUST NOT be larger than the SSZ list-limit, and a client MAY apply stricter bounds.
-- _[REJECT]_ The `blob.body.data` MUST NOT contain any point `p >= MODULUS`. Although it is a `uint256`, not the full 256 bit range is valid.
-- _[REJECT]_ The blob builder defined by `blob.builder_index` exists and has sufficient balance to back the fee payment.
-- _[REJECT]_ The blob signature, `signed_blob.signature`, is valid for the aggregate of proposer and builder --
- i.e. `bls.FastAggregateVerify([builder_pubkey, proposer_pubkey], blob_signing_root, signed_blob.signature)`.
-- _[REJECT]_ The blob is proposed by the expected `proposer_index` for the blob's `slot` and `shard`,
- in the context of the current shuffling (defined by the current node head state and `blob.slot`).
- If the `proposer_index` cannot immediately be verified against the expected shuffling,
- the blob MAY be queued for later processing while proposers for the blob's branch are calculated --
- in such a case _do not_ `REJECT`, instead `IGNORE` this message.
+The following validations MUST pass before forwarding the `sample`.
-#### Global topics
+- _[IGNORE]_ The `sample` is published 1 slot early or later (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) --
+ i.e. validate that `sample.slot <= current_slot + 1`
+ (a client MAY queue future samples for propagation at the appropriate slot).
+- _[IGNORE]_ The `sample` is new enough to still be processed --
+ i.e. validate that `compute_epoch_at_slot(sample.slot) >= get_previous_epoch(state)`
+- _[REJECT]_ The shard sample is for the correct subnet --
+ i.e. `sample.row == subnet_id` for `shard_row_{subnet_id}` and `sample.column == subnet_id` for `shard_column_{subnet_id}`
+- _[IGNORE]_ The sample is the first sample with valid signature received for the `(sample.builder, sample.slot, sample.row, sample.column)` combination.
+- _[REJECT]_ The `sample.data` MUST NOT contain any point `x >= BLS_MODULUS`. Although it is a `uint256`, not the full 256 bit range is valid.
+- _[REJECT]_ The validator defined by `sample.builder` exists and is slashable.
+- _[REJECT]_ The sample is proposed by the expected `builder` for the sample's `slot`.
+ i.e., the beacon block at `sample.slot - 1` according to the node's fork choice contains an `IntermediateBlockBid`
+ with `intermediate_block_bid.validator_index == sample.builder`
+- _[REJECT]_ The sample signature, `sample.signature`, is valid for the builder --
+ i.e. `bls.Verify(builder_pubkey, sample_signing_root, sample.signature)` OR `sample.signature == Bytes96(b"\0" * 96)` AND
+ the sample verification `verify_sample` passes
-There are three additional global topics for Sharding.
-
-- `shard_blob_header`: co-signed headers to be included on-chain and to serve as a signal to the builder to publish full data.
-- `shard_blob_tx`: builder-signed headers, also known as "data transaction".
-- `shard_proposer_slashing`: slashings of duplicate shard proposals.
-
-##### `shard_blob_header`
-
-Shard header data, in the form of a `SignedShardBlobHeader` is published to the global `shard_blob_header` subnet.
-Shard blob headers select shard blob bids by builders
-and should be timely to ensure builders can publish the full shard blob before subsequent attestations.
-
-The following validations MUST pass before forwarding the `signed_blob_header` on the network. Alias `header = signed_blob_header.message`.
-
-- _[IGNORE]_ The `header` is published 1 slot early or later (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) --
- i.e. validate that `header.slot <= current_slot + 1`
- (a client MAY queue future headers for propagation at the appropriate slot).
-- _[IGNORE]_ The header is new enough to still be processed --
- i.e. validate that `compute_epoch_at_slot(header.slot) >= get_previous_epoch(state)`
-- _[REJECT]_ The shard header is for an active shard --
- i.e. `header.shard < get_active_shard_count(state, compute_epoch_at_slot(header.slot))`
-- _[REJECT]_ The `header.shard` MUST have a committee at the `header.slot` --
- i.e. validate that `compute_committee_index_from_shard(state, header.slot, header.shard)` doesn't raise an error.
-- _[IGNORE]_ The header is the first header with valid signature received for the `(header.proposer_index, header.slot, header.shard)` combination.
-- _[REJECT]_ The blob builder defined by `blob.builder_index` exists and has sufficient balance to back the fee payment.
-- _[REJECT]_ The header signature, `signed_blob_header.signature`, is valid for the aggregate of proposer and builder --
- i.e. `bls.FastAggregateVerify([builder_pubkey, proposer_pubkey], blob_signing_root, signed_blob_header.signature)`.
-- _[REJECT]_ The header is proposed by the expected `proposer_index` for the blob's `header.slot` and `header.shard`
- in the context of the current shuffling (defined by the current node head state and `header.slot`).
- If the `proposer_index` cannot immediately be verified against the expected shuffling,
- the blob MAY be queued for later processing while proposers for the blob's branch are calculated --
- in such a case _do not_ `REJECT`, instead `IGNORE` this message.
-
-##### `shard_blob_tx`
-
-Shard data-transactions in the form of a `SignedShardBlobHeader` are published to the global `shard_blob_tx` subnet.
-These shard blob headers are signed solely by the blob-builder.
-
-The following validations MUST pass before forwarding the `signed_blob_header` on the network. Alias `header = signed_blob_header.message`.
-
-- _[IGNORE]_ The header is not propagating more than `SHARD_TX_PROPAGATION_BUFFER_SLOTS` slots ahead of time --
- i.e. validate that `header.slot <= current_slot + SHARD_TX_PROPAGATION_BUFFER_SLOTS`.
-- _[IGNORE]_ The header is not propagating later than `SHARD_TX_PROPAGATION_GRACE_SLOTS` slots too late --
- i.e. validate that `header.slot + SHARD_TX_PROPAGATION_GRACE_SLOTS >= current_slot`
-- _[REJECT]_ The shard header is for an active shard --
- i.e. `header.shard < get_active_shard_count(state, compute_epoch_at_slot(header.slot))`
-- _[REJECT]_ The `header.shard` MUST have a committee at the `header.slot` --
- i.e. validate that `compute_committee_index_from_shard(state, header.slot, header.shard)` doesn't raise an error.
-- _[IGNORE]_ The header is not stale -- i.e. the corresponding shard proposer has not already selected a header for `(header.slot, header.shard)`.
-- _[IGNORE]_ The header is the first header with valid signature received for the `(header.builder_index, header.slot, header.shard)` combination.
-- _[REJECT]_ The blob builder, define by `header.builder_index`, exists and has sufficient balance to back the fee payment.
-- _[IGNORE]_ The header fee SHOULD be higher than previously seen headers for `(header.slot, header.shard)`, from any builder.
- Propagating nodes MAY increase fee increments in case of spam.
-- _[REJECT]_ The header signature, `signed_blob_header.signature`, is valid for ONLY the builder --
- i.e. `bls.Verify(builder_pubkey, blob_signing_root, signed_blob_header.signature)`. The signature is not an aggregate with the proposer.
-- _[REJECT]_ The header is designated for proposal by the expected `proposer_index` for the blob's `header.slot` and `header.shard`
- in the context of the current shuffling (defined by the current node head state and `header.slot`).
- If the `proposer_index` cannot immediately be verified against the expected shuffling,
- the blob MAY be queued for later processing while proposers for the blob's branch are calculated --
- in such a case _do not_ `REJECT`, instead `IGNORE` this message.
-
-##### `shard_proposer_slashing`
-
-Shard proposer slashings, in the form of `ShardProposerSlashing`, are published to the global `shard_proposer_slashing` topic.
-
-The following validations MUST pass before forwarding the `shard_proposer_slashing` on to the network.
-- _[IGNORE]_ The shard proposer slashing is the first valid shard proposer slashing received
- for the proposer with index `proposer_slashing.proposer_index`.
- The `proposer_slashing.slot` and `proposer_slashing.shard` are ignored, there are no repeated or per-shard slashings.
-- _[REJECT]_ All of the conditions within `process_shard_proposer_slashing` pass validation.
diff --git a/specs/sharding/polynomial-commitments.md b/specs/sharding/polynomial-commitments.md
new file mode 100644
index 000000000..e2a4285ca
--- /dev/null
+++ b/specs/sharding/polynomial-commitments.md
@@ -0,0 +1,396 @@
+# Sharding -- Polynomial Commitments
+
+**Notice**: This document is a work-in-progress for researchers and implementers.
+
+## Table of contents
+
+
+
+
+
+- [Introduction](#introduction)
+- [Constants](#constants)
+ - [BLS Field](#bls-field)
+ - [KZG Trusted setup](#kzg-trusted-setup)
+- [Custom types](#custom-types)
+- [Helper functions](#helper-functions)
+ - [`next_power_of_two`](#next_power_of_two)
+ - [`reverse_bit_order`](#reverse_bit_order)
+ - [`list_to_reverse_bit_order`](#list_to_reverse_bit_order)
+- [Field operations](#field-operations)
+ - [Generic field operations](#generic-field-operations)
+ - [`bls_modular_inverse`](#bls_modular_inverse)
+ - [`roots_of_unity`](#roots_of_unity)
+ - [Field helper functions](#field-helper-functions)
+ - [`compute_powers`](#compute_powers)
+ - [`low_degree_check`](#low_degree_check)
+ - [`vector_lincomb`](#vector_lincomb)
+ - [`bytes_to_field_elements`](#bytes_to_field_elements)
+- [Polynomial operations](#polynomial-operations)
+ - [`add_polynomials`](#add_polynomials)
+ - [`multiply_polynomials`](#multiply_polynomials)
+ - [`interpolate_polynomial`](#interpolate_polynomial)
+ - [`evaluate_polynomial_in_evaluation_form`](#evaluate_polynomial_in_evaluation_form)
+- [KZG Operations](#kzg-operations)
+  - [Elliptic curve helper functions](#elliptic-curve-helper-functions)
+ - [`elliptic_curve_lincomb`](#elliptic_curve_lincomb)
+ - [Hash to field](#hash-to-field)
+ - [`hash_to_bls_field`](#hash_to_bls_field)
+ - [KZG operations](#kzg-operations)
+ - [`verify_kzg_proof`](#verify_kzg_proof)
+ - [`verify_kzg_multiproof`](#verify_kzg_multiproof)
+ - [`verify_degree_proof`](#verify_degree_proof)
+
+
+
+
+
+## Introduction
+
+This document specifies basic polynomial operations and KZG polynomial commitment operations as they are needed for the sharding specification. The implementations are not optimized for performance, but for readability. All practical implementations should optimize the polynomial operations, and hints about the best known algorithms for these implementations are included below.
+
+## Constants
+
+### BLS Field
+
+| Name | Value | Notes |
+| - | - | - |
+| `BLS_MODULUS` | `0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001` | Curve order of BLS12_381 |
+| `PRIMITIVE_ROOT_OF_UNITY` | `7` | Primitive root of unity of the BLS12_381 (inner) BLS_MODULUS |
+
+### KZG Trusted setup
+
+| Name | Value |
+| - | - |
+| `G1_SETUP` | Type `List[G1]`. The G1-side trusted setup `[G, G*s, G*s**2....]`; note that the first point is the generator. |
+| `G2_SETUP` | Type `List[G2]`. The G2-side trusted setup `[G, G*s, G*s**2....]` |
+
+## Custom types
+
+We define the following Python custom types for type hinting and readability:
+
+| Name | SSZ equivalent | Description |
+| - | - | - |
+| `KZGCommitment` | `Bytes48` | A G1 curve point |
+| `BLSFieldElement` | `uint256` | A number `x` in the range `0 <= x < BLS_MODULUS` |
+| `BLSPolynomialByCoefficients` | `List[BLSFieldElement]` | A polynomial over the BLS field, given in coefficient form |
+| `BLSPolynomialByEvaluations` | `List[BLSFieldElement]` | A polynomial over the BLS field, given in evaluation form |
+
+## Helper functions
+
+#### `next_power_of_two`
+
+```python
+def next_power_of_two(x: int) -> int:
+ assert x > 0
+ return 2 ** ((x - 1).bit_length())
+```
+
+#### `reverse_bit_order`
+
+```python
+def reverse_bit_order(n: int, order: int) -> int:
+ """
+ Reverse the bit order of an integer n
+ """
+ assert is_power_of_two(order)
+ # Convert n to binary with the same number of bits as "order" - 1, then reverse its bit order
+ return int(('{:0' + str(order.bit_length() - 1) + 'b}').format(n)[::-1], 2)
+```
+
+#### `list_to_reverse_bit_order`
+
+```python
+def list_to_reverse_bit_order(l: List[int]) -> List[int]:
+ """
+ Convert a list between normal and reverse bit order. This operation is idempotent.
+ """
+ return [l[reverse_bit_order(i, len(l))] for i in range(len(l))]
+```
+
+## Field operations
+
+### Generic field operations
+
+#### `bls_modular_inverse`
+
+```python
+def bls_modular_inverse(x: BLSFieldElement) -> BLSFieldElement:
+ """
+ Compute the modular inverse of x, i.e. y such that x * y % BLS_MODULUS == 1 and return 1 for x == 0
+ """
+ lm, hm = 1, 0
+ low, high = x % BLS_MODULUS, BLS_MODULUS
+ while low > 1:
+ r = high // low
+ nm, new = hm - lm * r, high - low * r
+ lm, low, hm, high = nm, new, lm, low
+ return lm % BLS_MODULUS
+```
+
+#### `roots_of_unity`
+
+```python
+def roots_of_unity(order: uint64) -> List[BLSFieldElement]:
+ """
+ Compute a list of roots of unity for a given order.
+ The order must divide the BLS multiplicative group order, i.e. BLS_MODULUS - 1
+ """
+ assert (BLS_MODULUS - 1) % order == 0
+ roots = []
+ root_of_unity = pow(PRIMITIVE_ROOT_OF_UNITY, (BLS_MODULUS - 1) // order, BLS_MODULUS)
+
+ current_root_of_unity = 1
+    for _ in range(order):
+ roots.append(current_root_of_unity)
+ current_root_of_unity = current_root_of_unity * root_of_unity % BLS_MODULUS
+ return roots
+```
+
+### Field helper functions
+
+#### `compute_powers`
+
+```python
+def compute_powers(x: BLSFieldElement, n: uint64) -> List[BLSFieldElement]:
+ current_power = 1
+ powers = []
+ for _ in range(n):
+ powers.append(BLSFieldElement(current_power))
+ current_power = current_power * int(x) % BLS_MODULUS
+ return powers
+```
+
+#### `low_degree_check`
+
+```python
+def low_degree_check(commitments: List[KZGCommitment]):
+ """
+ Checks that the commitments are on a low-degree polynomial.
+ If there are 2*N commitments, that means they should lie on a polynomial
+ of degree d = K - N - 1, where K = next_power_of_two(2*N)
+ (The remaining positions are filled with 0, this is to make FFTs usable)
+
+ For details see here: https://notes.ethereum.org/@dankrad/barycentric_low_degree_check
+ """
+ assert len(commitments) % 2 == 0
+ N = len(commitments) // 2
+ r = hash_to_bls_field(commitments, 0)
+ K = next_power_of_two(2 * N)
+ d = K - N - 1
+    r_to_K = pow(r, K, BLS_MODULUS)
+ roots = list_to_reverse_bit_order(roots_of_unity(K))
+
+ # For an efficient implementation, B and Bprime should be precomputed
+ def B(z):
+ r = 1
+ for w in roots[:d + 1]:
+ r = r * (z - w) % BLS_MODULUS
+ return r
+
+ def Bprime(z):
+ r = 0
+ for i in range(d + 1):
+ m = 1
+ for w in roots[:i] + roots[i + 1:d + 1]:
+ m = m * (z - w) % BLS_MODULUS
+ r = (r + m) % BLS_MODULUS
+ return r
+
+ coefs = []
+ for i in range(K):
+ coefs.append( - (r_to_K - 1) * bls_modular_inverse(K * roots[i * (K - 1) % K] * (r - roots[i])) % BLS_MODULUS)
+ for i in range(d + 1):
+ coefs[i] = (coefs[i] + B(r) * bls_modular_inverse(Bprime(r) * (r - roots[i]))) % BLS_MODULUS
+
+ assert elliptic_curve_lincomb(commitments, coefs) == bls.inf_G1()
+```
+
+#### `vector_lincomb`
+
+```python
+def vector_lincomb(vectors: List[List[BLSFieldElement]], scalars: List[BLSFieldElement]) -> List[BLSFieldElement]:
+ """
+ Compute a linear combination of field element vectors.
+ """
+ r = [0]*len(vectors[0])
+ for v, a in zip(vectors, scalars):
+ for i, x in enumerate(v):
+ r[i] = (r[i] + a * x) % BLS_MODULUS
+ return [BLSFieldElement(x) for x in r]
+```
+
+#### `bytes_to_field_elements`
+
+```python
+def bytes_to_field_elements(block: bytes) -> List[BLSFieldElement]:
+ """
+ Slices a block into 31-byte chunks that can fit into field elements.
+ """
+    sliced_block = [block[i:i + 31] for i in range(0, len(block), 31)]
+ return [BLSFieldElement(int.from_bytes(x, "little")) for x in sliced_block]
+```
+
+## Polynomial operations
+
+#### `add_polynomials`
+
+```python
+def add_polynomials(a: BLSPolynomialByCoefficients, b: BLSPolynomialByCoefficients) -> BLSPolynomialByCoefficients:
+ """
+ Sum the polynomials ``a`` and ``b`` given by their coefficients.
+ """
+ a, b = (a, b) if len(a) >= len(b) else (b, a)
+ return [(a[i] + (b[i] if i < len(b) else 0)) % BLS_MODULUS for i in range(len(a))]
+```
+
+#### `multiply_polynomials`
+
+```python
+def multiply_polynomials(a: BLSPolynomialByCoefficients, b: BLSPolynomialByCoefficients) -> BLSPolynomialByCoefficients:
+ """
+ Multiplies the polynomials `a` and `b` given by their coefficients
+ """
+ r = [0]
+ for power, coef in enumerate(a):
+ summand = [0] * power + [coef * x % BLS_MODULUS for x in b]
+ r = add_polynomials(r, summand)
+ return r
+```
+
+
+#### `interpolate_polynomial`
+
+```python
+def interpolate_polynomial(xs: List[BLSFieldElement], ys: List[BLSFieldElement]) -> BLSPolynomialByCoefficients:
+ """
+ Lagrange interpolation
+ """
+ assert len(xs) == len(ys)
+ r = [0]
+
+ for i in range(len(xs)):
+ summand = [ys[i]]
+ for j in range(len(ys)):
+ if j != i:
+ weight_adjustment = bls_modular_inverse(xs[j] - xs[i])
+ summand = multiply_polynomials(
+ summand, [weight_adjustment, ((BLS_MODULUS - weight_adjustment) * xs[i])]
+ )
+ r = add_polynomials(r, summand)
+
+ return r
+```
+
+#### `evaluate_polynomial_in_evaluation_form`
+
+```python
+def evaluate_polynomial_in_evaluation_form(poly: BLSPolynomialByEvaluations, x: BLSFieldElement) -> BLSFieldElement:
+ """
+ Evaluates a polynomial (in evaluation form) at an arbitrary point
+ """
+ field_elements_per_blob = SAMPLES_PER_BLOB * FIELD_ELEMENTS_PER_SAMPLE
+ roots = roots_of_unity(field_elements_per_blob)
+
+ def A(z):
+ r = 1
+ for w in roots:
+ r = r * (z - w) % BLS_MODULUS
+ return r
+
+ def Aprime(z):
+ return field_elements_per_blob * pow(z, field_elements_per_blob - 1, BLS_MODULUS)
+
+ r = 0
+ inverses = [bls_modular_inverse(z - x) for z in roots]
+    for i, inv in enumerate(inverses):
+        r += poly[i] * bls_modular_inverse(Aprime(roots[i])) * inv % BLS_MODULUS
+ r = r * A(x) % BLS_MODULUS
+ return r
+```
+
+## KZG Operations
+
+We are using the KZG10 polynomial commitment scheme (Kate, Zaverucha and Goldberg, 2010: https://www.iacr.org/archive/asiacrypt2010/6477178/6477178.pdf).
+
+### Elliptic curve helper functions
+
+#### `elliptic_curve_lincomb`
+
+```python
+def elliptic_curve_lincomb(points: List[KZGCommitment], scalars: List[BLSFieldElement]) -> KZGCommitment:
+ """
+ BLS multiscalar multiplication. This function can be optimized using Pippenger's algorithm and variants.
+ This is a non-optimized implementation.
+ """
+ r = bls.inf_G1()
+ for x, a in zip(points, scalars):
+ r = r.add(x.mult(a))
+ return r
+```
+
+### Hash to field
+
+#### `hash_to_bls_field`
+
+```python
+def hash_to_bls_field(x: Container, challenge_number: uint64) -> BLSFieldElement:
+ """
+ This function is used to generate Fiat-Shamir challenges. The output is not uniform over the BLS field.
+ """
+ return (
+ (int.from_bytes(hash(hash_tree_root(x) + int.to_bytes(challenge_number, 32, "little")), "little"))
+ % BLS_MODULUS
+ )
+```
+
+### KZG operations
+
+#### `verify_kzg_proof`
+
+```python
+def verify_kzg_proof(commitment: KZGCommitment, x: BLSFieldElement, y: BLSFieldElement, proof: KZGCommitment) -> None:
+ """
+ Check that `proof` is a valid KZG proof for the polynomial committed to by `commitment` evaluated
+ at `x` equals `y`.
+ """
+ zero_poly = G2_SETUP[1].add(G2_SETUP[0].mult(x).neg())
+
+ assert (
+ bls.Pairing(proof, zero_poly)
+        == bls.Pairing(commitment.add(G1_SETUP[0].mult(y).neg()), G2_SETUP[0])
+ )
+```
+
+#### `verify_kzg_multiproof`
+
+```python
+def verify_kzg_multiproof(commitment: KZGCommitment,
+ xs: List[BLSFieldElement],
+ ys: List[BLSFieldElement],
+ proof: KZGCommitment) -> None:
+ """
+ Verify a KZG multiproof.
+ """
+ zero_poly = elliptic_curve_lincomb(G2_SETUP[:len(xs)], interpolate_polynomial(xs, [0] * len(ys)))
+ interpolated_poly = elliptic_curve_lincomb(G2_SETUP[:len(xs)], interpolate_polynomial(xs, ys))
+
+ assert (
+ bls.Pairing(proof, zero_poly)
+ == bls.Pairing(commitment.add(interpolated_poly.neg()), G2_SETUP[0])
+ )
+```
+
+#### `verify_degree_proof`
+
+```python
+def verify_degree_proof(commitment: KZGCommitment, degree_bound: uint64, proof: KZGCommitment):
+ """
+ Verifies that the commitment is of polynomial degree < degree_bound.
+ """
+
+ assert (
+ bls.Pairing(proof, G2_SETUP[0])
+ == bls.Pairing(commitment, G2_SETUP[-degree_bound])
+ )
+```
\ No newline at end of file
diff --git a/specs/sharding/validator.md b/specs/sharding/validator.md
new file mode 100644
index 000000000..38914095f
--- /dev/null
+++ b/specs/sharding/validator.md
@@ -0,0 +1,141 @@
+# Sharding -- Honest Validator
+
+**Notice**: This document is a work-in-progress for researchers and implementers.
+
+## Table of contents
+
+
+
+
+
+ - [Introduction](#introduction)
+ - [Prerequisites](#prerequisites)
+ - [Constants](#constants)
+ - [Sample counts](#sample-counts)
+ - [Helpers](#helpers)
+ - [`get_validator_row_subnets`](#get_validator_row_subnets)
+ - [`get_validator_column_subnets`](#get_validator_column_subnets)
+ - [`reconstruct_polynomial`](#reconstruct_polynomial)
+ - [Sample verification](#sample-verification)
+ - [`verify_sample`](#verify_sample)
+- [Beacon chain responsibilities](#beacon-chain-responsibilities)
+ - [Validator assignments](#validator-assignments)
+ - [Attesting](#attesting)
+- [Sample reconstruction](#sample-reconstruction)
+ - [Minimum online validator requirement](#minimum-online-validator-requirement)
+
+
+
+
+## Introduction
+
+This document represents the changes to be made in the code of an "honest validator" to implement the sharding proposal.
+
+## Prerequisites
+
+This document is an extension of the [Bellatrix -- Honest Validator](../bellatrix/validator.md) guide.
+All behaviors and definitions defined in this document, and documents it extends, carry over unless explicitly noted or overridden.
+
+All terminology, constants, functions, and protocol mechanics defined in the updated Beacon Chain doc of [Sharding](./beacon-chain.md) are requisite for this document and used throughout.
+Please see related Beacon Chain doc before continuing and use them as a reference throughout.
+
+## Constants
+
+### Sample counts
+
+| Name | Value |
+| - | - |
+| `VALIDATOR_SAMPLE_ROW_COUNT` | `2` |
+| `VALIDATOR_SAMPLE_COLUMN_COUNT` | `2` |
+
+## Helpers
+
+### `get_validator_row_subnets`
+
+TODO: Currently the subnets are public (i.e. anyone can derive them.) This is good for a proof of custody with public verifiability, but bad for validator privacy.
+
+```python
+def get_validator_row_subnets(validator: Validator, epoch: Epoch) -> List[uint64]:
+    return [int.from_bytes(hash_tree_root([validator.pubkey, 0, i]), "little") for i in range(VALIDATOR_SAMPLE_ROW_COUNT)]
+```
+
+### `get_validator_column_subnets`
+
+```python
+def get_validator_column_subnets(validator: Validator, epoch: Epoch) -> List[uint64]:
+    return [int.from_bytes(hash_tree_root([validator.pubkey, 1, i]), "little") for i in range(VALIDATOR_SAMPLE_COLUMN_COUNT)]
+```
+
+### `reconstruct_polynomial`
+
+```python
+def reconstruct_polynomial(samples: List[SignedShardSample]) -> List[SignedShardSample]:
+ """
+ Reconstructs one full row/column from at least 1/2 of the samples
+ """
+
+```
+
+## Sample verification
+
+### `verify_sample`
+
+```python
+def verify_sample(state: BeaconState, block: BeaconBlock, sample: SignedShardSample):
+    assert sample.row < 2 * get_active_shard_count(state, compute_epoch_at_slot(block.slot))
+ assert sample.column < 2 * SAMPLES_PER_BLOB
+ assert block.slot == sample.slot
+
+ # Verify builder signature.
+ # TODO: We should probably not do this. This should only be done by p2p to verify samples *before* intermediate block is in
+ # builder = state.validators[signed_block.message.proposer_index]
+ # signing_root = compute_signing_root(sample, get_domain(state, DOMAIN_SHARD_SAMPLE))
+ # assert bls.Verify(sample.builder, signing_root, sample.signature)
+
+ roots_in_rbo = list_to_reverse_bit_order(roots_of_unity(SAMPLES_PER_BLOB * FIELD_ELEMENTS_PER_SAMPLE))
+
+ # Verify KZG proof
+ verify_kzg_multiproof(block.body.payload_data.value.sharded_commitments_container.sharded_commitments[sample.row],
+                          roots_in_rbo[sample.column * FIELD_ELEMENTS_PER_SAMPLE:(sample.column + 1) * FIELD_ELEMENTS_PER_SAMPLE],
+ sample.data,
+ sample.proof)
+```
+
+# Beacon chain responsibilities
+
+## Validator assignments
+
+### Attesting
+
+Every attester is assigned `VALIDATOR_SAMPLE_ROW_COUNT` rows and `VALIDATOR_SAMPLE_COLUMN_COUNT` columns of shard samples. As part of their validator duties, they should subscribe to the subnets given by `get_validator_row_subnets` and `get_validator_column_subnets`, for the whole epoch.
+
+A row or column is *available* for a `slot` if at least half of the total number of samples were received on the subnet and passed `verify_sample`. Otherwise it is called unavailable.
+
+If a validator is assigned to an attestation at slot `attestation_slot` and had their previous attestation duty at `previous_attestation_slot`, then they should only attest under the following conditions:
+
+ * For all intermediate blocks `block` with `previous_attestation_slot < block.slot <= attestation_slot`: All sample rows and columns assigned to the validator were available.
+
+If this condition is not fulfilled, then the validator should instead attest to the last block for which the condition holds.
+
+This leads to the security property that a chain that is not fully available cannot have more than 1/16th of all validators voting for it. TODO: This claim is for an "infinite number" of validators. Compute the concrete security due to sampling bias.
+
+# Sample reconstruction
+
+A validator that has received enough samples of a row or column to mark it as available, should reconstruct all samples in that row/column (if they aren't all available already.) The function `reconstruct_polynomial` gives an example implementation for this.
+
+Once they have run the reconstruction function, they should distribute the samples that they reconstructed on all pubsub that
+the local node is subscribed to, if they have not already received that sample on that pubsub. As an example:
+
+ * The validator is subscribed to row `2` and column `5`
+ * The sample `(row, column) = (2, 5)` is missing in the column `5` pubsub
+ * After they have reconstructed row `2`, the validator should send the sample `(2, 5)` on to the row `2` pubsub (if it was missing) as well as the column `5` pubsub.
+
+TODO: We need to verify the total complexity of doing this and make sure this does not cause too much load on a validator
+
+## Minimum online validator requirement
+
+The data availability construction guarantees that reconstruction is possible if 75% of all samples are available. In this case, at least 50% of all rows and 50% of all columns are independently available. In practice, it is likely that some supernodes will centrally collect all samples and fill in any gaps. However, we want to build a system that reliably reconstructs even absent all supernodes. Any row or column with 50% of samples will easily be reconstructed even with only 100s of validators online; so the only question is how we get to 50% of samples for all rows and columns, when some of them might be completely unseeded.
+
+Each validator will transfer 4 samples between rows and columns where there is overlap. Without loss of generality, look at row 0. Each validator has 1/128 chance of having a sample in this row, and we need 256 samples to reconstruct it. So we expect that we need ~256 * 128 = 32,768 validators to have a fair chance of reconstructing it if it was completely unseeded.
+
+A more elaborate estimate [here](https://notes.ethereum.org/@dankrad/minimum-reconstruction-validators) needs about 55,000 validators to be online for high safety that each row and column will be reconstructed.
\ No newline at end of file
diff --git a/sync/optimistic.md b/sync/optimistic.md
index c2f06a9be..4e03cc6bb 100644
--- a/sync/optimistic.md
+++ b/sync/optimistic.md
@@ -1,5 +1,38 @@
# Optimistic Sync
+## Table of contents
+
+
+
+
+- [Introduction](#introduction)
+- [Constants](#constants)
+- [Helpers](#helpers)
+- [Mechanisms](#mechanisms)
+ - [When to optimistically import blocks](#when-to-optimistically-import-blocks)
+ - [How to optimistically import blocks](#how-to-optimistically-import-blocks)
+ - [How to apply `latestValidHash` when payload status is `INVALID`](#how-to-apply-latestvalidhash-when-payload-status-is-invalid)
+ - [Execution Engine Errors](#execution-engine-errors)
+ - [Assumptions about Execution Engine Behaviour](#assumptions-about-execution-engine-behaviour)
+ - [Re-Orgs](#re-orgs)
+- [Fork Choice](#fork-choice)
+ - [Fork Choice Poisoning](#fork-choice-poisoning)
+- [Checkpoint Sync (Weak Subjectivity Sync)](#checkpoint-sync-weak-subjectivity-sync)
+- [Validator assignments](#validator-assignments)
+ - [Block Production](#block-production)
+ - [Attesting](#attesting)
+ - [Participating in Sync Committees](#participating-in-sync-committees)
+- [Ethereum Beacon APIs](#ethereum-beacon-apis)
+- [Design Decision Rationale](#design-decision-rationale)
+ - [Why sync optimistically?](#why-sync-optimistically)
+ - [Why `SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY`?](#why-safe_slots_to_import_optimistically)
+ - [Transitioning from VALID -> INVALIDATED or INVALIDATED -> VALID](#transitioning-from-valid---invalidated-or-invalidated---valid)
+ - [What about Light Clients?](#what-about-light-clients)
+ - [What if `TERMINAL_BLOCK_HASH` is used?](#what-if-terminal_block_hash-is-used)
+
+
+
+
## Introduction
In order to provide a syncing execution engine with a partial view of the head
@@ -89,8 +122,12 @@ def is_optimistic_candidate_block(opt_store: OptimisticStore, current_slot: Slot
return False
```
-Let only a node which returns `is_optimistic(opt_store, head) is True` be an *optimistic
-node*. Let only a validator on an optimistic node be an *optimistic validator*.
+Let a node be an *optimistic node* if its fork choice is in one of the following states:
+1. `is_optimistic(opt_store, head) is True`
+2. Blocks from every viable (with respect to FFG) branch have transitioned from `NOT_VALIDATED` to `INVALIDATED`
+leaving the block tree without viable branches
+
+Let only a validator on an optimistic node be an *optimistic validator*.
When this specification only defines behaviour for an optimistic
node/validator, but *not* for the non-optimistic case, assume default
@@ -163,6 +200,23 @@ the merge block MUST be treated the same as
an `INVALIDATED` block (i.e., it and all its descendants are invalidated and
removed from the block tree).
+### How to apply `latestValidHash` when payload status is `INVALID`
+
+Processing an `INVALID` payload status depends on the `latestValidHash` parameter.
+The general approach is as follows:
+1. Consensus engine MUST identify `invalidBlock` as per definition in the table below.
+2. `invalidBlock` and all of its descendants MUST be transitioned from `NOT_VALIDATED` to `INVALIDATED`.
+
+| `latestValidHash` | `invalidBlock` |
+|:- |:- |
+| Execution block hash | The *child* of a block with `body.execution_payload.block_hash == latestValidHash` in the chain containing the block with payload in question |
+| `0x00..00` (all zeroes) | The first block with `body.execution_payload != ExecutionPayload()` in the chain containing a block with payload in question |
+| `null` | Block with payload in question |
+
+When `latestValidHash` is a meaningful execution block hash but consensus engine
+cannot find a block satisfying `body.execution_payload.block_hash == latestValidHash`,
+consensus engine SHOULD behave the same as if `latestValidHash` was `null`.
+
### Execution Engine Errors
When an execution engine returns an error or fails to respond to a payload
@@ -270,6 +324,28 @@ optimistic blocks (and vice-versa).
## Design Decision Rationale
+### Why sync optimistically?
+
+Most execution engines use state sync as a default sync mechanism on Ethereum Mainnet
+because executing blocks from genesis takes several weeks on commodity hardware.
+
+State sync requires the knowledge of the current head of the chain to converge eventually.
+If not constantly fed with the most recent head, state sync won't be able to complete
+because the recent state soon becomes unavailable due to state trie pruning.
+
+Optimistic block import (i.e. import when the execution engine *cannot* currently validate the payload)
+breaks a deadlock between the execution layer sync process and importing beacon blocks
+while the execution engine is syncing.
+
+Optimistic sync is also an optimal strategy for execution engines using block execution as a default
+sync mechanism (e.g. Erigon). Alternatively, a consensus engine may inform the execution engine with a payload
+obtained from a checkpoint block, then wait until the execution layer catches up with it and proceed
+in lock step after that. This alternative approach would keep user in limbo for several hours and
+would increase time of the sync process as batch sync has more opportunities for optimisation than the lock step.
+
+Aforementioned premises make optimistic sync a *generalized* solution for interaction between consensus and
+execution engines during the sync process.
+
### Why `SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY`?
Nodes can only import an optimistic block if their justified checkpoint is
@@ -328,7 +404,7 @@ specification since it's only possible with a faulty EE.
Such a scenario requires manual intervention.
-## What about Light Clients?
+### What about Light Clients?
An alternative to optimistic sync is to run a light client inside/alongside
beacon nodes that mitigates the need for optimistic sync by providing
@@ -340,7 +416,7 @@ A notable thing about optimistic sync is that it's *optional*. Should an
implementation decide to go the light-client route, then they can just ignore
optimistic sync all together.
-## What if `TERMINAL_BLOCK_HASH` is used?
+### What if `TERMINAL_BLOCK_HASH` is used?
If the terminal block hash override is used (i.e., `TERMINAL_BLOCK_HASH !=
Hash32()`), the [`validate_merge_block`](../specs/bellatrix/fork-choice.md#validate_merge_block)
diff --git a/tests/core/pyspec/eth2spec/VERSION.txt b/tests/core/pyspec/eth2spec/VERSION.txt
index a94c1f6e2..e11954015 100644
--- a/tests/core/pyspec/eth2spec/VERSION.txt
+++ b/tests/core/pyspec/eth2spec/VERSION.txt
@@ -1 +1 @@
-1.2.0-rc.1
+1.2.0-rc.2
\ No newline at end of file
diff --git a/tests/core/pyspec/eth2spec/gen_helpers/gen_base/gen_runner.py b/tests/core/pyspec/eth2spec/gen_helpers/gen_base/gen_runner.py
index 6850af188..1d2b03fd2 100644
--- a/tests/core/pyspec/eth2spec/gen_helpers/gen_base/gen_runner.py
+++ b/tests/core/pyspec/eth2spec/gen_helpers/gen_base/gen_runner.py
@@ -1,9 +1,12 @@
+from eth_utils import encode_hex
import os
import time
import shutil
import argparse
from pathlib import Path
+from filelock import FileLock
import sys
+import json
from typing import Iterable, AnyStr, Any, Callable
import traceback
@@ -95,6 +98,20 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
yaml = YAML(pure=True)
yaml.default_flow_style = None
+ # Spec config is using a YAML subset
+ cfg_yaml = YAML(pure=True)
+ cfg_yaml.default_flow_style = False # Emit separate line for each key
+
+ def cfg_represent_bytes(self, data):
+ return self.represent_int(encode_hex(data))
+
+ cfg_yaml.representer.add_representer(bytes, cfg_represent_bytes)
+
+ def cfg_represent_quoted_str(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:str', data, style="'")
+
+ cfg_yaml.representer.add_representer(context.quoted_str, cfg_represent_quoted_str)
+
log_file = Path(output_dir) / 'testgen_error_log.txt'
print(f"Generating tests into {output_dir}")
@@ -111,6 +128,8 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
collected_test_count = 0
generated_test_count = 0
skipped_test_count = 0
+ test_identifiers = []
+
provider_start = time.time()
for tprov in test_providers:
if not collect_only:
@@ -123,12 +142,10 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
/ Path(test_case.runner_name) / Path(test_case.handler_name)
/ Path(test_case.suite_name) / Path(test_case.case_name)
)
- incomplete_tag_file = case_dir / "INCOMPLETE"
-
collected_test_count += 1
- if collect_only:
- print(f"Collected test at: {case_dir}")
- continue
+ print(f"Collected test at: {case_dir}")
+
+ incomplete_tag_file = case_dir / "INCOMPLETE"
if case_dir.exists():
if not args.force and not incomplete_tag_file.exists():
@@ -167,10 +184,14 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
written_part = True
if out_kind == "meta":
meta[name] = data
- if out_kind == "data":
- output_part("data", name, dump_yaml_fn(data, name, file_mode, yaml))
- if out_kind == "ssz":
- output_part("ssz", name, dump_ssz_fn(data, name, file_mode))
+ elif out_kind == "cfg":
+ output_part(out_kind, name, dump_yaml_fn(data, name, file_mode, cfg_yaml))
+ elif out_kind == "data":
+ output_part(out_kind, name, dump_yaml_fn(data, name, file_mode, yaml))
+ elif out_kind == "ssz":
+ output_part(out_kind, name, dump_ssz_fn(data, name, file_mode))
+ else:
+ assert False # Unknown kind
except SkippedTest as e:
print(e)
skipped_test_count += 1
@@ -198,6 +219,15 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
shutil.rmtree(case_dir)
else:
generated_test_count += 1
+ test_identifier = "::".join([
+ test_case.preset_name,
+ test_case.fork_name,
+ test_case.runner_name,
+ test_case.handler_name,
+ test_case.suite_name,
+ test_case.case_name
+ ])
+ test_identifiers.append(test_identifier)
# Only remove `INCOMPLETE` tag file
os.remove(incomplete_tag_file)
test_end = time.time()
@@ -216,6 +246,28 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
if span > TIME_THRESHOLD_TO_PRINT:
summary_message += f" in {span} seconds"
print(summary_message)
+ diagnostics = {
+ "collected_test_count": collected_test_count,
+ "generated_test_count": generated_test_count,
+ "skipped_test_count": skipped_test_count,
+ "test_identifiers": test_identifiers,
+ "durations": [f"{span} seconds"],
+ }
+ diagnostics_path = Path(os.path.join(output_dir, "diagnostics.json"))
+ diagnostics_lock = FileLock(os.path.join(output_dir, "diagnostics.json.lock"))
+ with diagnostics_lock:
+ diagnostics_path.touch(exist_ok=True)
+ if os.path.getsize(diagnostics_path) == 0:
+ with open(diagnostics_path, "w+") as f:
+ json.dump(diagnostics, f)
+ else:
+ with open(diagnostics_path, "r+") as f:
+ existing_diagnostics = json.load(f)
+ for k, v in diagnostics.items():
+ existing_diagnostics[k] += v
+ with open(diagnostics_path, "w+") as f:
+ json.dump(existing_diagnostics, f)
+ print(f"wrote diagnostics to {diagnostics_path}")
def dump_yaml_fn(data: Any, name: str, file_mode: str, yaml_encoder: YAML):
diff --git a/tests/core/pyspec/eth2spec/gen_helpers/gen_base/gen_typing.py b/tests/core/pyspec/eth2spec/gen_helpers/gen_base/gen_typing.py
index 669238d1c..2bb66d06c 100644
--- a/tests/core/pyspec/eth2spec/gen_helpers/gen_base/gen_typing.py
+++ b/tests/core/pyspec/eth2spec/gen_helpers/gen_base/gen_typing.py
@@ -10,9 +10,10 @@ from dataclasses import dataclass
# Elements: name, out_kind, data
#
# out_kind is the type of data:
+# - "meta" for generic data to collect into a meta data dict
+# - "cfg" for a spec config dictionary
# - "data" for generic
# - "ssz" for SSZ encoded bytes
-# - "meta" for generic data to collect into a meta data dict.
TestCasePart = NewType("TestCasePart", Tuple[str, str, Any])
diff --git a/tests/core/pyspec/eth2spec/test/altair/block_processing/sync_aggregate/test_process_sync_aggregate.py b/tests/core/pyspec/eth2spec/test/altair/block_processing/sync_aggregate/test_process_sync_aggregate.py
index 8a2deabb1..f593353f9 100644
--- a/tests/core/pyspec/eth2spec/test/altair/block_processing/sync_aggregate/test_process_sync_aggregate.py
+++ b/tests/core/pyspec/eth2spec/test/altair/block_processing/sync_aggregate/test_process_sync_aggregate.py
@@ -31,7 +31,7 @@ from eth2spec.test.context import (
@spec_state_test
@always_bls
def test_invalid_signature_bad_domain(spec, state):
- committee_indices = compute_committee_indices(spec, state)
+ committee_indices = compute_committee_indices(state)
block = build_empty_block_for_next_slot(spec, state)
block.body.sync_aggregate = spec.SyncAggregate(
@@ -52,7 +52,7 @@ def test_invalid_signature_bad_domain(spec, state):
@spec_state_test
@always_bls
def test_invalid_signature_missing_participant(spec, state):
- committee_indices = compute_committee_indices(spec, state)
+ committee_indices = compute_committee_indices(state)
rng = random.Random(2020)
random_participant = rng.choice(committee_indices)
@@ -116,7 +116,7 @@ def test_invalid_signature_infinite_signature_with_single_participant(spec, stat
@spec_state_test
@always_bls
def test_invalid_signature_extra_participant(spec, state):
- committee_indices = compute_committee_indices(spec, state)
+ committee_indices = compute_committee_indices(state)
rng = random.Random(3030)
random_participant = rng.choice(committee_indices)
@@ -140,7 +140,7 @@ def test_invalid_signature_extra_participant(spec, state):
@with_presets([MINIMAL], reason="to create nonduplicate committee")
@spec_state_test
def test_sync_committee_rewards_nonduplicate_committee(spec, state):
- committee_indices = compute_committee_indices(spec, state)
+ committee_indices = compute_committee_indices(state)
committee_size = len(committee_indices)
committee_bits = [True] * committee_size
active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
@@ -156,7 +156,7 @@ def test_sync_committee_rewards_nonduplicate_committee(spec, state):
@with_presets([MAINNET], reason="to create duplicate committee")
@spec_state_test
def test_sync_committee_rewards_duplicate_committee_no_participation(spec, state):
- committee_indices = compute_committee_indices(spec, state)
+ committee_indices = compute_committee_indices(state)
committee_size = len(committee_indices)
committee_bits = [False] * committee_size
active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
@@ -172,7 +172,7 @@ def test_sync_committee_rewards_duplicate_committee_no_participation(spec, state
@with_presets([MAINNET], reason="to create duplicate committee")
@spec_state_test
def test_sync_committee_rewards_duplicate_committee_half_participation(spec, state):
- committee_indices = compute_committee_indices(spec, state)
+ committee_indices = compute_committee_indices(state)
committee_size = len(committee_indices)
committee_bits = [True] * (committee_size // 2) + [False] * (committee_size // 2)
assert len(committee_bits) == committee_size
@@ -189,7 +189,7 @@ def test_sync_committee_rewards_duplicate_committee_half_participation(spec, sta
@with_presets([MAINNET], reason="to create duplicate committee")
@spec_state_test
def test_sync_committee_rewards_duplicate_committee_full_participation(spec, state):
- committee_indices = compute_committee_indices(spec, state)
+ committee_indices = compute_committee_indices(state)
committee_size = len(committee_indices)
committee_bits = [True] * committee_size
active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
@@ -205,7 +205,7 @@ def test_sync_committee_rewards_duplicate_committee_full_participation(spec, sta
@spec_state_test
@always_bls
def test_sync_committee_rewards_not_full_participants(spec, state):
- committee_indices = compute_committee_indices(spec, state)
+ committee_indices = compute_committee_indices(state)
rng = random.Random(1010)
committee_bits = [rng.choice([True, False]) for _ in committee_indices]
@@ -216,7 +216,7 @@ def test_sync_committee_rewards_not_full_participants(spec, state):
@spec_state_test
@always_bls
def test_sync_committee_rewards_empty_participants(spec, state):
- committee_indices = compute_committee_indices(spec, state)
+ committee_indices = compute_committee_indices(state)
committee_bits = [False for _ in committee_indices]
yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
@@ -226,7 +226,7 @@ def test_sync_committee_rewards_empty_participants(spec, state):
@spec_state_test
@always_bls
def test_invalid_signature_past_block(spec, state):
- committee_indices = compute_committee_indices(spec, state)
+ committee_indices = compute_committee_indices(state)
for _ in range(2):
# NOTE: need to transition twice to move beyond the degenerate case at genesis
@@ -280,7 +280,7 @@ def test_invalid_signature_previous_committee(spec, state):
# Use the previous sync committee to produce the signature.
# Ensure that the pubkey sets are different.
assert set(old_sync_committee.pubkeys) != set(state.current_sync_committee.pubkeys)
- committee_indices = compute_committee_indices(spec, state, old_sync_committee)
+ committee_indices = compute_committee_indices(state, old_sync_committee)
block = build_empty_block_for_next_slot(spec, state)
block.body.sync_aggregate = spec.SyncAggregate(
@@ -322,7 +322,7 @@ def test_valid_signature_future_committee(spec, state):
assert sync_committee != old_current_sync_committee
assert sync_committee != old_next_sync_committee
- committee_indices = compute_committee_indices(spec, state, sync_committee)
+ committee_indices = compute_committee_indices(state, sync_committee)
block = build_empty_block_for_next_slot(spec, state)
block.body.sync_aggregate = spec.SyncAggregate(
@@ -344,7 +344,7 @@ def test_valid_signature_future_committee(spec, state):
@always_bls
@with_presets([MINIMAL], reason="prefer short search to find matching proposer")
def test_proposer_in_committee_without_participation(spec, state):
- committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
+ committee_indices = compute_committee_indices(state, state.current_sync_committee)
# NOTE: seem to reliably be getting a matching proposer in the first epoch w/ ``MINIMAL`` preset.
for _ in range(spec.SLOTS_PER_EPOCH):
@@ -385,7 +385,7 @@ def test_proposer_in_committee_without_participation(spec, state):
@always_bls
@with_presets([MINIMAL], reason="prefer short search to find matching proposer")
def test_proposer_in_committee_with_participation(spec, state):
- committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
+ committee_indices = compute_committee_indices(state, state.current_sync_committee)
participation = [True for _ in committee_indices]
# NOTE: seem to reliably be getting a matching proposer in the first epoch w/ ``MINIMAL`` preset.
@@ -451,7 +451,7 @@ def test_sync_committee_with_participating_exited_member(spec, state):
for _ in range(3):
next_epoch_via_block(spec, state)
- committee_indices = compute_committee_indices(spec, state)
+ committee_indices = compute_committee_indices(state)
rng = random.Random(1010)
exited_index = _exit_validator_from_committee_and_transition_state(
@@ -490,7 +490,7 @@ def test_sync_committee_with_nonparticipating_exited_member(spec, state):
for _ in range(3):
next_epoch_via_block(spec, state)
- committee_indices = compute_committee_indices(spec, state)
+ committee_indices = compute_committee_indices(state)
rng = random.Random(1010)
exited_index = _exit_validator_from_committee_and_transition_state(
@@ -533,7 +533,7 @@ def test_sync_committee_with_participating_withdrawable_member(spec, state):
for _ in range(3):
next_epoch_via_block(spec, state)
- committee_indices = compute_committee_indices(spec, state)
+ committee_indices = compute_committee_indices(state)
rng = random.Random(1010)
exited_index = _exit_validator_from_committee_and_transition_state(
@@ -572,7 +572,7 @@ def test_sync_committee_with_nonparticipating_withdrawable_member(spec, state):
for _ in range(3):
next_epoch_via_block(spec, state)
- committee_indices = compute_committee_indices(spec, state)
+ committee_indices = compute_committee_indices(state)
rng = random.Random(1010)
exited_index = _exit_validator_from_committee_and_transition_state(
diff --git a/tests/core/pyspec/eth2spec/test/altair/block_processing/sync_aggregate/test_process_sync_aggregate_random.py b/tests/core/pyspec/eth2spec/test/altair/block_processing/sync_aggregate/test_process_sync_aggregate_random.py
index 903df4081..a402e3d54 100644
--- a/tests/core/pyspec/eth2spec/test/altair/block_processing/sync_aggregate/test_process_sync_aggregate_random.py
+++ b/tests/core/pyspec/eth2spec/test/altair/block_processing/sync_aggregate/test_process_sync_aggregate_random.py
@@ -28,7 +28,7 @@ from eth2spec.test.context import (
def _test_harness_for_randomized_test_case(spec, state, expect_duplicates=False, participation_fn=None):
- committee_indices = compute_committee_indices(spec, state)
+ committee_indices = compute_committee_indices(state)
if participation_fn:
participating_indices = participation_fn(committee_indices)
diff --git a/tests/core/pyspec/eth2spec/test/altair/merkle/__init__.py b/tests/core/pyspec/eth2spec/test/altair/light_client/__init__.py
similarity index 100%
rename from tests/core/pyspec/eth2spec/test/altair/merkle/__init__.py
rename to tests/core/pyspec/eth2spec/test/altair/light_client/__init__.py
diff --git a/tests/core/pyspec/eth2spec/test/altair/merkle/test_single_proof.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_single_merkle_proof.py
similarity index 57%
rename from tests/core/pyspec/eth2spec/test/altair/merkle/test_single_proof.py
rename to tests/core/pyspec/eth2spec/test/altair/light_client/test_single_merkle_proof.py
index 31cdd13bb..064760bf0 100644
--- a/tests/core/pyspec/eth2spec/test/altair/merkle/test_single_proof.py
+++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_single_merkle_proof.py
@@ -2,14 +2,32 @@ from eth2spec.test.context import (
spec_state_test,
with_altair_and_later,
)
-from eth2spec.test.helpers.merkle import build_proof
+
+
+@with_altair_and_later
+@spec_state_test
+def test_current_sync_committee_merkle_proof(spec, state):
+ yield "state", state
+ current_sync_committee_branch = spec.compute_merkle_proof_for_state(state, spec.CURRENT_SYNC_COMMITTEE_INDEX)
+ yield "proof", {
+ "leaf": "0x" + state.current_sync_committee.hash_tree_root().hex(),
+ "leaf_index": spec.CURRENT_SYNC_COMMITTEE_INDEX,
+ "branch": ['0x' + root.hex() for root in current_sync_committee_branch]
+ }
+ assert spec.is_valid_merkle_branch(
+ leaf=state.current_sync_committee.hash_tree_root(),
+ branch=current_sync_committee_branch,
+ depth=spec.floorlog2(spec.CURRENT_SYNC_COMMITTEE_INDEX),
+ index=spec.get_subtree_index(spec.CURRENT_SYNC_COMMITTEE_INDEX),
+ root=state.hash_tree_root(),
+ )
@with_altair_and_later
@spec_state_test
def test_next_sync_committee_merkle_proof(spec, state):
yield "state", state
- next_sync_committee_branch = build_proof(state.get_backing(), spec.NEXT_SYNC_COMMITTEE_INDEX)
+ next_sync_committee_branch = spec.compute_merkle_proof_for_state(state, spec.NEXT_SYNC_COMMITTEE_INDEX)
yield "proof", {
"leaf": "0x" + state.next_sync_committee.hash_tree_root().hex(),
"leaf_index": spec.NEXT_SYNC_COMMITTEE_INDEX,
@@ -28,7 +46,7 @@ def test_next_sync_committee_merkle_proof(spec, state):
@spec_state_test
def test_finality_root_merkle_proof(spec, state):
yield "state", state
- finality_branch = build_proof(state.get_backing(), spec.FINALIZED_ROOT_INDEX)
+ finality_branch = spec.compute_merkle_proof_for_state(state, spec.FINALIZED_ROOT_INDEX)
yield "proof", {
"leaf": "0x" + state.finalized_checkpoint.root.hex(),
"leaf_index": spec.FINALIZED_ROOT_INDEX,
diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py
new file mode 100644
index 000000000..47348717b
--- /dev/null
+++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py
@@ -0,0 +1,430 @@
+from typing import (Any, Dict, List)
+
+from eth_utils import encode_hex
+from eth2spec.test.context import (
+ spec_state_test_with_matching_config,
+ with_presets,
+ with_altair_and_later,
+)
+from eth2spec.test.helpers.attestations import (
+ next_slots_with_attestations,
+ state_transition_with_full_block,
+)
+from eth2spec.test.helpers.constants import MINIMAL
+from eth2spec.test.helpers.light_client import (
+ get_sync_aggregate,
+)
+from eth2spec.test.helpers.state import (
+ next_slots,
+ transition_to,
+)
+
+
+def setup_test(spec, state):
+ class LightClientSyncTest(object):
+ steps: List[Dict[str, Any]]
+ genesis_validators_root: spec.Root
+ store: spec.LightClientStore
+
+ test = LightClientSyncTest()
+ test.steps = []
+
+ yield "genesis_validators_root", "meta", "0x" + state.genesis_validators_root.hex()
+ test.genesis_validators_root = state.genesis_validators_root
+
+ next_slots(spec, state, spec.SLOTS_PER_EPOCH * 2 - 1)
+ trusted_block = state_transition_with_full_block(spec, state, True, True)
+ trusted_block_root = trusted_block.message.hash_tree_root()
+ bootstrap = spec.create_light_client_bootstrap(state)
+ yield "trusted_block_root", "meta", "0x" + trusted_block_root.hex()
+ yield "bootstrap", bootstrap
+ test.store = spec.initialize_light_client_store(trusted_block_root, bootstrap)
+
+ return test
+
+
+def finish_test(test):
+ yield "steps", test.steps
+
+
+def get_update_file_name(spec, update):
+ if spec.is_sync_committee_update(update):
+ suffix1 = "s"
+ else:
+ suffix1 = "x"
+ if spec.is_finality_update(update):
+ suffix2 = "f"
+ else:
+ suffix2 = "x"
+ return f"update_{encode_hex(update.attested_header.hash_tree_root())}_{suffix1}{suffix2}"
+
+
+def get_checks(store):
+ return {
+ "finalized_header": {
+ 'slot': int(store.finalized_header.slot),
+ 'root': encode_hex(store.finalized_header.hash_tree_root()),
+ },
+ "optimistic_header": {
+ 'slot': int(store.optimistic_header.slot),
+ 'root': encode_hex(store.optimistic_header.hash_tree_root()),
+ },
+ }
+
+
+def emit_force_update(test, spec, state):
+ current_slot = state.slot
+ spec.process_light_client_store_force_update(test.store, current_slot)
+
+ yield from [] # Consistently enable `yield from` syntax in calling tests
+ test.steps.append({
+ "force_update": {
+ "current_slot": int(current_slot),
+ "checks": get_checks(test.store),
+ }
+ })
+
+
+def emit_update(test, spec, state, block, attested_state, finalized_block, with_next_sync_committee=True):
+ update = spec.create_light_client_update(state, block, attested_state, finalized_block)
+ if not with_next_sync_committee:
+ update.next_sync_committee = spec.SyncCommittee()
+ update.next_sync_committee_branch = \
+ [spec.Bytes32() for _ in range(spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX))]
+ current_slot = state.slot
+ spec.process_light_client_update(test.store, update, current_slot, test.genesis_validators_root)
+
+ yield get_update_file_name(spec, update), update
+ test.steps.append({
+ "process_update": {
+ "update": get_update_file_name(spec, update),
+ "current_slot": int(current_slot),
+ "checks": get_checks(test.store),
+ }
+ })
+ return update
+
+
+def compute_start_slot_at_sync_committee_period(spec, sync_committee_period):
+ return spec.compute_start_slot_at_epoch(sync_committee_period * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
+
+
+def compute_start_slot_at_next_sync_committee_period(spec, state):
+ sync_committee_period = spec.compute_sync_committee_period(spec.compute_epoch_at_slot(state.slot))
+ return compute_start_slot_at_sync_committee_period(spec, sync_committee_period + 1)
+
+
+@with_altair_and_later
+@spec_state_test_with_matching_config
+@with_presets([MINIMAL], reason="too slow")
+def test_light_client_sync(spec, state):
+ # Start test
+ test = yield from setup_test(spec, state)
+
+ # Initial `LightClientUpdate`, populating `store.next_sync_committee`
+ # ```
+ # |
+ # +-----------+ +----------+ +-----------+ |
+ # | finalized | <-- (2 epochs) -- | attested | <-- | signature | |
+ # +-----------+ +----------+ +-----------+ |
+ # |
+ # |
+ # sync committee
+ # period boundary
+ # ```
+ next_slots(spec, state, spec.SLOTS_PER_EPOCH - 1)
+ finalized_block = state_transition_with_full_block(spec, state, True, True)
+ finalized_state = state.copy()
+ _, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH, True, True)
+ attested_state = state.copy()
+ sync_aggregate, _ = get_sync_aggregate(spec, state)
+ block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
+ yield from emit_update(test, spec, state, block, attested_state, finalized_block)
+ assert test.store.finalized_header.slot == finalized_state.slot
+ assert test.store.next_sync_committee == finalized_state.next_sync_committee
+ assert test.store.best_valid_update is None
+ assert test.store.optimistic_header.slot == attested_state.slot
+
+ # Advance to next sync committee period
+ # ```
+ # |
+ # +-----------+ +----------+ +-----------+ |
+ # | finalized | <-- (2 epochs) -- | attested | <-- | signature | |
+ # +-----------+ +----------+ +-----------+ |
+ # |
+ # |
+ # sync committee
+ # period boundary
+ # ```
+ transition_to(spec, state, compute_start_slot_at_next_sync_committee_period(spec, state))
+ next_slots(spec, state, spec.SLOTS_PER_EPOCH - 1)
+ finalized_block = state_transition_with_full_block(spec, state, True, True)
+ finalized_state = state.copy()
+ _, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH, True, True)
+ attested_state = state.copy()
+ sync_aggregate, _ = get_sync_aggregate(spec, state)
+ block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
+ yield from emit_update(test, spec, state, block, attested_state, finalized_block)
+ assert test.store.finalized_header.slot == finalized_state.slot
+ assert test.store.next_sync_committee == finalized_state.next_sync_committee
+ assert test.store.best_valid_update is None
+ assert test.store.optimistic_header.slot == attested_state.slot
+
+ # Edge case: Signature in next period
+ # ```
+ # |
+ # +-----------+ +----------+ | +-----------+
+ # | finalized | <-- (2 epochs) -- | attested | <-- | signature |
+ # +-----------+ +----------+ | +-----------+
+ # |
+ # |
+ # sync committee
+ # period boundary
+ # ```
+ next_slots(spec, state, spec.SLOTS_PER_EPOCH - 2)
+ finalized_block = state_transition_with_full_block(spec, state, True, True)
+ finalized_state = state.copy()
+ _, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH, True, True)
+ attested_state = state.copy()
+ transition_to(spec, state, compute_start_slot_at_next_sync_committee_period(spec, state))
+ sync_aggregate, _ = get_sync_aggregate(spec, state)
+ block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
+ yield from emit_update(test, spec, state, block, attested_state, finalized_block)
+ assert test.store.finalized_header.slot == finalized_state.slot
+ assert test.store.next_sync_committee == finalized_state.next_sync_committee
+ assert test.store.best_valid_update is None
+ assert test.store.optimistic_header.slot == attested_state.slot
+
+ # Edge case: Finalized header not included
+ # ```
+ # |
+ # + - - - - - + | +----------+ +-----------+
+ # ¦ finalized ¦ <-- (2 epochs) -- | attested | <-- | signature |
+ # + - - - - - + | +----------+ +-----------+
+ # |
+ # |
+ # sync committee
+ # period boundary
+ # ```
+ attested_state = state.copy()
+ sync_aggregate, _ = get_sync_aggregate(spec, state)
+ block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
+ update = yield from emit_update(test, spec, state, block, attested_state, finalized_block=None)
+ assert test.store.finalized_header.slot == finalized_state.slot
+ assert test.store.next_sync_committee == finalized_state.next_sync_committee
+ assert test.store.best_valid_update == update
+ assert test.store.optimistic_header.slot == attested_state.slot
+
+ # Non-finalized case: Attested `next_sync_committee` is not finalized
+ # ```
+ # |
+ # +-----------+ | +----------+ +-----------+
+ # | finalized | <-- (2 epochs) -- | attested | <-- | signature |
+ # +-----------+ | +----------+ +-----------+
+ # |
+ # |
+ # sync committee
+ # period boundary
+ # ```
+ attested_state = state.copy()
+ store_state = attested_state.copy()
+ sync_aggregate, _ = get_sync_aggregate(spec, state)
+ block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
+ update = yield from emit_update(test, spec, state, block, attested_state, finalized_block)
+ assert test.store.finalized_header.slot == finalized_state.slot
+ assert test.store.next_sync_committee == finalized_state.next_sync_committee
+ assert test.store.best_valid_update == update
+ assert test.store.optimistic_header.slot == attested_state.slot
+
+ # Force-update using timeout
+ # ```
+ # |
+ # +-----------+ | +----------+
+ # | finalized | <-- (2 epochs) -- | attested |
+ # +-----------+ | +----------+
+ # | ^
+ # | \
+ # sync committee `--- store.finalized_header
+ # period boundary
+ # ```
+ attested_state = state.copy()
+ next_slots(spec, state, spec.UPDATE_TIMEOUT - 1)
+ yield from emit_force_update(test, spec, state)
+ assert test.store.finalized_header.slot == store_state.slot
+ assert test.store.next_sync_committee == store_state.next_sync_committee
+ assert test.store.best_valid_update is None
+ assert test.store.optimistic_header.slot == store_state.slot
+
+ # Edge case: Finalized header not included, after force-update
+ # ```
+ # | |
+ # + - - - - - + | +--+ +----------+ | +-----------+
+ # ¦ finalized ¦ <-- (2 epochs) -- | | <-- | attested | <-- | signature |
+ # + - - - - - + | +--+ +----------+ | +-----------+
+ # | / |
+ # | store.fin |
+ # sync committee sync committee
+ # period boundary period boundary
+ # ```
+ sync_aggregate, _ = get_sync_aggregate(spec, state)
+ block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
+ update = yield from emit_update(test, spec, state, block, attested_state, finalized_block=None)
+ assert test.store.finalized_header.slot == store_state.slot
+ assert test.store.next_sync_committee == store_state.next_sync_committee
+ assert test.store.best_valid_update == update
+ assert test.store.optimistic_header.slot == attested_state.slot
+
+ # Edge case: Finalized header older than store
+ # ```
+ # | |
+ # +-----------+ | +--+ | +----------+ +-----------+
+ # | finalized | <-- (2 epochs) -- | | <-- | attested | <-- | signature |
+ # +-----------+ | +--+ | +----------+ +-----------+
+ # | / |
+ # | store.fin |
+ # sync committee sync committee
+ # period boundary period boundary
+ # ```
+ attested_state = state.copy()
+ sync_aggregate, _ = get_sync_aggregate(spec, state)
+ block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
+ update = yield from emit_update(test, spec, state, block, attested_state, finalized_block)
+ assert test.store.finalized_header.slot == store_state.slot
+ assert test.store.next_sync_committee == store_state.next_sync_committee
+ assert test.store.best_valid_update == update
+ assert test.store.optimistic_header.slot == attested_state.slot
+ yield from emit_force_update(test, spec, state)
+ assert test.store.finalized_header.slot == attested_state.slot
+ assert test.store.next_sync_committee == attested_state.next_sync_committee
+ assert test.store.best_valid_update is None
+ assert test.store.optimistic_header.slot == attested_state.slot
+
+ # Advance to next sync committee period
+ # ```
+ # |
+ # +-----------+ +----------+ +-----------+ |
+ # | finalized | <-- (2 epochs) -- | attested | <-- | signature | |
+ # +-----------+ +----------+ +-----------+ |
+ # |
+ # |
+ # sync committee
+ # period boundary
+ # ```
+ transition_to(spec, state, compute_start_slot_at_next_sync_committee_period(spec, state))
+ next_slots(spec, state, spec.SLOTS_PER_EPOCH - 1)
+ finalized_block = state_transition_with_full_block(spec, state, True, True)
+ finalized_state = state.copy()
+ _, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH, True, True)
+ attested_state = state.copy()
+ sync_aggregate, _ = get_sync_aggregate(spec, state)
+ block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
+ yield from emit_update(test, spec, state, block, attested_state, finalized_block)
+ assert test.store.finalized_header.slot == finalized_state.slot
+ assert test.store.next_sync_committee == finalized_state.next_sync_committee
+ assert test.store.best_valid_update is None
+ assert test.store.optimistic_header.slot == attested_state.slot
+
+ # Finish test
+ yield from finish_test(test)
+
+
+@with_altair_and_later
+@spec_state_test_with_matching_config
+@with_presets([MINIMAL], reason="too slow")
+def test_supply_sync_committee_from_past_update(spec, state):
+ # Advance the chain, so that a `LightClientUpdate` from the past is available
+ next_slots(spec, state, spec.SLOTS_PER_EPOCH * 2 - 1)
+ finalized_block = state_transition_with_full_block(spec, state, True, True)
+ finalized_state = state.copy()
+ _, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH, True, True)
+ attested_state = state.copy()
+ sync_aggregate, _ = get_sync_aggregate(spec, state)
+ block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
+ past_state = state.copy()
+
+ # Start test
+ test = yield from setup_test(spec, state)
+ assert not spec.is_next_sync_committee_known(test.store)
+
+ # Apply `LightClientUpdate` from the past, populating `store.next_sync_committee`
+ yield from emit_update(test, spec, past_state, block, attested_state, finalized_block)
+ assert test.store.finalized_header.slot == state.slot
+ assert test.store.next_sync_committee == finalized_state.next_sync_committee
+ assert test.store.best_valid_update is None
+ assert test.store.optimistic_header.slot == state.slot
+
+ # Finish test
+ yield from finish_test(test)
+
+
+@with_altair_and_later
+@spec_state_test_with_matching_config
+@with_presets([MINIMAL], reason="too slow")
+def test_advance_finality_without_sync_committee(spec, state):
+ # Start test
+ test = yield from setup_test(spec, state)
+
+ # Initial `LightClientUpdate`, populating `store.next_sync_committee`
+ next_slots(spec, state, spec.SLOTS_PER_EPOCH - 1)
+ finalized_block = state_transition_with_full_block(spec, state, True, True)
+ finalized_state = state.copy()
+ _, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH, True, True)
+ attested_state = state.copy()
+ sync_aggregate, _ = get_sync_aggregate(spec, state)
+ block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
+ yield from emit_update(test, spec, state, block, attested_state, finalized_block)
+ assert test.store.finalized_header.slot == finalized_state.slot
+ assert test.store.next_sync_committee == finalized_state.next_sync_committee
+ assert test.store.best_valid_update is None
+ assert test.store.optimistic_header.slot == attested_state.slot
+
+ # Advance finality into next sync committee period, but omit `next_sync_committee`
+ transition_to(spec, state, compute_start_slot_at_next_sync_committee_period(spec, state))
+ next_slots(spec, state, spec.SLOTS_PER_EPOCH - 1)
+ finalized_block = state_transition_with_full_block(spec, state, True, True)
+ finalized_state = state.copy()
+ _, _, state = next_slots_with_attestations(spec, state, spec.SLOTS_PER_EPOCH - 1, True, True)
+ justified_block = state_transition_with_full_block(spec, state, True, True)
+ justified_state = state.copy()
+ _, _, state = next_slots_with_attestations(spec, state, spec.SLOTS_PER_EPOCH, True, True)
+ attested_state = state.copy()
+ sync_aggregate, _ = get_sync_aggregate(spec, state)
+ block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
+ yield from emit_update(test, spec, state, block, attested_state, finalized_block, with_next_sync_committee=False)
+ assert test.store.finalized_header.slot == finalized_state.slot
+ assert not spec.is_next_sync_committee_known(test.store)
+ assert test.store.best_valid_update is None
+ assert test.store.optimistic_header.slot == attested_state.slot
+
+ # Advance finality once more, with `next_sync_committee` still unknown
+ past_state = finalized_state
+ finalized_block = justified_block
+ finalized_state = justified_state
+ _, _, state = next_slots_with_attestations(spec, state, spec.SLOTS_PER_EPOCH - 1, True, True)
+ attested_state = state.copy()
+ sync_aggregate, _ = get_sync_aggregate(spec, state)
+ block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
+
+ # Apply `LightClientUpdate` without `finalized_header` nor `next_sync_committee`
+ update = yield from emit_update(test, spec, state, block, attested_state, None, with_next_sync_committee=False)
+ assert test.store.finalized_header.slot == past_state.slot
+ assert not spec.is_next_sync_committee_known(test.store)
+ assert test.store.best_valid_update == update
+ assert test.store.optimistic_header.slot == attested_state.slot
+
+ # Apply `LightClientUpdate` with `finalized_header` but no `next_sync_committee`
+ yield from emit_update(test, spec, state, block, attested_state, finalized_block, with_next_sync_committee=False)
+ assert test.store.finalized_header.slot == finalized_state.slot
+ assert not spec.is_next_sync_committee_known(test.store)
+ assert test.store.best_valid_update is None
+ assert test.store.optimistic_header.slot == attested_state.slot
+
+ # Apply full `LightClientUpdate`, supplying `next_sync_committee`
+ yield from emit_update(test, spec, state, block, attested_state, finalized_block)
+ assert test.store.finalized_header.slot == finalized_state.slot
+ assert test.store.next_sync_committee == finalized_state.next_sync_committee
+ assert test.store.best_valid_update is None
+ assert test.store.optimistic_header.slot == attested_state.slot
+
+ # Finish test
+ yield from finish_test(test)
diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_update_ranking.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_update_ranking.py
new file mode 100644
index 000000000..ae5d5296a
--- /dev/null
+++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_update_ranking.py
@@ -0,0 +1,161 @@
+from eth2spec.test.context import (
+ spec_state_test,
+ with_presets,
+ with_altair_and_later,
+)
+from eth2spec.test.helpers.attestations import (
+ next_slots_with_attestations,
+ state_transition_with_full_block,
+)
+from eth2spec.test.helpers.constants import MINIMAL
+from eth2spec.test.helpers.light_client import (
+ get_sync_aggregate,
+ signed_block_to_header,
+)
+from eth2spec.test.helpers.state import (
+ next_slots,
+)
+from math import floor
+
+
+def create_update(spec, test, with_next_sync_committee, with_finality, participation_rate):
+ attested_state, attested_block, finalized_block = test
+ num_participants = floor(spec.SYNC_COMMITTEE_SIZE * participation_rate)
+
+ attested_header = signed_block_to_header(spec, attested_block)
+
+ if with_next_sync_committee:
+ next_sync_committee = attested_state.next_sync_committee
+ next_sync_committee_branch = spec.compute_merkle_proof_for_state(attested_state, spec.NEXT_SYNC_COMMITTEE_INDEX)
+ else:
+ next_sync_committee = spec.SyncCommittee()
+ next_sync_committee_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX))]
+
+ if with_finality:
+ finalized_header = signed_block_to_header(spec, finalized_block)
+ finality_branch = spec.compute_merkle_proof_for_state(attested_state, spec.FINALIZED_ROOT_INDEX)
+ else:
+ finalized_header = spec.BeaconBlockHeader()
+ finality_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.FINALIZED_ROOT_INDEX))]
+
+ sync_aggregate, signature_slot = get_sync_aggregate(spec, attested_state, num_participants)
+
+ return spec.LightClientUpdate(
+ attested_header=attested_header,
+ next_sync_committee=next_sync_committee,
+ next_sync_committee_branch=next_sync_committee_branch,
+ finalized_header=finalized_header,
+ finality_branch=finality_branch,
+ sync_aggregate=sync_aggregate,
+ signature_slot=signature_slot,
+ )
+
+
+@with_altair_and_later
+@spec_state_test
+@with_presets([MINIMAL], reason="too slow")
+def test_update_ranking(spec, state):
+ # Set up blocks and states:
+ # - `sig_finalized` / `sig_attested` --> Only signature in next sync committee period
+ # - `att_finalized` / `att_attested` --> Attested header also in next sync committee period
+ # - `fin_finalized` / `fin_attested` --> Finalized header also in next sync committee period
+ # - `lat_finalized` / `lat_attested` --> Like `fin`, but at a later `attested_header.slot`
+ next_slots(spec, state, spec.compute_start_slot_at_epoch(spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD - 3) - 1)
+ sig_finalized_block = state_transition_with_full_block(spec, state, True, True)
+ _, _, state = next_slots_with_attestations(spec, state, spec.SLOTS_PER_EPOCH - 1, True, True)
+ att_finalized_block = state_transition_with_full_block(spec, state, True, True)
+ _, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH - 2, True, True)
+ sig_attested_block = state_transition_with_full_block(spec, state, True, True)
+ sig_attested_state = state.copy()
+ att_attested_block = state_transition_with_full_block(spec, state, True, True)
+ att_attested_state = state.copy()
+ fin_finalized_block = att_attested_block
+ _, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH - 1, True, True)
+ fin_attested_block = state_transition_with_full_block(spec, state, True, True)
+ fin_attested_state = state.copy()
+ lat_finalized_block = fin_finalized_block
+ lat_attested_block = state_transition_with_full_block(spec, state, True, True)
+ lat_attested_state = state.copy()
+ sig = (sig_attested_state, sig_attested_block, sig_finalized_block)
+ att = (att_attested_state, att_attested_block, att_finalized_block)
+ fin = (fin_attested_state, fin_attested_block, fin_finalized_block)
+ lat = (lat_attested_state, lat_attested_block, lat_finalized_block)
+
+ # Create updates (in descending order of quality)
+ updates = [
+ # Updates with sync committee finality
+ create_update(spec, fin, with_next_sync_committee=1, with_finality=1, participation_rate=1.0),
+ create_update(spec, lat, with_next_sync_committee=1, with_finality=1, participation_rate=1.0),
+ create_update(spec, fin, with_next_sync_committee=1, with_finality=1, participation_rate=0.8),
+ create_update(spec, lat, with_next_sync_committee=1, with_finality=1, participation_rate=0.8),
+
+ # Updates without sync committee finality
+ create_update(spec, att, with_next_sync_committee=1, with_finality=1, participation_rate=1.0),
+ create_update(spec, att, with_next_sync_committee=1, with_finality=1, participation_rate=0.8),
+
+ # Updates without indication of any finality
+ create_update(spec, att, with_next_sync_committee=1, with_finality=0, participation_rate=1.0),
+ create_update(spec, fin, with_next_sync_committee=1, with_finality=0, participation_rate=1.0),
+ create_update(spec, lat, with_next_sync_committee=1, with_finality=0, participation_rate=1.0),
+ create_update(spec, att, with_next_sync_committee=1, with_finality=0, participation_rate=0.8),
+ create_update(spec, fin, with_next_sync_committee=1, with_finality=0, participation_rate=0.8),
+ create_update(spec, lat, with_next_sync_committee=1, with_finality=0, participation_rate=0.8),
+
+ # Updates with sync committee finality but no `next_sync_committee`
+ create_update(spec, sig, with_next_sync_committee=0, with_finality=1, participation_rate=1.0),
+ create_update(spec, fin, with_next_sync_committee=0, with_finality=1, participation_rate=1.0),
+ create_update(spec, lat, with_next_sync_committee=0, with_finality=1, participation_rate=1.0),
+ create_update(spec, sig, with_next_sync_committee=0, with_finality=1, participation_rate=0.8),
+ create_update(spec, fin, with_next_sync_committee=0, with_finality=1, participation_rate=0.8),
+ create_update(spec, lat, with_next_sync_committee=0, with_finality=1, participation_rate=0.8),
+
+ # Updates without sync committee finality and also no `next_sync_committee`
+ create_update(spec, att, with_next_sync_committee=0, with_finality=1, participation_rate=1.0),
+ create_update(spec, att, with_next_sync_committee=0, with_finality=1, participation_rate=0.8),
+
+ # Updates without indication of any finality nor `next_sync_committee`
+ create_update(spec, sig, with_next_sync_committee=0, with_finality=0, participation_rate=1.0),
+ create_update(spec, att, with_next_sync_committee=0, with_finality=0, participation_rate=1.0),
+ create_update(spec, fin, with_next_sync_committee=0, with_finality=0, participation_rate=1.0),
+ create_update(spec, lat, with_next_sync_committee=0, with_finality=0, participation_rate=1.0),
+ create_update(spec, sig, with_next_sync_committee=0, with_finality=0, participation_rate=0.8),
+ create_update(spec, att, with_next_sync_committee=0, with_finality=0, participation_rate=0.8),
+ create_update(spec, fin, with_next_sync_committee=0, with_finality=0, participation_rate=0.8),
+ create_update(spec, lat, with_next_sync_committee=0, with_finality=0, participation_rate=0.8),
+
+ # Updates with low sync committee participation
+ create_update(spec, fin, with_next_sync_committee=1, with_finality=1, participation_rate=0.4),
+ create_update(spec, lat, with_next_sync_committee=1, with_finality=1, participation_rate=0.4),
+ create_update(spec, att, with_next_sync_committee=1, with_finality=1, participation_rate=0.4),
+ create_update(spec, att, with_next_sync_committee=1, with_finality=0, participation_rate=0.4),
+ create_update(spec, fin, with_next_sync_committee=1, with_finality=0, participation_rate=0.4),
+ create_update(spec, lat, with_next_sync_committee=1, with_finality=0, participation_rate=0.4),
+ create_update(spec, sig, with_next_sync_committee=0, with_finality=1, participation_rate=0.4),
+ create_update(spec, fin, with_next_sync_committee=0, with_finality=1, participation_rate=0.4),
+ create_update(spec, lat, with_next_sync_committee=0, with_finality=1, participation_rate=0.4),
+ create_update(spec, att, with_next_sync_committee=0, with_finality=1, participation_rate=0.4),
+ create_update(spec, sig, with_next_sync_committee=0, with_finality=0, participation_rate=0.4),
+ create_update(spec, att, with_next_sync_committee=0, with_finality=0, participation_rate=0.4),
+ create_update(spec, fin, with_next_sync_committee=0, with_finality=0, participation_rate=0.4),
+ create_update(spec, lat, with_next_sync_committee=0, with_finality=0, participation_rate=0.4),
+
+ # Updates with very low sync committee participation
+ create_update(spec, fin, with_next_sync_committee=1, with_finality=1, participation_rate=0.2),
+ create_update(spec, lat, with_next_sync_committee=1, with_finality=1, participation_rate=0.2),
+ create_update(spec, att, with_next_sync_committee=1, with_finality=1, participation_rate=0.2),
+ create_update(spec, att, with_next_sync_committee=1, with_finality=0, participation_rate=0.2),
+ create_update(spec, fin, with_next_sync_committee=1, with_finality=0, participation_rate=0.2),
+ create_update(spec, lat, with_next_sync_committee=1, with_finality=0, participation_rate=0.2),
+ create_update(spec, sig, with_next_sync_committee=0, with_finality=1, participation_rate=0.2),
+ create_update(spec, fin, with_next_sync_committee=0, with_finality=1, participation_rate=0.2),
+ create_update(spec, lat, with_next_sync_committee=0, with_finality=1, participation_rate=0.2),
+ create_update(spec, att, with_next_sync_committee=0, with_finality=1, participation_rate=0.2),
+ create_update(spec, sig, with_next_sync_committee=0, with_finality=0, participation_rate=0.2),
+ create_update(spec, att, with_next_sync_committee=0, with_finality=0, participation_rate=0.2),
+ create_update(spec, fin, with_next_sync_committee=0, with_finality=0, participation_rate=0.2),
+ create_update(spec, lat, with_next_sync_committee=0, with_finality=0, participation_rate=0.2),
+ ]
+ yield "updates", updates
+
+ for i in range(len(updates) - 1):
+ assert spec.is_better_update(updates[i], updates[i + 1])
diff --git a/tests/core/pyspec/eth2spec/test/altair/transition/test_transition.py b/tests/core/pyspec/eth2spec/test/altair/transition/test_transition.py
index 32861b866..dcbd8a38b 100644
--- a/tests/core/pyspec/eth2spec/test/altair/transition/test_transition.py
+++ b/tests/core/pyspec/eth2spec/test/altair/transition/test_transition.py
@@ -3,6 +3,9 @@ from eth2spec.test.context import (
ForkMeta,
with_fork_metas,
)
+from eth2spec.test.helpers.random import (
+ randomize_state,
+)
from eth2spec.test.helpers.constants import (
ALL_PRE_POST_FORKS,
)
@@ -17,9 +20,31 @@ from eth2spec.test.helpers.fork_transition import (
skip_slots,
state_transition_across_slots,
transition_to_next_epoch_and_append_blocks,
+ transition_until_fork,
)
+@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=2) for pre, post in ALL_PRE_POST_FORKS])
+def test_simple_transition(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
+ transition_until_fork(spec, state, fork_epoch)
+
+ # check pre state
+ assert spec.get_current_epoch(state) < fork_epoch
+
+ yield "pre", state
+
+ # irregular state transition to handle fork:
+ blocks = []
+ state, block = do_fork(state, spec, post_spec, fork_epoch)
+ blocks.append(post_tag(block))
+
+ # continue regular state transition with new spec into next epoch
+ transition_to_next_epoch_and_append_blocks(post_spec, state, post_tag, blocks, only_last_block=True)
+
+ yield "blocks", blocks
+ yield "post", state
+
+
@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=2) for pre, post in ALL_PRE_POST_FORKS])
def test_normal_transition(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
"""
@@ -56,6 +81,37 @@ def test_normal_transition(state, fork_epoch, spec, post_spec, pre_tag, post_tag
yield "post", state
+@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=8) for pre, post in ALL_PRE_POST_FORKS])
+def test_transition_randomized_state(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
+ randomize_state(spec, state)
+
+ transition_until_fork(spec, state, fork_epoch)
+
+ # check pre state
+ assert spec.get_current_epoch(state) < fork_epoch
+
+ yield "pre", state
+
+ # irregular state transition to handle fork:
+ blocks = []
+ # since there are slashed validators, set with_block=False here
+ state, _ = do_fork(state, spec, post_spec, fork_epoch, with_block=False)
+ slashed_indices = [index for index, validator in enumerate(state.validators) if validator.slashed]
+
+ # continue regular state transition with new spec into next epoch
+ transition_to_next_epoch_and_append_blocks(
+ post_spec,
+ state,
+ post_tag,
+ blocks,
+ only_last_block=True,
+ ignoring_proposers=slashed_indices,
+ )
+
+ yield "blocks", blocks
+ yield "post", state
+
+
@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=2) for pre, post in ALL_PRE_POST_FORKS])
def test_transition_missing_first_post_block(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
"""
diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/transition/__init__.py b/tests/core/pyspec/eth2spec/test/altair/unittests/light_client/__init__.py
similarity index 100%
rename from tests/core/pyspec/eth2spec/test/bellatrix/transition/__init__.py
rename to tests/core/pyspec/eth2spec/test/altair/unittests/light_client/__init__.py
diff --git a/tests/core/pyspec/eth2spec/test/altair/unittests/light_client/test_sync_protocol.py b/tests/core/pyspec/eth2spec/test/altair/unittests/light_client/test_sync_protocol.py
new file mode 100644
index 000000000..e7845292a
--- /dev/null
+++ b/tests/core/pyspec/eth2spec/test/altair/unittests/light_client/test_sync_protocol.py
@@ -0,0 +1,201 @@
+from copy import deepcopy
+
+from eth2spec.test.context import (
+ spec_state_test_with_matching_config,
+ with_presets,
+ with_altair_and_later,
+)
+from eth2spec.test.helpers.attestations import (
+ next_epoch_with_attestations,
+ state_transition_with_full_block,
+)
+from eth2spec.test.helpers.constants import MINIMAL
+from eth2spec.test.helpers.light_client import (
+ get_sync_aggregate,
+ initialize_light_client_store,
+ signed_block_to_header,
+)
+from eth2spec.test.helpers.state import (
+ next_slots,
+)
+
+
+@with_altair_and_later
+@spec_state_test_with_matching_config
+def test_process_light_client_update_not_timeout(spec, state):
+ store = initialize_light_client_store(spec, state)
+
+ # Block at slot 1 doesn't increase sync committee period, so it won't force update store.finalized_header
+ attested_block = state_transition_with_full_block(spec, state, False, False)
+ attested_header = signed_block_to_header(spec, attested_block)
+
+ # Sync committee signing the attested_header
+ sync_aggregate, signature_slot = get_sync_aggregate(spec, state)
+ next_sync_committee = spec.SyncCommittee()
+ next_sync_committee_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX))]
+
+ # Ensure that finality checkpoint is genesis
+ assert state.finalized_checkpoint.epoch == 0
+ # Finality is unchanged
+ finality_header = spec.BeaconBlockHeader()
+ finality_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.FINALIZED_ROOT_INDEX))]
+
+ update = spec.LightClientUpdate(
+ attested_header=attested_header,
+ next_sync_committee=next_sync_committee,
+ next_sync_committee_branch=next_sync_committee_branch,
+ finalized_header=finality_header,
+ finality_branch=finality_branch,
+ sync_aggregate=sync_aggregate,
+ signature_slot=signature_slot,
+ )
+
+ pre_store = deepcopy(store)
+
+ spec.process_light_client_update(store, update, signature_slot, state.genesis_validators_root)
+
+ assert store.finalized_header == pre_store.finalized_header
+ assert store.best_valid_update == update
+ assert store.optimistic_header == update.attested_header
+ assert store.current_max_active_participants > 0
+
+
+@with_altair_and_later
+@spec_state_test_with_matching_config
+@with_presets([MINIMAL], reason="too slow")
+def test_process_light_client_update_at_period_boundary(spec, state):
+ store = initialize_light_client_store(spec, state)
+
+ # Forward to slot before next sync committee period so that next block is final one in period
+ next_slots(spec, state, spec.UPDATE_TIMEOUT - 2)
+ store_period = spec.compute_sync_committee_period(spec.compute_epoch_at_slot(store.optimistic_header.slot))
+ update_period = spec.compute_sync_committee_period(spec.compute_epoch_at_slot(state.slot))
+ assert store_period == update_period
+
+ attested_block = state_transition_with_full_block(spec, state, False, False)
+ attested_header = signed_block_to_header(spec, attested_block)
+
+ # Sync committee signing the attested_header
+ sync_aggregate, signature_slot = get_sync_aggregate(spec, state)
+ next_sync_committee = spec.SyncCommittee()
+ next_sync_committee_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX))]
+
+ # Finality is unchanged
+ finality_header = spec.BeaconBlockHeader()
+ finality_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.FINALIZED_ROOT_INDEX))]
+
+ update = spec.LightClientUpdate(
+ attested_header=attested_header,
+ next_sync_committee=next_sync_committee,
+ next_sync_committee_branch=next_sync_committee_branch,
+ finalized_header=finality_header,
+ finality_branch=finality_branch,
+ sync_aggregate=sync_aggregate,
+ signature_slot=signature_slot,
+ )
+
+ pre_store = deepcopy(store)
+
+ spec.process_light_client_update(store, update, signature_slot, state.genesis_validators_root)
+
+ assert store.finalized_header == pre_store.finalized_header
+ assert store.best_valid_update == update
+ assert store.optimistic_header == update.attested_header
+ assert store.current_max_active_participants > 0
+
+
+@with_altair_and_later
+@spec_state_test_with_matching_config
+@with_presets([MINIMAL], reason="too slow")
+def test_process_light_client_update_timeout(spec, state):
+ store = initialize_light_client_store(spec, state)
+
+ # Forward to next sync committee period
+ next_slots(spec, state, spec.UPDATE_TIMEOUT)
+ store_period = spec.compute_sync_committee_period(spec.compute_epoch_at_slot(store.optimistic_header.slot))
+ update_period = spec.compute_sync_committee_period(spec.compute_epoch_at_slot(state.slot))
+ assert store_period + 1 == update_period
+
+ attested_block = state_transition_with_full_block(spec, state, False, False)
+ attested_header = signed_block_to_header(spec, attested_block)
+
+ # Sync committee signing the attested_header
+ sync_aggregate, signature_slot = get_sync_aggregate(spec, state)
+
+ # Sync committee is updated
+ next_sync_committee = state.next_sync_committee
+ next_sync_committee_branch = spec.compute_merkle_proof_for_state(state, spec.NEXT_SYNC_COMMITTEE_INDEX)
+ # Finality is unchanged
+ finality_header = spec.BeaconBlockHeader()
+ finality_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.FINALIZED_ROOT_INDEX))]
+
+ update = spec.LightClientUpdate(
+ attested_header=attested_header,
+ next_sync_committee=next_sync_committee,
+ next_sync_committee_branch=next_sync_committee_branch,
+ finalized_header=finality_header,
+ finality_branch=finality_branch,
+ sync_aggregate=sync_aggregate,
+ signature_slot=signature_slot,
+ )
+
+ pre_store = deepcopy(store)
+
+ spec.process_light_client_update(store, update, signature_slot, state.genesis_validators_root)
+
+ assert store.finalized_header == pre_store.finalized_header
+ assert store.best_valid_update == update
+ assert store.optimistic_header == update.attested_header
+ assert store.current_max_active_participants > 0
+
+
+@with_altair_and_later
+@spec_state_test_with_matching_config
+@with_presets([MINIMAL], reason="too slow")
+def test_process_light_client_update_finality_updated(spec, state):
+ store = initialize_light_client_store(spec, state)
+
+ # Change finality
+ blocks = []
+ next_slots(spec, state, spec.SLOTS_PER_EPOCH * 2)
+ for epoch in range(3):
+ prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, True, True)
+ blocks += new_blocks
+ # Ensure that finality checkpoint has changed
+ assert state.finalized_checkpoint.epoch == 3
+ # Ensure that it's same period
+ store_period = spec.compute_sync_committee_period(spec.compute_epoch_at_slot(store.optimistic_header.slot))
+ update_period = spec.compute_sync_committee_period(spec.compute_epoch_at_slot(state.slot))
+ assert store_period == update_period
+
+ attested_block = blocks[-1]
+ attested_header = signed_block_to_header(spec, attested_block)
+
+ # Sync committee signing the attested_header
+ sync_aggregate, signature_slot = get_sync_aggregate(spec, state)
+
+ # Updated sync_committee and finality
+ next_sync_committee = spec.SyncCommittee()
+ next_sync_committee_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX))]
+ finalized_block = blocks[spec.SLOTS_PER_EPOCH - 1]
+ finalized_header = signed_block_to_header(spec, finalized_block)
+ assert finalized_header.slot == spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)
+ assert finalized_header.hash_tree_root() == state.finalized_checkpoint.root
+ finality_branch = spec.compute_merkle_proof_for_state(state, spec.FINALIZED_ROOT_INDEX)
+
+ update = spec.LightClientUpdate(
+ attested_header=attested_header,
+ next_sync_committee=next_sync_committee,
+ next_sync_committee_branch=next_sync_committee_branch,
+ finalized_header=finalized_header,
+ finality_branch=finality_branch,
+ sync_aggregate=sync_aggregate,
+ signature_slot=signature_slot,
+ )
+
+ spec.process_light_client_update(store, update, signature_slot, state.genesis_validators_root)
+
+ assert store.finalized_header == update.finalized_header
+ assert store.best_valid_update is None
+ assert store.optimistic_header == update.attested_header
+ assert store.current_max_active_participants > 0
diff --git a/tests/core/pyspec/eth2spec/test/altair/unittests/test_config_override.py b/tests/core/pyspec/eth2spec/test/altair/unittests/test_config_override.py
index f1503c39f..b7df49790 100644
--- a/tests/core/pyspec/eth2spec/test/altair/unittests/test_config_override.py
+++ b/tests/core/pyspec/eth2spec/test/altair/unittests/test_config_override.py
@@ -1,4 +1,11 @@
-from eth2spec.test.context import spec_configured_state_test, with_phases
+from eth2spec.test.context import (
+ is_post_capella,
+ is_post_eip4844,
+ spec_configured_state_test,
+ spec_state_test_with_matching_config,
+ with_all_phases,
+ with_phases,
+)
from eth2spec.test.helpers.constants import ALTAIR
@@ -17,3 +24,34 @@ def test_config_override(spec, state):
# TODO: it would be nice if the create_genesis_state actually outputs a state
# for the fork with a slot that matches at least the fork boundary.
# assert spec.get_current_epoch(state) >= 4
+
+
+@with_all_phases
+@spec_state_test_with_matching_config
+def test_override_config_fork_epoch(spec, state):
+ if state.fork.current_version == spec.config.GENESIS_FORK_VERSION:
+ return
+
+ assert spec.config.ALTAIR_FORK_EPOCH == spec.GENESIS_EPOCH
+ if state.fork.current_version == spec.config.ALTAIR_FORK_VERSION:
+ return
+
+ assert spec.config.BELLATRIX_FORK_EPOCH == spec.GENESIS_EPOCH
+ if state.fork.current_version == spec.config.BELLATRIX_FORK_VERSION:
+ return
+
+ if is_post_capella(spec):
+ assert spec.config.CAPELLA_FORK_EPOCH == spec.GENESIS_EPOCH
+ if state.fork.current_version == spec.config.CAPELLA_FORK_VERSION:
+ return
+
+ if is_post_eip4844(spec):
+ assert spec.config.EIP4844_FORK_EPOCH == spec.GENESIS_EPOCH
+ if state.fork.current_version == spec.config.EIP4844_FORK_VERSION:
+ return
+
+ assert spec.config.SHARDING_FORK_EPOCH == spec.GENESIS_EPOCH
+ if state.fork.current_version == spec.config.SHARDING_FORK_VERSION:
+ return
+
+ assert False # Fork is missing
diff --git a/tests/core/pyspec/eth2spec/test/altair/unittests/test_sync_protocol.py b/tests/core/pyspec/eth2spec/test/altair/unittests/test_sync_protocol.py
deleted file mode 100644
index 04553b3f8..000000000
--- a/tests/core/pyspec/eth2spec/test/altair/unittests/test_sync_protocol.py
+++ /dev/null
@@ -1,179 +0,0 @@
-from copy import deepcopy
-
-from eth2spec.test.context import (
- spec_state_test,
- with_presets,
- with_altair_and_later,
-)
-from eth2spec.test.helpers.attestations import (
- next_epoch_with_attestations,
-)
-from eth2spec.test.helpers.block import (
- build_empty_block,
- build_empty_block_for_next_slot,
-)
-from eth2spec.test.helpers.constants import MINIMAL
-from eth2spec.test.helpers.light_client import (
- get_sync_aggregate,
- initialize_light_client_store,
-)
-from eth2spec.test.helpers.state import (
- next_slots,
- state_transition_and_sign_block,
-)
-from eth2spec.test.helpers.merkle import build_proof
-
-
-@with_altair_and_later
-@spec_state_test
-def test_process_light_client_update_not_timeout(spec, state):
- store = initialize_light_client_store(spec, state)
-
- # Block at slot 1 doesn't increase sync committee period, so it won't force update store.finalized_header
- block = build_empty_block_for_next_slot(spec, state)
- signed_block = state_transition_and_sign_block(spec, state, block)
- block_header = spec.BeaconBlockHeader(
- slot=signed_block.message.slot,
- proposer_index=signed_block.message.proposer_index,
- parent_root=signed_block.message.parent_root,
- state_root=signed_block.message.state_root,
- body_root=signed_block.message.body.hash_tree_root(),
- )
- # Sync committee signing the header
- sync_aggregate = get_sync_aggregate(spec, state, block_header, block_root=None)
- next_sync_committee_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX))]
-
- # Ensure that finality checkpoint is genesis
- assert state.finalized_checkpoint.epoch == 0
- # Finality is unchanged
- finality_header = spec.BeaconBlockHeader()
- finality_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.FINALIZED_ROOT_INDEX))]
-
- update = spec.LightClientUpdate(
- attested_header=block_header,
- next_sync_committee=state.next_sync_committee,
- next_sync_committee_branch=next_sync_committee_branch,
- finalized_header=finality_header,
- finality_branch=finality_branch,
- sync_aggregate=sync_aggregate,
- fork_version=state.fork.current_version,
- )
-
- pre_store = deepcopy(store)
-
- spec.process_light_client_update(store, update, state.slot, state.genesis_validators_root)
-
- assert store.current_max_active_participants > 0
- assert store.optimistic_header == update.attested_header
- assert store.finalized_header == pre_store.finalized_header
- assert store.best_valid_update == update
-
-
-@with_altair_and_later
-@spec_state_test
-@with_presets([MINIMAL], reason="too slow")
-def test_process_light_client_update_timeout(spec, state):
- store = initialize_light_client_store(spec, state)
-
- # Forward to next sync committee period
- next_slots(spec, state, spec.UPDATE_TIMEOUT)
- snapshot_period = spec.compute_sync_committee_period(spec.compute_epoch_at_slot(store.optimistic_header.slot))
- update_period = spec.compute_sync_committee_period(spec.compute_epoch_at_slot(state.slot))
- assert snapshot_period + 1 == update_period
-
- block = build_empty_block_for_next_slot(spec, state)
- signed_block = state_transition_and_sign_block(spec, state, block)
- block_header = spec.BeaconBlockHeader(
- slot=signed_block.message.slot,
- proposer_index=signed_block.message.proposer_index,
- parent_root=signed_block.message.parent_root,
- state_root=signed_block.message.state_root,
- body_root=signed_block.message.body.hash_tree_root(),
- )
-
- # Sync committee signing the finalized_block_header
- sync_aggregate = get_sync_aggregate(
- spec, state, block_header, block_root=spec.Root(block_header.hash_tree_root()))
-
- # Sync committee is updated
- next_sync_committee_branch = build_proof(state.get_backing(), spec.NEXT_SYNC_COMMITTEE_INDEX)
- # Finality is unchanged
- finality_header = spec.BeaconBlockHeader()
- finality_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.FINALIZED_ROOT_INDEX))]
-
- update = spec.LightClientUpdate(
- attested_header=block_header,
- next_sync_committee=state.next_sync_committee,
- next_sync_committee_branch=next_sync_committee_branch,
- finalized_header=finality_header,
- finality_branch=finality_branch,
- sync_aggregate=sync_aggregate,
- fork_version=state.fork.current_version,
- )
-
- pre_store = deepcopy(store)
-
- spec.process_light_client_update(store, update, state.slot, state.genesis_validators_root)
-
- assert store.current_max_active_participants > 0
- assert store.optimistic_header == update.attested_header
- assert store.best_valid_update == update
- assert store.finalized_header == pre_store.finalized_header
-
-
-@with_altair_and_later
-@spec_state_test
-@with_presets([MINIMAL], reason="too slow")
-def test_process_light_client_update_finality_updated(spec, state):
- store = initialize_light_client_store(spec, state)
-
- # Change finality
- blocks = []
- next_slots(spec, state, spec.SLOTS_PER_EPOCH * 2)
- for epoch in range(3):
- prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, True, True)
- blocks += new_blocks
- # Ensure that finality checkpoint has changed
- assert state.finalized_checkpoint.epoch == 3
- # Ensure that it's same period
- snapshot_period = spec.compute_sync_committee_period(spec.compute_epoch_at_slot(store.optimistic_header.slot))
- update_period = spec.compute_sync_committee_period(spec.compute_epoch_at_slot(state.slot))
- assert snapshot_period == update_period
-
- # Updated sync_committee and finality
- next_sync_committee_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX))]
- finalized_block_header = blocks[spec.SLOTS_PER_EPOCH - 1].message
- assert finalized_block_header.slot == spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)
- assert finalized_block_header.hash_tree_root() == state.finalized_checkpoint.root
- finality_branch = build_proof(state.get_backing(), spec.FINALIZED_ROOT_INDEX)
-
- # Build block header
- block = build_empty_block(spec, state)
- block_header = spec.BeaconBlockHeader(
- slot=block.slot,
- proposer_index=block.proposer_index,
- parent_root=block.parent_root,
- state_root=state.hash_tree_root(),
- body_root=block.body.hash_tree_root(),
- )
-
- # Sync committee signing the finalized_block_header
- sync_aggregate = get_sync_aggregate(
- spec, state, block_header, block_root=spec.Root(block_header.hash_tree_root()))
-
- update = spec.LightClientUpdate(
- attested_header=block_header,
- next_sync_committee=state.next_sync_committee,
- next_sync_committee_branch=next_sync_committee_branch,
- finalized_header=finalized_block_header,
- finality_branch=finality_branch,
- sync_aggregate=sync_aggregate,
- fork_version=state.fork.current_version,
- )
-
- spec.process_light_client_update(store, update, state.slot, state.genesis_validators_root)
-
- assert store.current_max_active_participants > 0
- assert store.optimistic_header == update.attested_header
- assert store.finalized_header == update.finalized_header
- assert store.best_valid_update is None
diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/block_processing/test_process_execution_payload.py b/tests/core/pyspec/eth2spec/test/bellatrix/block_processing/test_process_execution_payload.py
index cd7a74259..2819b2a24 100644
--- a/tests/core/pyspec/eth2spec/test/bellatrix/block_processing/test_process_execution_payload.py
+++ b/tests/core/pyspec/eth2spec/test/bellatrix/block_processing/test_process_execution_payload.py
@@ -1,5 +1,8 @@
+from random import Random
+
from eth2spec.test.helpers.execution_payload import (
build_empty_execution_payload,
+ build_randomized_execution_payload,
get_execution_payload_header,
build_state_with_incomplete_transition,
build_state_with_complete_transition,
@@ -46,14 +49,8 @@ def run_execution_payload_processing(spec, state, execution_payload, valid=True,
assert state.latest_execution_payload_header == get_execution_payload_header(spec, execution_payload)
-@with_bellatrix_and_later
-@spec_state_test
-def test_success_first_payload(spec, state):
- # pre-state
- state = build_state_with_incomplete_transition(spec, state)
+def run_success_test(spec, state):
next_slot(spec, state)
-
- # execution payload
execution_payload = build_empty_execution_payload(spec, state)
yield from run_execution_payload_processing(spec, state, execution_payload)
@@ -61,12 +58,23 @@ def test_success_first_payload(spec, state):
@with_bellatrix_and_later
@spec_state_test
-def test_success_regular_payload(spec, state):
- # pre-state
- state = build_state_with_complete_transition(spec, state)
- next_slot(spec, state)
+def test_success_first_payload(spec, state):
+ state = build_state_with_incomplete_transition(spec, state)
- # execution payload
+ yield from run_success_test(spec, state)
+
+
+@with_bellatrix_and_later
+@spec_state_test
+def test_success_regular_payload(spec, state):
+ state = build_state_with_complete_transition(spec, state)
+
+ yield from run_success_test(spec, state)
+
+
+def run_gap_slot_test(spec, state):
+ next_slot(spec, state)
+ next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
yield from run_execution_payload_processing(spec, state, execution_payload)
@@ -75,83 +83,66 @@ def test_success_regular_payload(spec, state):
@with_bellatrix_and_later
@spec_state_test
def test_success_first_payload_with_gap_slot(spec, state):
- # pre-state
state = build_state_with_incomplete_transition(spec, state)
- next_slot(spec, state)
- next_slot(spec, state)
-
- # execution payload
- execution_payload = build_empty_execution_payload(spec, state)
-
- yield from run_execution_payload_processing(spec, state, execution_payload)
+ yield from run_gap_slot_test(spec, state)
@with_bellatrix_and_later
@spec_state_test
def test_success_regular_payload_with_gap_slot(spec, state):
- # pre-state
state = build_state_with_complete_transition(spec, state)
- next_slot(spec, state)
- next_slot(spec, state)
+ yield from run_gap_slot_test(spec, state)
- # execution payload
+
+def run_bad_execution_test(spec, state):
+ # completely valid payload, but execution itself fails (e.g. block exceeds gas limit)
+ next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
- yield from run_execution_payload_processing(spec, state, execution_payload)
+ yield from run_execution_payload_processing(spec, state, execution_payload, valid=False, execution_valid=False)
@with_bellatrix_and_later
@spec_state_test
def test_bad_execution_first_payload(spec, state):
- # completely valid payload, but execution itself fails (e.g. block exceeds gas limit)
-
- # pre-state
state = build_state_with_incomplete_transition(spec, state)
- next_slot(spec, state)
-
- # execution payload
- execution_payload = build_empty_execution_payload(spec, state)
-
- yield from run_execution_payload_processing(spec, state, execution_payload, valid=False, execution_valid=False)
+ yield from run_bad_execution_test(spec, state)
@with_bellatrix_and_later
@spec_state_test
def test_bad_execution_regular_payload(spec, state):
- # completely valid payload, but execution itself fails (e.g. block exceeds gas limit)
-
- # pre-state
state = build_state_with_complete_transition(spec, state)
+ yield from run_bad_execution_test(spec, state)
+
+
+@with_bellatrix_and_later
+@spec_state_test
+def test_bad_parent_hash_first_payload(spec, state):
+ state = build_state_with_incomplete_transition(spec, state)
next_slot(spec, state)
- # execution payload
execution_payload = build_empty_execution_payload(spec, state)
+ execution_payload.parent_hash = b'\x55' * 32
- yield from run_execution_payload_processing(spec, state, execution_payload, valid=False, execution_valid=False)
+ yield from run_execution_payload_processing(spec, state, execution_payload, valid=True)
@with_bellatrix_and_later
@spec_state_test
def test_bad_parent_hash_regular_payload(spec, state):
- # pre-state
state = build_state_with_complete_transition(spec, state)
next_slot(spec, state)
- # execution payload
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.parent_hash = spec.Hash32()
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
-@with_bellatrix_and_later
-@spec_state_test
-def test_bad_random_first_payload(spec, state):
- # pre-state
- state = build_state_with_incomplete_transition(spec, state)
+def run_bad_prev_randao_test(spec, state):
next_slot(spec, state)
- # execution payload
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.prev_randao = b'\x42' * 32
@@ -160,26 +151,21 @@ def test_bad_random_first_payload(spec, state):
@with_bellatrix_and_later
@spec_state_test
-def test_bad_random_regular_payload(spec, state):
- # pre-state
- state = build_state_with_complete_transition(spec, state)
- next_slot(spec, state)
-
- # execution payload
- execution_payload = build_empty_execution_payload(spec, state)
- execution_payload.prev_randao = b'\x04' * 32
-
- yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
+def test_bad_prev_randao_first_payload(spec, state):
+ state = build_state_with_incomplete_transition(spec, state)
+ yield from run_bad_prev_randao_test(spec, state)
@with_bellatrix_and_later
@spec_state_test
-def test_bad_everything_regular_payload(spec, state):
- # pre-state
+def test_bad_prev_randao_regular_payload(spec, state):
state = build_state_with_complete_transition(spec, state)
+ yield from run_bad_prev_randao_test(spec, state)
+
+
+def run_bad_everything_test(spec, state):
next_slot(spec, state)
- # execution payload
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.parent_hash = spec.Hash32()
execution_payload.prev_randao = spec.Bytes32()
@@ -190,59 +176,171 @@ def test_bad_everything_regular_payload(spec, state):
@with_bellatrix_and_later
@spec_state_test
-def test_bad_timestamp_first_payload(spec, state):
- # pre-state
+def test_bad_everything_first_payload(spec, state):
state = build_state_with_incomplete_transition(spec, state)
+ yield from run_bad_everything_test(spec, state)
+
+
+@with_bellatrix_and_later
+@spec_state_test
+def test_bad_everything_regular_payload(spec, state):
+ state = build_state_with_complete_transition(spec, state)
+ yield from run_bad_everything_test(spec, state)
+
+
+def run_bad_timestamp_test(spec, state, is_future):
next_slot(spec, state)
# execution payload
execution_payload = build_empty_execution_payload(spec, state)
- execution_payload.timestamp = execution_payload.timestamp + 1
+ if is_future:
+ timestamp = execution_payload.timestamp + 1
+ else:
+ timestamp = execution_payload.timestamp - 1
+ execution_payload.timestamp = timestamp
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
@with_bellatrix_and_later
@spec_state_test
-def test_bad_timestamp_regular_payload(spec, state):
- # pre-state
+def test_future_timestamp_first_payload(spec, state):
+ state = build_state_with_incomplete_transition(spec, state)
+ yield from run_bad_timestamp_test(spec, state, is_future=True)
+
+
+@with_bellatrix_and_later
+@spec_state_test
+def test_future_timestamp_regular_payload(spec, state):
state = build_state_with_complete_transition(spec, state)
+ yield from run_bad_timestamp_test(spec, state, is_future=True)
+
+
+@with_bellatrix_and_later
+@spec_state_test
+def test_past_timestamp_first_payload(spec, state):
+ state = build_state_with_incomplete_transition(spec, state)
+ yield from run_bad_timestamp_test(spec, state, is_future=False)
+
+
+@with_bellatrix_and_later
+@spec_state_test
+def test_past_timestamp_regular_payload(spec, state):
+ state = build_state_with_complete_transition(spec, state)
+ yield from run_bad_timestamp_test(spec, state, is_future=False)
+
+
+def run_non_empty_extra_data_test(spec, state):
next_slot(spec, state)
- # execution payload
execution_payload = build_empty_execution_payload(spec, state)
- execution_payload.timestamp = execution_payload.timestamp + 1
+ execution_payload.extra_data = b'\x45' * 12
- yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
+ yield from run_execution_payload_processing(spec, state, execution_payload)
+ assert state.latest_execution_payload_header.extra_data == execution_payload.extra_data
@with_bellatrix_and_later
@spec_state_test
def test_non_empty_extra_data_first_payload(spec, state):
- # pre-state
state = build_state_with_incomplete_transition(spec, state)
- next_slot(spec, state)
-
- # execution payload
- execution_payload = build_empty_execution_payload(spec, state)
- execution_payload.extra_data = b'\x45' * 12
-
- yield from run_execution_payload_processing(spec, state, execution_payload)
-
- assert state.latest_execution_payload_header.extra_data == execution_payload.extra_data
+ yield from run_non_empty_extra_data_test(spec, state)
@with_bellatrix_and_later
@spec_state_test
def test_non_empty_extra_data_regular_payload(spec, state):
- # pre-state
state = build_state_with_complete_transition(spec, state)
+ yield from run_non_empty_extra_data_test(spec, state)
+
+
+def run_non_empty_transactions_test(spec, state):
next_slot(spec, state)
- # execution payload
execution_payload = build_empty_execution_payload(spec, state)
- execution_payload.extra_data = b'\x45' * 12
+ num_transactions = 2
+ execution_payload.transactions = [
+ spec.Transaction(b'\x99' * 128)
+ for _ in range(num_transactions)
+ ]
yield from run_execution_payload_processing(spec, state, execution_payload)
+ assert state.latest_execution_payload_header.transactions_root == execution_payload.transactions.hash_tree_root()
- assert state.latest_execution_payload_header.extra_data == execution_payload.extra_data
+
+@with_bellatrix_and_later
+@spec_state_test
+def test_non_empty_transactions_first_payload(spec, state):
+ state = build_state_with_incomplete_transition(spec, state)
+    yield from run_non_empty_transactions_test(spec, state)
+
+
+@with_bellatrix_and_later
+@spec_state_test
+def test_non_empty_transactions_regular_payload(spec, state):
+ state = build_state_with_complete_transition(spec, state)
+    yield from run_non_empty_transactions_test(spec, state)
+
+
+def run_zero_length_transaction_test(spec, state):
+ next_slot(spec, state)
+
+ execution_payload = build_empty_execution_payload(spec, state)
+ execution_payload.transactions = [spec.Transaction(b'')]
+ assert len(execution_payload.transactions[0]) == 0
+
+ yield from run_execution_payload_processing(spec, state, execution_payload)
+ assert state.latest_execution_payload_header.transactions_root == execution_payload.transactions.hash_tree_root()
+
+
+@with_bellatrix_and_later
+@spec_state_test
+def test_zero_length_transaction_first_payload(spec, state):
+ state = build_state_with_incomplete_transition(spec, state)
+ yield from run_zero_length_transaction_test(spec, state)
+
+
+@with_bellatrix_and_later
+@spec_state_test
+def test_zero_length_transaction_regular_payload(spec, state):
+ state = build_state_with_complete_transition(spec, state)
+ yield from run_zero_length_transaction_test(spec, state)
+
+
+def run_randomized_non_validated_execution_fields_test(spec, state, execution_valid=True, rng=Random(5555)):
+ next_slot(spec, state)
+ execution_payload = build_randomized_execution_payload(spec, state, rng)
+
+ yield from run_execution_payload_processing(
+ spec, state,
+ execution_payload,
+ valid=execution_valid, execution_valid=execution_valid
+ )
+
+
+@with_bellatrix_and_later
+@spec_state_test
+def test_randomized_non_validated_execution_fields_first_payload__valid(spec, state):
+ state = build_state_with_incomplete_transition(spec, state)
+ yield from run_randomized_non_validated_execution_fields_test(spec, state)
+
+
+@with_bellatrix_and_later
+@spec_state_test
+def test_randomized_non_validated_execution_fields_regular_payload__valid(spec, state):
+ state = build_state_with_complete_transition(spec, state)
+ yield from run_randomized_non_validated_execution_fields_test(spec, state)
+
+
+@with_bellatrix_and_later
+@spec_state_test
+def test_randomized_non_validated_execution_fields_first_payload__invalid(spec, state):
+ state = build_state_with_incomplete_transition(spec, state)
+ yield from run_randomized_non_validated_execution_fields_test(spec, state, execution_valid=False)
+
+
+@with_bellatrix_and_later
+@spec_state_test
+def test_randomized_non_validated_execution_fields_regular_payload__invalid(spec, state):
+ state = build_state_with_complete_transition(spec, state)
+ yield from run_randomized_non_validated_execution_fields_test(spec, state, execution_valid=False)
diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/bellatrix/sanity/test_blocks.py
index bad922d4f..ef6bb75a9 100644
--- a/tests/core/pyspec/eth2spec/test/bellatrix/sanity/test_blocks.py
+++ b/tests/core/pyspec/eth2spec/test/bellatrix/sanity/test_blocks.py
@@ -1,9 +1,14 @@
+from random import Random
from eth2spec.test.helpers.state import (
- state_transition_and_sign_block
+ state_transition_and_sign_block,
+ next_slot,
)
from eth2spec.test.helpers.block import (
build_empty_block_for_next_slot
)
+from eth2spec.test.helpers.execution_payload import (
+ build_randomized_execution_payload
+)
from eth2spec.test.context import (
with_bellatrix_and_later, spec_state_test
)
@@ -22,7 +27,21 @@ def test_empty_block_transition_no_tx(spec, state):
yield 'blocks', [signed_block]
yield 'post', state
-# TODO: tests with EVM, mock or replacement?
+
+@with_bellatrix_and_later
+@spec_state_test
+def test_empty_block_transition_randomized_payload(spec, state):
+ yield 'pre', state
+
+ block = build_empty_block_for_next_slot(spec, state)
+ next_slot_state = state.copy()
+ next_slot(spec, next_slot_state)
+ block.body.execution_payload = build_randomized_execution_payload(spec, next_slot_state, rng=Random(34433))
+
+ signed_block = state_transition_and_sign_block(spec, state, block)
+
+ yield 'blocks', [signed_block]
+ yield 'post', state
@with_bellatrix_and_later
diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/transition/test_transition.py b/tests/core/pyspec/eth2spec/test/bellatrix/transition/test_transition.py
deleted file mode 100644
index 2382a3758..000000000
--- a/tests/core/pyspec/eth2spec/test/bellatrix/transition/test_transition.py
+++ /dev/null
@@ -1,35 +0,0 @@
-from eth2spec.test.context import (
- ForkMeta,
- with_fork_metas,
-)
-from eth2spec.test.helpers.constants import (
- AFTER_BELLATRIX_PRE_POST_FORKS,
-)
-from eth2spec.test.helpers.fork_transition import (
- do_fork,
- transition_to_next_epoch_and_append_blocks,
- transition_until_fork,
-)
-
-
-@with_fork_metas([
- ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=2) for pre, post in AFTER_BELLATRIX_PRE_POST_FORKS
-])
-def test_sample_transition(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
- transition_until_fork(spec, state, fork_epoch)
-
- # check pre state
- assert spec.get_current_epoch(state) < fork_epoch
-
- yield "pre", state
-
- # irregular state transition to handle fork:
- blocks = []
- state, block = do_fork(state, spec, post_spec, fork_epoch)
- blocks.append(post_tag(block))
-
- # continue regular state transition with new spec into next epoch
- transition_to_next_epoch_and_append_blocks(post_spec, state, post_tag, blocks, only_last_block=True)
-
- yield "blocks", blocks
- yield "post", state
diff --git a/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_withdrawals.py b/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_withdrawals.py
index 204816c99..26ace24b3 100644
--- a/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_withdrawals.py
+++ b/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_withdrawals.py
@@ -7,8 +7,8 @@ from eth2spec.test.context import spec_state_test, expect_assertion_error, with_
from eth2spec.test.helpers.state import next_slot
-def prepare_withdrawals_queue(spec, state, num_withdrawals):
- pre_queue_len = len(state.withdrawals_queue)
+def prepare_withdrawal_queue(spec, state, num_withdrawals):
+ pre_queue_len = len(state.withdrawal_queue)
for i in range(num_withdrawals):
withdrawal = spec.Withdrawal(
@@ -16,9 +16,9 @@ def prepare_withdrawals_queue(spec, state, num_withdrawals):
address=b'\x42' * 20,
amount=200000 + i,
)
- state.withdrawals_queue.append(withdrawal)
+ state.withdrawal_queue.append(withdrawal)
- assert len(state.withdrawals_queue) == num_withdrawals + pre_queue_len
+ assert len(state.withdrawal_queue) == num_withdrawals + pre_queue_len
def run_withdrawals_processing(spec, state, execution_payload, valid=True):
@@ -30,8 +30,8 @@ def run_withdrawals_processing(spec, state, execution_payload, valid=True):
If ``valid == False``, run expecting ``AssertionError``
"""
- pre_withdrawals_queue = state.withdrawals_queue.copy()
- num_withdrawals = min(spec.MAX_WITHDRAWALS_PER_PAYLOAD, len(pre_withdrawals_queue))
+ pre_withdrawal_queue = state.withdrawal_queue.copy()
+ num_withdrawals = min(spec.MAX_WITHDRAWALS_PER_PAYLOAD, len(pre_withdrawal_queue))
yield 'pre', state
yield 'execution_payload', execution_payload
@@ -45,18 +45,18 @@ def run_withdrawals_processing(spec, state, execution_payload, valid=True):
yield 'post', state
- if len(pre_withdrawals_queue) == 0:
- assert len(state.withdrawals_queue) == 0
- elif len(pre_withdrawals_queue) <= num_withdrawals:
- assert len(state.withdrawals_queue) == 0
+ if len(pre_withdrawal_queue) == 0:
+ assert len(state.withdrawal_queue) == 0
+ elif len(pre_withdrawal_queue) <= num_withdrawals:
+ assert len(state.withdrawal_queue) == 0
else:
- assert state.withdrawals_queue == pre_withdrawals_queue[num_withdrawals:]
+ assert state.withdrawal_queue == pre_withdrawal_queue[num_withdrawals:]
@with_capella_and_later
@spec_state_test
def test_success_empty_queue(spec, state):
- assert len(state.withdrawals_queue) == 0
+ assert len(state.withdrawal_queue) == 0
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
@@ -67,7 +67,7 @@ def test_success_empty_queue(spec, state):
@with_capella_and_later
@spec_state_test
def test_success_one_in_queue(spec, state):
- prepare_withdrawals_queue(spec, state, 1)
+ prepare_withdrawal_queue(spec, state, 1)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
@@ -78,7 +78,7 @@ def test_success_one_in_queue(spec, state):
@with_capella_and_later
@spec_state_test
def test_success_max_per_slot_in_queue(spec, state):
- prepare_withdrawals_queue(spec, state, spec.MAX_WITHDRAWALS_PER_PAYLOAD)
+ prepare_withdrawal_queue(spec, state, spec.MAX_WITHDRAWALS_PER_PAYLOAD)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
@@ -89,7 +89,7 @@ def test_success_max_per_slot_in_queue(spec, state):
@with_capella_and_later
@spec_state_test
def test_success_a_lot_in_queue(spec, state):
- prepare_withdrawals_queue(spec, state, spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
+ prepare_withdrawal_queue(spec, state, spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
@@ -104,7 +104,7 @@ def test_success_a_lot_in_queue(spec, state):
@with_capella_and_later
@spec_state_test
def test_fail_empty_queue_non_empty_withdrawals(spec, state):
- assert len(state.withdrawals_queue) == 0
+ assert len(state.withdrawal_queue) == 0
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
@@ -121,7 +121,7 @@ def test_fail_empty_queue_non_empty_withdrawals(spec, state):
@with_capella_and_later
@spec_state_test
def test_fail_one_in_queue_none_in_withdrawals(spec, state):
- prepare_withdrawals_queue(spec, state, 1)
+ prepare_withdrawal_queue(spec, state, 1)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
@@ -133,7 +133,7 @@ def test_fail_one_in_queue_none_in_withdrawals(spec, state):
@with_capella_and_later
@spec_state_test
def test_fail_one_in_queue_two_in_withdrawals(spec, state):
- prepare_withdrawals_queue(spec, state, 1)
+ prepare_withdrawal_queue(spec, state, 1)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
@@ -145,7 +145,7 @@ def test_fail_one_in_queue_two_in_withdrawals(spec, state):
@with_capella_and_later
@spec_state_test
def test_fail_max_per_slot_in_queue_one_less_in_withdrawals(spec, state):
- prepare_withdrawals_queue(spec, state, spec.MAX_WITHDRAWALS_PER_PAYLOAD)
+ prepare_withdrawal_queue(spec, state, spec.MAX_WITHDRAWALS_PER_PAYLOAD)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
@@ -157,7 +157,7 @@ def test_fail_max_per_slot_in_queue_one_less_in_withdrawals(spec, state):
@with_capella_and_later
@spec_state_test
def test_fail_a_lot_in_queue_too_few_in_withdrawals(spec, state):
- prepare_withdrawals_queue(spec, state, spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
+ prepare_withdrawal_queue(spec, state, spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
@@ -173,7 +173,7 @@ def test_fail_a_lot_in_queue_too_few_in_withdrawals(spec, state):
@with_capella_and_later
@spec_state_test
def test_fail_incorrect_dequeue_index(spec, state):
- prepare_withdrawals_queue(spec, state, 1)
+ prepare_withdrawal_queue(spec, state, 1)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
@@ -185,7 +185,7 @@ def test_fail_incorrect_dequeue_index(spec, state):
@with_capella_and_later
@spec_state_test
def test_fail_incorrect_dequeue_address(spec, state):
- prepare_withdrawals_queue(spec, state, 1)
+ prepare_withdrawal_queue(spec, state, 1)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
@@ -197,7 +197,7 @@ def test_fail_incorrect_dequeue_address(spec, state):
@with_capella_and_later
@spec_state_test
def test_fail_incorrect_dequeue_amount(spec, state):
- prepare_withdrawals_queue(spec, state, 1)
+ prepare_withdrawal_queue(spec, state, 1)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
@@ -209,7 +209,7 @@ def test_fail_incorrect_dequeue_amount(spec, state):
@with_capella_and_later
@spec_state_test
def test_fail_one_of_many_dequeued_incorrectly(spec, state):
- prepare_withdrawals_queue(spec, state, spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
+ prepare_withdrawal_queue(spec, state, spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
@@ -227,7 +227,7 @@ def test_fail_one_of_many_dequeued_incorrectly(spec, state):
@with_capella_and_later
@spec_state_test
def test_fail_many_dequeued_incorrectly(spec, state):
- prepare_withdrawals_queue(spec, state, spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
+ prepare_withdrawal_queue(spec, state, spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
diff --git a/tests/core/pyspec/eth2spec/test/capella/epoch_processing/test_process_full_withdrawals.py b/tests/core/pyspec/eth2spec/test/capella/epoch_processing/test_process_full_withdrawals.py
index 305f6e1ba..1498666bb 100644
--- a/tests/core/pyspec/eth2spec/test/capella/epoch_processing/test_process_full_withdrawals.py
+++ b/tests/core/pyspec/eth2spec/test/capella/epoch_processing/test_process_full_withdrawals.py
@@ -17,8 +17,8 @@ def set_validator_withdrawable(spec, state, index, withdrawable_epoch=None):
def run_process_full_withdrawals(spec, state, num_expected_withdrawals=None):
- pre_withdrawal_index = state.withdrawal_index
- pre_withdrawals_queue = state.withdrawals_queue
+ pre_next_withdrawal_index = state.next_withdrawal_index
+ pre_withdrawal_queue = state.withdrawal_queue.copy()
to_be_withdrawn_indices = [
index for index, validator in enumerate(state.validators)
if spec.is_fully_withdrawable_validator(validator, spec.get_current_epoch(state))
@@ -26,6 +26,8 @@ def run_process_full_withdrawals(spec, state, num_expected_withdrawals=None):
if num_expected_withdrawals is not None:
assert len(to_be_withdrawn_indices) == num_expected_withdrawals
+ else:
+ num_expected_withdrawals = len(to_be_withdrawn_indices)
yield from run_epoch_processing_with(spec, state, 'process_full_withdrawals')
@@ -34,8 +36,8 @@ def run_process_full_withdrawals(spec, state, num_expected_withdrawals=None):
assert validator.fully_withdrawn_epoch == spec.get_current_epoch(state)
assert state.balances[index] == 0
- assert len(state.withdrawals_queue) == len(pre_withdrawals_queue) + num_expected_withdrawals
- assert state.withdrawal_index == pre_withdrawal_index + num_expected_withdrawals
+ assert len(state.withdrawal_queue) == len(pre_withdrawal_queue) + num_expected_withdrawals
+ assert state.next_withdrawal_index == pre_next_withdrawal_index + num_expected_withdrawals
@with_capella_and_later
@@ -65,10 +67,10 @@ def test_single_withdrawal(spec, state):
# Make one validator withdrawable
set_validator_withdrawable(spec, state, 0)
- assert state.withdrawal_index == 0
+ assert state.next_withdrawal_index == 0
yield from run_process_full_withdrawals(spec, state, 1)
- assert state.withdrawal_index == 1
+ assert state.next_withdrawal_index == 1
@with_capella_and_later
diff --git a/tests/core/pyspec/eth2spec/test/capella/epoch_processing/test_process_partial_withdrawals.py b/tests/core/pyspec/eth2spec/test/capella/epoch_processing/test_process_partial_withdrawals.py
new file mode 100644
index 000000000..bf2e73fa1
--- /dev/null
+++ b/tests/core/pyspec/eth2spec/test/capella/epoch_processing/test_process_partial_withdrawals.py
@@ -0,0 +1,224 @@
+import random
+from eth2spec.test.helpers.constants import MINIMAL
+from eth2spec.test.context import (
+ with_capella_and_later,
+ spec_state_test,
+ with_presets,
+)
+from eth2spec.test.helpers.epoch_processing import run_epoch_processing_to
+from eth2spec.test.helpers.state import next_epoch
+from eth2spec.test.helpers.random import randomize_state
+
+
+def set_validator_partially_withdrawable(spec, state, index, rng=random.Random(666)):
+ validator = state.validators[index]
+ validator.withdrawal_credentials = spec.ETH1_ADDRESS_WITHDRAWAL_PREFIX + validator.withdrawal_credentials[1:]
+ validator.effective_balance = spec.MAX_EFFECTIVE_BALANCE
+ state.balances[index] = spec.MAX_EFFECTIVE_BALANCE + rng.randint(1, 100000000)
+
+ assert spec.is_partially_withdrawable_validator(validator, state.balances[index])
+
+
+def run_process_partial_withdrawals(spec, state, num_expected_withdrawals=None):
+ # Run rest of epoch processing before predicting partial withdrawals as
+ # balance changes can affect withdrawability
+ run_epoch_processing_to(spec, state, 'process_partial_withdrawals')
+
+ pre_next_withdrawal_index = state.next_withdrawal_index
+ pre_withdrawal_queue = state.withdrawal_queue.copy()
+
+ partially_withdrawable_indices = [
+ index for index, validator in enumerate(state.validators)
+ if spec.is_partially_withdrawable_validator(validator, state.balances[index])
+ ]
+ num_partial_withdrawals = min(len(partially_withdrawable_indices), spec.MAX_PARTIAL_WITHDRAWALS_PER_EPOCH)
+
+ if num_expected_withdrawals is not None:
+ assert num_partial_withdrawals == num_expected_withdrawals
+ else:
+ num_expected_withdrawals = num_partial_withdrawals
+
+ yield 'pre', state
+ spec.process_partial_withdrawals(state)
+ yield 'post', state
+
+ post_partially_withdrawable_indices = [
+ index for index, validator in enumerate(state.validators)
+ if spec.is_partially_withdrawable_validator(validator, state.balances[index])
+ ]
+
+ assert len(partially_withdrawable_indices) - num_partial_withdrawals == len(post_partially_withdrawable_indices)
+
+ assert len(state.withdrawal_queue) == len(pre_withdrawal_queue) + num_expected_withdrawals
+ assert state.next_withdrawal_index == pre_next_withdrawal_index + num_expected_withdrawals
+
+
+@with_capella_and_later
+@spec_state_test
+def test_success_no_withdrawable(spec, state):
+ pre_validators = state.validators.copy()
+ yield from run_process_partial_withdrawals(spec, state, 0)
+
+ assert pre_validators == state.validators
+
+
+@with_capella_and_later
+@spec_state_test
+def test_success_one_partial_withdrawable(spec, state):
+ validator_index = len(state.validators) // 2
+ set_validator_partially_withdrawable(spec, state, validator_index)
+
+ yield from run_process_partial_withdrawals(spec, state, 1)
+
+
+@with_capella_and_later
+@spec_state_test
+def test_success_one_partial_withdrawable_not_yet_active(spec, state):
+ validator_index = len(state.validators) // 2
+ state.validators[validator_index].activation_epoch += 4
+ set_validator_partially_withdrawable(spec, state, validator_index)
+
+ assert not spec.is_active_validator(state.validators[validator_index], spec.get_current_epoch(state))
+
+ yield from run_process_partial_withdrawals(spec, state, 1)
+
+
+@with_capella_and_later
+@spec_state_test
+def test_success_one_partial_withdrawable_in_exit_queue(spec, state):
+ validator_index = len(state.validators) // 2
+ state.validators[validator_index].exit_epoch = spec.get_current_epoch(state) + 1
+ set_validator_partially_withdrawable(spec, state, validator_index)
+
+ assert spec.is_active_validator(state.validators[validator_index], spec.get_current_epoch(state))
+ assert not spec.is_active_validator(state.validators[validator_index], spec.get_current_epoch(state) + 1)
+
+ yield from run_process_partial_withdrawals(spec, state, 1)
+
+
+@with_capella_and_later
+@spec_state_test
+def test_success_one_partial_withdrawable_exited(spec, state):
+ validator_index = len(state.validators) // 2
+ state.validators[validator_index].exit_epoch = spec.get_current_epoch(state)
+ set_validator_partially_withdrawable(spec, state, validator_index)
+
+ assert not spec.is_active_validator(state.validators[validator_index], spec.get_current_epoch(state))
+
+ yield from run_process_partial_withdrawals(spec, state, 1)
+
+
+@with_capella_and_later
+@spec_state_test
+def test_success_one_partial_withdrawable_active_and_slashed(spec, state):
+ validator_index = len(state.validators) // 2
+ state.validators[validator_index].slashed = True
+ set_validator_partially_withdrawable(spec, state, validator_index)
+
+ assert spec.is_active_validator(state.validators[validator_index], spec.get_current_epoch(state))
+
+ yield from run_process_partial_withdrawals(spec, state, 1)
+
+
+@with_capella_and_later
+@spec_state_test
+def test_success_one_partial_withdrawable_exited_and_slashed(spec, state):
+ validator_index = len(state.validators) // 2
+ state.validators[validator_index].slashed = True
+ state.validators[validator_index].exit_epoch = spec.get_current_epoch(state)
+ set_validator_partially_withdrawable(spec, state, validator_index)
+
+ assert not spec.is_active_validator(state.validators[validator_index], spec.get_current_epoch(state))
+
+ yield from run_process_partial_withdrawals(spec, state, 1)
+
+
+@with_capella_and_later
+@spec_state_test
+def test_success_two_partial_withdrawable(spec, state):
+ set_validator_partially_withdrawable(spec, state, 0)
+ set_validator_partially_withdrawable(spec, state, 1)
+
+ yield from run_process_partial_withdrawals(spec, state, 2)
+
+
+@with_capella_and_later
+@spec_state_test
+def test_success_max_partial_withdrawable(spec, state):
+ # Sanity check that this test works for this state
+ assert len(state.validators) >= spec.MAX_PARTIAL_WITHDRAWALS_PER_EPOCH
+
+ for i in range(spec.MAX_PARTIAL_WITHDRAWALS_PER_EPOCH):
+ set_validator_partially_withdrawable(spec, state, i)
+
+ yield from run_process_partial_withdrawals(spec, state, spec.MAX_PARTIAL_WITHDRAWALS_PER_EPOCH)
+
+
+@with_capella_and_later
+@with_presets([MINIMAL], reason="not enough validators with mainnet config")
+@spec_state_test
+def test_success_max_plus_one_withdrawable(spec, state):
+ # Sanity check that this test works for this state
+ assert len(state.validators) >= spec.MAX_PARTIAL_WITHDRAWALS_PER_EPOCH + 1
+
+ # More than MAX_PARTIAL_WITHDRAWALS_PER_EPOCH partially withdrawable
+ for i in range(spec.MAX_PARTIAL_WITHDRAWALS_PER_EPOCH + 1):
+ set_validator_partially_withdrawable(spec, state, i)
+
+ # Should only have MAX_PARTIAL_WITHDRAWALS_PER_EPOCH withdrawals created
+ yield from run_process_partial_withdrawals(spec, state, spec.MAX_PARTIAL_WITHDRAWALS_PER_EPOCH)
+
+
+def run_random_partial_withdrawals_test(spec, state, rng):
+ for _ in range(rng.randint(0, 2)):
+ next_epoch(spec, state)
+ randomize_state(spec, state, rng)
+
+ num_validators = len(state.validators)
+ state.next_partial_withdrawal_validator_index = rng.randint(0, num_validators - 1)
+
+ num_partially_withdrawable = rng.randint(0, num_validators - 1)
+ partially_withdrawable_indices = rng.sample(range(num_validators), num_partially_withdrawable)
+ for index in partially_withdrawable_indices:
+ set_validator_partially_withdrawable(spec, state, index)
+
+ # Note: due to the randomness and other epoch processing, some of these set as "partially withdrawable"
+ # may not be partially withdrawable once we get to ``process_partial_withdrawals``,
+ # thus *not* using the optional third param in this call
+ yield from run_process_partial_withdrawals(spec, state)
+
+
+@with_capella_and_later
+@spec_state_test
+def test_random_0(spec, state):
+ yield from run_random_partial_withdrawals_test(spec, state, random.Random(0))
+
+
+@with_capella_and_later
+@spec_state_test
+def test_random_1(spec, state):
+ yield from run_random_partial_withdrawals_test(spec, state, random.Random(1))
+
+
+@with_capella_and_later
+@spec_state_test
+def test_random_2(spec, state):
+ yield from run_random_partial_withdrawals_test(spec, state, random.Random(2))
+
+
+@with_capella_and_later
+@spec_state_test
+def test_random_3(spec, state):
+ yield from run_random_partial_withdrawals_test(spec, state, random.Random(3))
+
+
+@with_capella_and_later
+@spec_state_test
+def test_random_4(spec, state):
+ yield from run_random_partial_withdrawals_test(spec, state, random.Random(4))
+
+
+@with_capella_and_later
+@spec_state_test
+def test_random_5(spec, state):
+ yield from run_random_partial_withdrawals_test(spec, state, random.Random(5))
diff --git a/tests/core/pyspec/eth2spec/test/conftest.py b/tests/core/pyspec/eth2spec/test/conftest.py
index b3c250c11..a5f19e20c 100644
--- a/tests/core/pyspec/eth2spec/test/conftest.py
+++ b/tests/core/pyspec/eth2spec/test/conftest.py
@@ -51,7 +51,7 @@ def pytest_addoption(parser):
def _validate_fork_name(forks):
for fork in forks:
- if fork not in ALL_PHASES:
+ if fork not in set(ALL_PHASES):
raise ValueError(
f'The given --fork argument "{fork}" is not an available fork.'
f' The available forks: {ALL_PHASES}'
diff --git a/tests/core/pyspec/eth2spec/test/context.py b/tests/core/pyspec/eth2spec/test/context.py
index 35e4f1bcb..bc04c05f2 100644
--- a/tests/core/pyspec/eth2spec/test/context.py
+++ b/tests/core/pyspec/eth2spec/test/context.py
@@ -1,19 +1,19 @@
import pytest
from dataclasses import dataclass
import importlib
-from eth_utils import encode_hex
from eth2spec.phase0 import mainnet as spec_phase0_mainnet, minimal as spec_phase0_minimal
from eth2spec.altair import mainnet as spec_altair_mainnet, minimal as spec_altair_minimal
from eth2spec.bellatrix import mainnet as spec_bellatrix_mainnet, minimal as spec_bellatrix_minimal
from eth2spec.capella import mainnet as spec_capella_mainnet, minimal as spec_capella_minimal
+from eth2spec.eip4844 import mainnet as spec_eip4844_mainnet, minimal as spec_eip4844_minimal
from eth2spec.utils import bls
from .exceptions import SkippedTest
from .helpers.constants import (
- PHASE0, ALTAIR, BELLATRIX, CAPELLA,
+ PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844, SHARDING,
MINIMAL, MAINNET,
- ALL_PHASES, FORKS_BEFORE_ALTAIR, FORKS_BEFORE_BELLATRIX, FORKS_BEFORE_CAPELLA,
+ ALL_PHASES, FORKS_BEFORE_ALTAIR, FORKS_BEFORE_BELLATRIX,
ALL_FORK_UPGRADES,
)
from .helpers.typing import SpecForkName, PresetBaseName
@@ -76,12 +76,14 @@ spec_targets: Dict[PresetBaseName, Dict[SpecForkName, Spec]] = {
ALTAIR: spec_altair_minimal,
BELLATRIX: spec_bellatrix_minimal,
CAPELLA: spec_capella_minimal,
+ EIP4844: spec_eip4844_minimal,
},
MAINNET: {
PHASE0: spec_phase0_mainnet,
ALTAIR: spec_altair_mainnet,
BELLATRIX: spec_bellatrix_mainnet,
CAPELLA: spec_capella_mainnet,
+        EIP4844: spec_eip4844_mainnet,
},
}
@@ -277,6 +279,53 @@ def spec_configured_state_test(conf):
return decorator
+def _check_current_version(spec, state, version_name):
+ fork_version_field = version_name.upper() + '_FORK_VERSION'
+ try:
+ fork_version = getattr(spec.config, fork_version_field)
+ except Exception:
+ return False
+ else:
+ return state.fork.current_version == fork_version
+
+
+def config_fork_epoch_overrides(spec, state):
+ overrides = {}
+ if state.fork.current_version == spec.config.GENESIS_FORK_VERSION:
+ pass
+ elif _check_current_version(spec, state, ALTAIR):
+ overrides['ALTAIR_FORK_EPOCH'] = spec.GENESIS_EPOCH
+ elif _check_current_version(spec, state, BELLATRIX):
+ overrides['ALTAIR_FORK_EPOCH'] = spec.GENESIS_EPOCH
+ overrides['BELLATRIX_FORK_EPOCH'] = spec.GENESIS_EPOCH
+ elif _check_current_version(spec, state, CAPELLA):
+ overrides['ALTAIR_FORK_EPOCH'] = spec.GENESIS_EPOCH
+ overrides['BELLATRIX_FORK_EPOCH'] = spec.GENESIS_EPOCH
+ overrides['CAPELLA_FORK_EPOCH'] = spec.GENESIS_EPOCH
+ elif _check_current_version(spec, state, EIP4844):
+ overrides['ALTAIR_FORK_EPOCH'] = spec.GENESIS_EPOCH
+ overrides['BELLATRIX_FORK_EPOCH'] = spec.GENESIS_EPOCH
+ overrides['EIP4844_FORK_EPOCH'] = spec.GENESIS_EPOCH
+ elif _check_current_version(spec, state, SHARDING):
+ overrides['ALTAIR_FORK_EPOCH'] = spec.GENESIS_EPOCH
+ overrides['BELLATRIX_FORK_EPOCH'] = spec.GENESIS_EPOCH
+ overrides['CAPELLA_FORK_EPOCH'] = spec.GENESIS_EPOCH
+ overrides['SHARDING_FORK_EPOCH'] = spec.GENESIS_EPOCH
+ else:
+ assert False # Fork is missing
+ return overrides
+
+
+def spec_state_test_with_matching_config(fn):
+ def decorator(fn):
+ def wrapper(*args, spec: Spec, **kw):
+ conf = config_fork_epoch_overrides(spec, kw['state'])
+ overrides = with_config_overrides(conf)
+ return overrides(fn)(*args, spec=spec, **kw)
+ return wrapper
+ return spec_test(with_state(decorator(single_phase(fn))))
+
+
def expect_assertion_error(fn):
bad = False
try:
@@ -473,18 +522,22 @@ def with_presets(preset_bases, reason=None):
return decorator
+class quoted_str(str):
+ pass
+
+
def _get_basic_dict(ssz_dict: Dict[str, Any]) -> Dict[str, Any]:
"""
- Get dict of Python built-in types from a dict of SSZ objects.
+ Get dict of basic types from a dict of SSZ objects.
"""
result = {}
for k, v in ssz_dict.items():
if isinstance(v, int):
value = int(v)
elif isinstance(v, bytes):
- value = encode_hex(v)
+ value = bytes(bytearray(v))
else:
- value = str(v)
+ value = quoted_str(v)
result[k] = value
return result
@@ -520,7 +573,7 @@ def with_config_overrides(config_overrides):
# To output the changed config to could be serialized with yaml test vectors,
# the dict SSZ objects have to be converted into Python built-in types.
output_config = _get_basic_dict(modified_config)
- yield 'config', 'data', output_config
+ yield 'config', 'cfg', output_config
spec.config = spec.Configuration(**modified_config)
@@ -543,12 +596,17 @@ def is_post_bellatrix(spec):
def is_post_capella(spec):
- return spec.fork not in FORKS_BEFORE_CAPELLA
+ return spec.fork == CAPELLA
+
+
+def is_post_eip4844(spec):
+ return spec.fork == EIP4844
with_altair_and_later = with_all_phases_except([PHASE0])
with_bellatrix_and_later = with_all_phases_except([PHASE0, ALTAIR])
-with_capella_and_later = with_all_phases_except([PHASE0, ALTAIR, BELLATRIX])
+with_capella_and_later = with_all_phases_except([PHASE0, ALTAIR, BELLATRIX, EIP4844])
+with_eip4844_and_later = with_all_phases_except([PHASE0, ALTAIR, BELLATRIX, CAPELLA])
def only_generator(reason):
diff --git a/tests/generators/merkle/__init__.py b/tests/core/pyspec/eth2spec/test/eip4844/sanity/__init__.py
similarity index 100%
rename from tests/generators/merkle/__init__.py
rename to tests/core/pyspec/eth2spec/test/eip4844/sanity/__init__.py
diff --git a/tests/core/pyspec/eth2spec/test/eip4844/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/eip4844/sanity/test_blocks.py
new file mode 100644
index 000000000..f08a7fda9
--- /dev/null
+++ b/tests/core/pyspec/eth2spec/test/eip4844/sanity/test_blocks.py
@@ -0,0 +1,43 @@
+from eth2spec.test.helpers.state import (
+ state_transition_and_sign_block
+)
+from eth2spec.test.helpers.block import (
+ build_empty_block_for_next_slot
+)
+from eth2spec.test.context import (
+ spec_state_test,
+ with_eip4844_and_later,
+)
+from eth2spec.test.helpers.sharding import (
+ get_sample_opaque_tx,
+)
+
+
+@with_eip4844_and_later
+@spec_state_test
+def test_one_blob(spec, state):
+ yield 'pre', state
+
+ block = build_empty_block_for_next_slot(spec, state)
+ opaque_tx, _, blob_kzg_commitments = get_sample_opaque_tx(spec)
+ block.body.blob_kzg_commitments = blob_kzg_commitments
+ block.body.execution_payload.transactions = [opaque_tx]
+ signed_block = state_transition_and_sign_block(spec, state, block)
+
+ yield 'blocks', [signed_block]
+ yield 'post', state
+
+
+@with_eip4844_and_later
+@spec_state_test
+def test_multiple_blobs(spec, state):
+ yield 'pre', state
+
+ block = build_empty_block_for_next_slot(spec, state)
+ opaque_tx, _, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=5)
+ block.body.blob_kzg_commitments = blob_kzg_commitments
+ block.body.execution_payload.transactions = [opaque_tx]
+ signed_block = state_transition_and_sign_block(spec, state, block)
+
+ yield 'blocks', [signed_block]
+ yield 'post', state
diff --git a/tests/core/pyspec/eth2spec/test/eip4844/unittests/__init__.py b/tests/core/pyspec/eth2spec/test/eip4844/unittests/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/core/pyspec/eth2spec/test/eip4844/unittests/test_kzg.py b/tests/core/pyspec/eth2spec/test/eip4844/unittests/test_kzg.py
new file mode 100644
index 000000000..7474707b9
--- /dev/null
+++ b/tests/core/pyspec/eth2spec/test/eip4844/unittests/test_kzg.py
@@ -0,0 +1,21 @@
+
+from eth2spec.test.helpers.constants import (
+ EIP4844,
+ MINIMAL,
+)
+from eth2spec.test.helpers.sharding import (
+ get_sample_blob,
+)
+from eth2spec.test.context import (
+ with_phases,
+ spec_state_test,
+ with_presets,
+)
+
+
+@with_phases([EIP4844])
+@spec_state_test
+@with_presets([MINIMAL])
+def test_blob_to_kzg_commitment(spec, state):
+ blob = get_sample_blob(spec)
+ spec.blob_to_kzg_commitment(blob)
diff --git a/tests/core/pyspec/eth2spec/test/eip4844/unittests/validator/__init__.py b/tests/core/pyspec/eth2spec/test/eip4844/unittests/validator/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/core/pyspec/eth2spec/test/eip4844/unittests/validator/test_validator.py b/tests/core/pyspec/eth2spec/test/eip4844/unittests/validator/test_validator.py
new file mode 100644
index 000000000..d0250f3df
--- /dev/null
+++ b/tests/core/pyspec/eth2spec/test/eip4844/unittests/validator/test_validator.py
@@ -0,0 +1,62 @@
+from eth2spec.test.helpers.state import (
+ state_transition_and_sign_block,
+)
+from eth2spec.test.helpers.block import (
+ build_empty_block_for_next_slot
+)
+from eth2spec.test.context import (
+ spec_state_test,
+ with_eip4844_and_later,
+)
+from eth2spec.test.helpers.sharding import (
+ get_sample_opaque_tx,
+ get_sample_blob,
+)
+from eth2spec.test.helpers.keys import privkeys
+
+
+@with_eip4844_and_later
+@spec_state_test
+def test_verify_kzg_proof(spec, state):
+ x = 3
+ polynomial = get_sample_blob(spec)
+ polynomial = [int(i) for i in polynomial]
+ commitment = spec.blob_to_kzg_commitment(polynomial)
+
+ # Get the proof
+ proof = spec.compute_kzg_proof(polynomial, x)
+
+ y = spec.evaluate_polynomial_in_evaluation_form(polynomial, x)
+ assert spec.verify_kzg_proof(commitment, x, y, proof)
+
+
+def _run_validate_blobs_sidecar_test(spec, state, blob_count):
+ block = build_empty_block_for_next_slot(spec, state)
+ opaque_tx, blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count)
+ block.body.blob_kzg_commitments = blob_kzg_commitments
+ block.body.execution_payload.transactions = [opaque_tx]
+ state_transition_and_sign_block(spec, state, block)
+
+ blobs_sidecar = spec.get_blobs_sidecar(block, blobs)
+ privkey = privkeys[1]
+ spec.get_signed_blobs_sidecar(state, blobs_sidecar, privkey)
+ expected_commitments = [spec.blob_to_kzg_commitment(blobs[i]) for i in range(blob_count)]
+ spec.validate_blobs_sidecar(block.slot, block.hash_tree_root(), expected_commitments, blobs_sidecar)
+
+
+@with_eip4844_and_later
+@spec_state_test
+def test_validate_blobs_sidecar_one_blob(spec, state):
+ _run_validate_blobs_sidecar_test(spec, state, blob_count=1)
+
+
+@with_eip4844_and_later
+@spec_state_test
+def test_validate_blobs_sidecar_two_blobs(spec, state):
+ _run_validate_blobs_sidecar_test(spec, state, blob_count=2)
+
+
+@with_eip4844_and_later
+@spec_state_test
+def test_validate_blobs_sidecar_ten_blobs(spec, state):
+ _run_validate_blobs_sidecar_test(spec, state, blob_count=10)
diff --git a/tests/core/pyspec/eth2spec/test/helpers/attestations.py b/tests/core/pyspec/eth2spec/test/helpers/attestations.py
index ffd484ecd..7ba71a969 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/attestations.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/attestations.py
@@ -246,7 +246,12 @@ def next_epoch_with_attestations(spec,
)
-def state_transition_with_full_block(spec, state, fill_cur_epoch, fill_prev_epoch, participation_fn=None):
+def state_transition_with_full_block(spec,
+ state,
+ fill_cur_epoch,
+ fill_prev_epoch,
+ participation_fn=None,
+ sync_aggregate=None):
"""
Build and apply a block with attestions at the calculated `slot_to_attest` of current epoch and/or previous epoch.
"""
@@ -272,6 +277,8 @@ def state_transition_with_full_block(spec, state, fill_cur_epoch, fill_prev_epoc
)
for attestation in attestations:
block.body.attestations.append(attestation)
+ if sync_aggregate is not None:
+ block.body.sync_aggregate = sync_aggregate
signed_block = state_transition_and_sign_block(spec, state, block)
return signed_block
diff --git a/tests/core/pyspec/eth2spec/test/helpers/constants.py b/tests/core/pyspec/eth2spec/test/helpers/constants.py
index 0bc6b2e08..b1463b97b 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/constants.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/constants.py
@@ -14,9 +14,15 @@ CAPELLA = SpecForkName('capella')
SHARDING = SpecForkName('sharding')
CUSTODY_GAME = SpecForkName('custody_game')
DAS = SpecForkName('das')
+EIP4844 = SpecForkName('eip4844')
-# The forks that pytest runs with.
-ALL_PHASES = (PHASE0, ALTAIR, BELLATRIX, CAPELLA)
+# The forks that pytest can run with.
+ALL_PHASES = (
+ # Formal forks
+ PHASE0, ALTAIR, BELLATRIX, CAPELLA,
+ # Experimental patches
+ EIP4844,
+)
# The forks that output to the test vectors.
TESTGEN_FORKS = (PHASE0, ALTAIR, BELLATRIX)
diff --git a/tests/core/pyspec/eth2spec/test/helpers/epoch_processing.py b/tests/core/pyspec/eth2spec/test/helpers/epoch_processing.py
index 8c27480c0..e18c239f1 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/epoch_processing.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/epoch_processing.py
@@ -29,6 +29,7 @@ def get_process_calls(spec):
),
'process_sync_committee_updates', # altair
'process_full_withdrawals', # capella
+ 'process_partial_withdrawals', # capella
# TODO: add sharding processing functions when spec stabilizes.
]
diff --git a/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py b/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py
index 5e7066632..83162e1c2 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py
@@ -1,4 +1,5 @@
-from eth2spec.test.helpers.constants import FORKS_BEFORE_CAPELLA
+from eth2spec.debug.random_value import get_random_bytes_list
+from eth2spec.test.context import is_post_capella
def build_empty_execution_payload(spec, state, randao_mix=None):
@@ -28,9 +29,9 @@ def build_empty_execution_payload(spec, state, randao_mix=None):
block_hash=spec.Hash32(),
transactions=empty_txs,
)
- if spec.fork not in FORKS_BEFORE_CAPELLA:
- num_withdrawals = min(spec.MAX_WITHDRAWALS_PER_PAYLOAD, len(state.withdrawals_queue))
- payload.withdrawals = state.withdrawals_queue[:num_withdrawals]
+ if is_post_capella(spec):
+ num_withdrawals = min(spec.MAX_WITHDRAWALS_PER_PAYLOAD, len(state.withdrawal_queue))
+ payload.withdrawals = state.withdrawal_queue[:num_withdrawals]
# TODO: real RLP + block hash logic would be nice, requires RLP and keccak256 dependency however.
payload.block_hash = spec.Hash32(spec.hash(payload.hash_tree_root() + b"FAKE RLP HASH"))
@@ -38,6 +39,33 @@ def build_empty_execution_payload(spec, state, randao_mix=None):
return payload
+def build_randomized_execution_payload(spec, state, rng):
+ execution_payload = build_empty_execution_payload(spec, state)
+ execution_payload.fee_recipient = spec.ExecutionAddress(get_random_bytes_list(rng, 20))
+ execution_payload.state_root = spec.Bytes32(get_random_bytes_list(rng, 32))
+ execution_payload.receipts_root = spec.Bytes32(get_random_bytes_list(rng, 32))
+ execution_payload.logs_bloom = spec.ByteVector[spec.BYTES_PER_LOGS_BLOOM](
+ get_random_bytes_list(rng, spec.BYTES_PER_LOGS_BLOOM)
+ )
+    execution_payload.block_number = rng.randint(0, 10**11)
+    execution_payload.gas_limit = rng.randint(0, 10**11)
+    execution_payload.gas_used = rng.randint(0, 10**11)
+ extra_data_length = rng.randint(0, spec.MAX_EXTRA_DATA_BYTES)
+ execution_payload.extra_data = spec.ByteList[spec.MAX_EXTRA_DATA_BYTES](
+ get_random_bytes_list(rng, extra_data_length)
+ )
+ execution_payload.base_fee_per_gas = rng.randint(0, 2**256 - 1)
+ execution_payload.block_hash = spec.Hash32(get_random_bytes_list(rng, 32))
+
+ num_transactions = rng.randint(0, 100)
+ execution_payload.transactions = [
+ spec.Transaction(get_random_bytes_list(rng, rng.randint(0, 1000)))
+ for _ in range(num_transactions)
+ ]
+
+ return execution_payload
+
+
def get_execution_payload_header(spec, execution_payload):
payload_header = spec.ExecutionPayloadHeader(
parent_hash=execution_payload.parent_hash,
@@ -55,20 +83,26 @@ def get_execution_payload_header(spec, execution_payload):
block_hash=execution_payload.block_hash,
transactions_root=spec.hash_tree_root(execution_payload.transactions)
)
- if spec.fork not in FORKS_BEFORE_CAPELLA:
+ if is_post_capella(spec):
payload_header.withdrawals_root = spec.hash_tree_root(execution_payload.withdrawals)
return payload_header
def build_state_with_incomplete_transition(spec, state):
- return build_state_with_execution_payload_header(spec, state, spec.ExecutionPayloadHeader())
+ state = build_state_with_execution_payload_header(spec, state, spec.ExecutionPayloadHeader())
+ assert not spec.is_merge_transition_complete(state)
+
+ return state
def build_state_with_complete_transition(spec, state):
pre_state_payload = build_empty_execution_payload(spec, state)
payload_header = get_execution_payload_header(spec, pre_state_payload)
- return build_state_with_execution_payload_header(spec, state, payload_header)
+ state = build_state_with_execution_payload_header(spec, state, payload_header)
+ assert spec.is_merge_transition_complete(state)
+
+ return state
def build_state_with_execution_payload_header(spec, state, execution_payload_header):
diff --git a/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py b/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py
index ca248d8a5..0280bc7fb 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py
@@ -13,6 +13,7 @@ from eth2spec.test.helpers.constants import (
ALTAIR,
BELLATRIX,
CAPELLA,
+ EIP4844,
)
from eth2spec.test.helpers.deposits import (
prepare_state_and_deposit,
@@ -150,6 +151,8 @@ def do_fork(state, spec, post_spec, fork_epoch, with_block=True, operation_dict=
state = post_spec.upgrade_to_bellatrix(state)
elif post_spec.fork == CAPELLA:
state = post_spec.upgrade_to_capella(state)
+ elif post_spec.fork == EIP4844:
+ state = post_spec.upgrade_to_eip4844(state)
assert state.fork.epoch == fork_epoch
diff --git a/tests/core/pyspec/eth2spec/test/helpers/genesis.py b/tests/core/pyspec/eth2spec/test/helpers/genesis.py
index 83994c409..b7b941125 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/genesis.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/genesis.py
@@ -1,6 +1,6 @@
from eth2spec.test.helpers.constants import (
- ALTAIR, BELLATRIX,
- FORKS_BEFORE_ALTAIR, FORKS_BEFORE_BELLATRIX, FORKS_BEFORE_CAPELLA,
+ ALTAIR, BELLATRIX, CAPELLA, EIP4844,
+ FORKS_BEFORE_ALTAIR, FORKS_BEFORE_BELLATRIX,
)
from eth2spec.test.helpers.keys import pubkeys
@@ -20,7 +20,7 @@ def build_mock_validator(spec, i: int, balance: int):
effective_balance=min(balance - balance % spec.EFFECTIVE_BALANCE_INCREMENT, spec.MAX_EFFECTIVE_BALANCE)
)
- if spec.fork not in FORKS_BEFORE_CAPELLA:
+    if spec.fork in (CAPELLA,):
validator.fully_withdrawn_epoch = spec.FAR_FUTURE_EPOCH
return validator
@@ -57,6 +57,12 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
elif spec.fork == BELLATRIX:
previous_version = spec.config.ALTAIR_FORK_VERSION
current_version = spec.config.BELLATRIX_FORK_VERSION
+ elif spec.fork == CAPELLA:
+ previous_version = spec.config.BELLATRIX_FORK_VERSION
+ current_version = spec.config.CAPELLA_FORK_VERSION
+ elif spec.fork == EIP4844:
+ previous_version = spec.config.BELLATRIX_FORK_VERSION
+ current_version = spec.config.EIP4844_FORK_VERSION
state = spec.BeaconState(
genesis_time=0,
diff --git a/tests/core/pyspec/eth2spec/test/helpers/light_client.py b/tests/core/pyspec/eth2spec/test/helpers/light_client.py
index 15c764fc4..8d632b3a1 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/light_client.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/light_client.py
@@ -1,8 +1,22 @@
+from eth2spec.test.helpers.state import (
+ transition_to,
+)
from eth2spec.test.helpers.sync_committee import (
compute_aggregate_sync_committee_signature,
+ compute_committee_indices,
)
+def signed_block_to_header(spec, block):
+ return spec.BeaconBlockHeader(
+ slot=block.message.slot,
+ proposer_index=block.message.proposer_index,
+ parent_root=block.message.parent_root,
+ state_root=block.message.state_root,
+ body_root=block.message.body.hash_tree_root(),
+ )
+
+
def initialize_light_client_store(spec, state):
return spec.LightClientStore(
finalized_header=spec.BeaconBlockHeader(),
@@ -15,21 +29,34 @@ def initialize_light_client_store(spec, state):
)
-def get_sync_aggregate(spec, state, block_header, block_root=None, signature_slot=None):
+def get_sync_aggregate(spec, state, num_participants=None, signature_slot=None):
+ # By default, the sync committee signs the previous slot
if signature_slot is None:
- signature_slot = block_header.slot
+ signature_slot = state.slot + 1
- all_pubkeys = [v.pubkey for v in state.validators]
- committee = [all_pubkeys.index(pubkey) for pubkey in state.current_sync_committee.pubkeys]
- sync_committee_bits = [True] * len(committee)
+ # Ensure correct sync committee and fork version are selected
+ signature_state = state.copy()
+ transition_to(spec, signature_state, signature_slot)
+
+ # Fetch sync committee
+ committee_indices = compute_committee_indices(signature_state)
+ committee_size = len(committee_indices)
+
+ # By default, use full participation
+ if num_participants is None:
+ num_participants = committee_size
+ assert committee_size >= num_participants >= 0
+
+ # Compute sync aggregate
+ sync_committee_bits = [True] * num_participants + [False] * (committee_size - num_participants)
sync_committee_signature = compute_aggregate_sync_committee_signature(
spec,
- state,
- block_header.slot,
- committee,
- block_root=block_root,
+ signature_state,
+ signature_slot,
+ committee_indices[:num_participants],
)
- return spec.SyncAggregate(
+ sync_aggregate = spec.SyncAggregate(
sync_committee_bits=sync_committee_bits,
sync_committee_signature=sync_committee_signature,
)
+ return sync_aggregate, signature_slot
diff --git a/tests/core/pyspec/eth2spec/test/helpers/multi_operations.py b/tests/core/pyspec/eth2spec/test/helpers/multi_operations.py
index c2cc6d98a..44ed0ae89 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/multi_operations.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/multi_operations.py
@@ -180,7 +180,7 @@ def get_random_voluntary_exits(spec, state, to_be_slashed_indices, rng):
def get_random_sync_aggregate(spec, state, slot, block_root=None, fraction_participated=1.0, rng=Random(2099)):
- committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
+ committee_indices = compute_committee_indices(state, state.current_sync_committee)
participant_count = int(len(committee_indices) * fraction_participated)
participant_indices = rng.sample(range(len(committee_indices)), participant_count)
participants = [
diff --git a/tests/core/pyspec/eth2spec/test/helpers/proposer_slashings.py b/tests/core/pyspec/eth2spec/test/helpers/proposer_slashings.py
index faa4d4288..517190869 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/proposer_slashings.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/proposer_slashings.py
@@ -30,7 +30,7 @@ def check_proposer_slashing_effect(spec, pre_state, state, slashed_index, block=
# Altair introduces sync committee (SC) reward and penalty
sc_reward_for_slashed = sc_penalty_for_slashed = sc_reward_for_proposer = sc_penalty_for_proposer = 0
if is_post_altair(spec) and block is not None:
- committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
+ committee_indices = compute_committee_indices(state, state.current_sync_committee)
committee_bits = block.body.sync_aggregate.sync_committee_bits
sc_reward_for_slashed, sc_penalty_for_slashed = compute_sync_committee_participant_reward_and_penalty(
spec,
diff --git a/tests/core/pyspec/eth2spec/test/helpers/sharding.py b/tests/core/pyspec/eth2spec/test/helpers/sharding.py
new file mode 100644
index 000000000..6c90153fc
--- /dev/null
+++ b/tests/core/pyspec/eth2spec/test/helpers/sharding.py
@@ -0,0 +1,81 @@
+import random
+from eth2spec.utils.ssz.ssz_typing import (
+ Container,
+ Bytes20, Bytes32,
+ ByteList,
+ List,
+ Union,
+ boolean,
+ uint256, uint64,
+)
+from eth2spec.utils.ssz.ssz_impl import serialize
+
+
+#
+# Containers from EIP-4844
+#
+MAX_CALLDATA_SIZE = 2**24
+MAX_VERSIONED_HASHES_LIST_SIZE = 2**24
+MAX_ACCESS_LIST_STORAGE_KEYS = 2**24
+MAX_ACCESS_LIST_SIZE = 2**24
+
+
+class AccessTuple(Container):
+ address: Bytes20 # Address = Bytes20
+ storage_keys: List[Bytes32, MAX_ACCESS_LIST_STORAGE_KEYS]
+
+
+class ECDSASignature(Container):
+ y_parity: boolean
+ r: uint256
+ s: uint256
+
+
+class BlobTransaction(Container):
+ chain_id: uint256
+ nonce: uint64
+ priority_fee_per_gas: uint256
+ max_basefee_per_gas: uint256
+ gas: uint64
+ to: Union[None, Bytes20] # Address = Bytes20
+ value: uint256
+ data: ByteList[MAX_CALLDATA_SIZE]
+ access_list: List[AccessTuple, MAX_ACCESS_LIST_SIZE]
+ blob_versioned_hashes: List[Bytes32, MAX_VERSIONED_HASHES_LIST_SIZE]
+
+
+class SignedBlobTransaction(Container):
+ message: BlobTransaction
+ signature: ECDSASignature
+
+
+def get_sample_blob(spec, rng=None):
+ if rng is None:
+ rng = random.Random(5566)
+
+ return spec.Blob([
+ rng.randint(0, spec.BLS_MODULUS - 1)
+ for _ in range(spec.FIELD_ELEMENTS_PER_BLOB)
+ ])
+
+
+def get_sample_opaque_tx(spec, blob_count=1, rng=None):
+ blobs = []
+ blob_kzg_commitments = []
+ blob_versioned_hashes = []
+ for _ in range(blob_count):
+ blob = get_sample_blob(spec, rng)
+ blob_commitment = spec.KZGCommitment(spec.blob_to_kzg_commitment(blob))
+ blob_versioned_hash = spec.kzg_commitment_to_versioned_hash(blob_commitment)
+ blobs.append(blob)
+ blob_kzg_commitments.append(blob_commitment)
+ blob_versioned_hashes.append(blob_versioned_hash)
+
+ signed_blob_tx = SignedBlobTransaction(
+ message=BlobTransaction(
+ blob_versioned_hashes=blob_versioned_hashes,
+ )
+ )
+ serialized_tx = serialize(signed_blob_tx)
+ opaque_tx = spec.uint_to_bytes(spec.BLOB_TX_TYPE) + serialized_tx
+ return opaque_tx, blobs, blob_kzg_commitments
diff --git a/tests/core/pyspec/eth2spec/test/helpers/sync_committee.py b/tests/core/pyspec/eth2spec/test/helpers/sync_committee.py
index 417802ece..cc05b862b 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/sync_committee.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/sync_committee.py
@@ -74,7 +74,7 @@ def compute_sync_committee_proposer_reward(spec, state, committee_indices, commi
return spec.Gwei(participant_reward * participant_number)
-def compute_committee_indices(spec, state, committee=None):
+def compute_committee_indices(state, committee=None):
"""
Given a ``committee``, calculate and return the related indices
"""
@@ -129,11 +129,7 @@ def run_sync_committee_processing(spec, state, block, expect_exception=False):
if expect_exception:
assert pre_state.balances == state.balances
else:
- committee_indices = compute_committee_indices(
- spec,
- state,
- state.current_sync_committee,
- )
+ committee_indices = compute_committee_indices(state, state.current_sync_committee)
committee_bits = block.body.sync_aggregate.sync_committee_bits
validate_sync_committee_rewards(
spec,
diff --git a/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py
index 34a053c86..a09c02b9a 100644
--- a/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py
+++ b/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py
@@ -34,7 +34,6 @@ from eth2spec.test.context import (
spec_test, spec_state_test, dump_skipping_message,
with_phases, with_all_phases, single_phase,
expect_assertion_error, always_bls,
- disable_process_reveal_deadlines,
with_presets,
with_custom_state,
large_validator_set,
@@ -774,7 +773,7 @@ def test_deposit_top_up(spec, state):
# Altair introduces sync committee (sm) reward and penalty
sync_committee_reward = sync_committee_penalty = 0
if is_post_altair(spec):
- committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
+ committee_indices = compute_committee_indices(state, state.current_sync_committee)
committee_bits = block.body.sync_aggregate.sync_committee_bits
sync_committee_reward, sync_committee_penalty = compute_sync_committee_participant_reward_and_penalty(
spec,
@@ -840,7 +839,6 @@ def test_attestation(spec, state):
@with_all_phases
@spec_state_test
-@disable_process_reveal_deadlines
def test_voluntary_exit(spec, state):
validator_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
@@ -890,7 +888,6 @@ def test_double_validator_exit_same_block(spec, state):
@with_all_phases
@spec_state_test
-@disable_process_reveal_deadlines
def test_multiple_different_validator_exits_same_block(spec, state):
validator_indices = [
spec.get_active_validator_indices(state, spec.get_current_epoch(state))[i]
@@ -923,7 +920,6 @@ def test_multiple_different_validator_exits_same_block(spec, state):
@with_all_phases
@spec_state_test
-@disable_process_reveal_deadlines
def test_slash_and_exit_same_index(spec, state):
validator_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
yield from run_slash_and_exit(spec, state, validator_index, validator_index, valid=False)
@@ -931,7 +927,6 @@ def test_slash_and_exit_same_index(spec, state):
@with_all_phases
@spec_state_test
-@disable_process_reveal_deadlines
def test_slash_and_exit_diff_index(spec, state):
slash_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
exit_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-2]
diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_attestation.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_attestation.py
index 4e8d4bbaa..6e545cef7 100644
--- a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_attestation.py
+++ b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_attestation.py
@@ -1,7 +1,7 @@
from eth2spec.test.context import with_all_phases, spec_state_test
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation
-from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA
+from eth2spec.test.helpers.constants import ALL_PHASES
from eth2spec.test.helpers.state import transition_to, state_transition_and_sign_block, next_epoch, next_slot
from eth2spec.test.helpers.fork_choice import get_genesis_forkchoice_store
@@ -19,7 +19,7 @@ def run_on_attestation(spec, state, store, attestation, valid=True):
spec.on_attestation(store, attestation)
sample_index = indexed_attestation.attesting_indices[0]
- if spec.fork in (PHASE0, ALTAIR, BELLATRIX, CAPELLA):
+ if spec.fork in ALL_PHASES:
latest_message = spec.LatestMessage(
epoch=attestation.data.target.epoch,
root=attestation.data.beacon_block_root,
diff --git a/tests/core/pyspec/eth2spec/test/utils/utils.py b/tests/core/pyspec/eth2spec/test/utils/utils.py
index bad6c867b..14a7ceeb9 100644
--- a/tests/core/pyspec/eth2spec/test/utils/utils.py
+++ b/tests/core/pyspec/eth2spec/test/utils/utils.py
@@ -17,6 +17,7 @@ def vector_test(description: str = None):
# this wraps the function, to yield type-annotated entries of data.
# Valid types are:
# - "meta": all key-values with this type can be collected by the generator, to put somewhere together.
+ # - "cfg": spec config dictionary
# - "ssz": raw SSZ bytes
# - "data": a python structure to be encoded by the user.
def entry(*args, **kw):
diff --git a/tests/core/pyspec/eth2spec/utils/bls.py b/tests/core/pyspec/eth2spec/utils/bls.py
index 9211e0ff0..e33017ade 100644
--- a/tests/core/pyspec/eth2spec/utils/bls.py
+++ b/tests/core/pyspec/eth2spec/utils/bls.py
@@ -1,5 +1,25 @@
from py_ecc.bls import G2ProofOfPossession as py_ecc_bls
from py_ecc.bls.g2_primatives import signature_to_G2 as _signature_to_G2
+from py_ecc.optimized_bls12_381 import ( # noqa: F401
+ G1,
+ G2,
+ Z1,
+ Z2,
+ add,
+ multiply,
+ neg,
+ pairing,
+ final_exponentiate,
+ FQ12
+)
+from py_ecc.bls.g2_primitives import ( # noqa: F401
+ G1_to_pubkey as G1_to_bytes48,
+ pubkey_to_G1 as bytes48_to_G1,
+ G2_to_signature as G2_to_bytes96,
+ signature_to_G2 as bytes96_to_G2,
+)
+
+
import milagro_bls_binding as milagro_bls # noqa: F401 for BLS switching option
# Flag to make BLS active or not. Used for testing, do not ignore BLS in production unless you know what you are doing.
@@ -109,3 +129,12 @@ def SkToPk(SK):
return bls.SkToPk(SK)
else:
return bls.SkToPk(SK.to_bytes(32, 'big'))
+
+
+def pairing_check(values):
+ p_q_1, p_q_2 = values
+ final_exponentiation = final_exponentiate(
+ pairing(p_q_1[1], p_q_1[0], final_exponentiate=False)
+ * pairing(p_q_2[1], p_q_2[0], final_exponentiate=False)
+ )
+ return final_exponentiation == FQ12.one()
diff --git a/tests/core/pyspec/eth2spec/utils/kzg.py b/tests/core/pyspec/eth2spec/utils/kzg.py
new file mode 100644
index 000000000..e174e69ab
--- /dev/null
+++ b/tests/core/pyspec/eth2spec/utils/kzg.py
@@ -0,0 +1,80 @@
+# Ref:
+# - https://github.com/ethereum/research/blob/8f084630528ba33d92b2bc05edf5338dd193c6f1/trusted_setup/trusted_setup.py
+# - https://github.com/asn-d6/kzgverify
+from py_ecc.optimized_bls12_381 import ( # noqa: F401
+ G1,
+ G2,
+ Z1,
+ Z2,
+ curve_order as BLS_MODULUS,
+ add,
+ multiply,
+ neg,
+)
+from eth2spec.utils import bls
+
+
+PRIMITIVE_ROOT_OF_UNITY = 7
+
+
+def generate_setup(generator, secret, length):
+ """
+ Generate trusted setup of ``generator`` in ``length``.
+ """
+ result = [generator]
+ for _ in range(1, length):
+ result.append(multiply(result[-1], secret))
+ return tuple(result)
+
+
+def fft(vals, modulus, domain):
+ """
+ FFT for group elements
+ """
+ if len(vals) == 1:
+ return vals
+ L = fft(vals[::2], modulus, domain[::2])
+ R = fft(vals[1::2], modulus, domain[::2])
+ o = [0] * len(vals)
+ for i, (x, y) in enumerate(zip(L, R)):
+ y_times_root = multiply(y, domain[i])
+ o[i] = add(x, y_times_root)
+ o[i + len(L)] = add(x, neg(y_times_root))
+ return o
+
+
+def compute_root_of_unity(length) -> int:
+ """
+ Generate a w such that ``w**length = 1``.
+ """
+ assert (BLS_MODULUS - 1) % length == 0
+ return pow(PRIMITIVE_ROOT_OF_UNITY, (BLS_MODULUS - 1) // length, BLS_MODULUS)
+
+
+def compute_roots_of_unity(field_elements_per_blob):
+ """
+ Compute a list of roots of unity for a given order.
+ The order must divide the BLS multiplicative group order, i.e. BLS_MODULUS - 1
+ """
+ assert (BLS_MODULUS - 1) % field_elements_per_blob == 0
+ root_of_unity = compute_root_of_unity(length=field_elements_per_blob)
+
+ roots = []
+ current_root_of_unity = 1
+ for _ in range(field_elements_per_blob):
+ roots.append(current_root_of_unity)
+ current_root_of_unity = current_root_of_unity * root_of_unity % BLS_MODULUS
+ return roots
+
+
+def get_lagrange(setup):
+ """
+ Convert a G1 or G2 portion of a setup into the Lagrange basis.
+ """
+ root_of_unity = compute_root_of_unity(len(setup))
+ assert pow(root_of_unity, len(setup), BLS_MODULUS) == 1
+ domain = [pow(root_of_unity, i, BLS_MODULUS) for i in range(len(setup))]
+ # TODO: introduce an IFFT function for simplicity
+ fft_output = fft(setup, BLS_MODULUS, domain)
+ inv_length = pow(len(setup), BLS_MODULUS - 2, BLS_MODULUS)
+ return [bls.G1_to_bytes48(multiply(fft_output[-i], inv_length)) for i in range(len(fft_output))]
diff --git a/tests/formats/README.md b/tests/formats/README.md
index e4f2bcb29..ec495daa5 100644
--- a/tests/formats/README.md
+++ b/tests/formats/README.md
@@ -29,7 +29,7 @@ This document defines the YAML format and structure used for consensus spec test
## About
-Ethereum 2.0 uses YAML as the format for all cross client tests. This document describes at a high level the general format to which all test files should conform.
+The consensus layer uses YAML as the format for all cross client tests. This document describes at a high level the general format to which all test files should conform.
### Test-case formats
@@ -115,7 +115,6 @@ some tests of earlier forks repeat with updated state data.
The well known bls/shuffling/ssz_static/operations/epoch_processing/etc. Handlers can change the format, but there is a general target to test.
-
### `<test handler name>/`
Specialization within category. All suites in here will have the same test case format.
@@ -171,9 +170,6 @@ bls_setting: int -- optional, can have 3 different values:
but there is no change of outcome when running the test if BLS is ON or OFF.
1: known as "BLS required" - if the test validity is strictly dependent on BLS being ON
2: known as "BLS ignored" - if the test validity is strictly dependent on BLS being OFF
-reveal_deadlines_setting: -- optional, can have 2 different values:
- 0: default, `process_reveal_deadlines` is ON.
- 1: `process_reveal_deadlines` is OFF.
```
##### `config.yaml`
diff --git a/tests/formats/light_client/README.md b/tests/formats/light_client/README.md
new file mode 100644
index 000000000..505b41601
--- /dev/null
+++ b/tests/formats/light_client/README.md
@@ -0,0 +1,8 @@
+# Light client sync protocol tests
+
+This series of tests provides reference test vectors for the light client sync protocol spec.
+
+Handlers:
+- `single_merkle_proof`: see [Single leaf merkle proof test format](./single_merkle_proof.md)
+- `sync`: see [Light client sync test format](./sync.md)
+- `update_ranking`: see [`LightClientUpdate` ranking test format](./update_ranking.md)
diff --git a/tests/formats/merkle/single_proof.md b/tests/formats/light_client/single_merkle_proof.md
similarity index 100%
rename from tests/formats/merkle/single_proof.md
rename to tests/formats/light_client/single_merkle_proof.md
diff --git a/tests/formats/light_client/sync.md b/tests/formats/light_client/sync.md
new file mode 100644
index 000000000..ad597fa1d
--- /dev/null
+++ b/tests/formats/light_client/sync.md
@@ -0,0 +1,68 @@
+# Light client sync tests
+
+This series of tests provides reference test vectors for validating that a light client implementing the sync protocol can sync to the latest block header.
+
+## Test case format
+
+### `meta.yaml`
+
+```yaml
+genesis_validators_root: Bytes32 -- string, hex encoded, with 0x prefix
+trusted_block_root: Bytes32 -- string, hex encoded, with 0x prefix
+```
+
+### `bootstrap.ssz_snappy`
+
+An SSZ-snappy encoded `bootstrap` object of type `LightClientBootstrap` to initialize a local `store` object of type `LightClientStore` using `initialize_light_client_store(trusted_block_root, bootstrap)`.
+
+### `steps.yaml`
+
+The steps to execute in sequence.
+
+#### Checks to run after each step
+
+Each step includes checks to verify the expected impact on the `store` object.
+
+```yaml
+finalized_header: {
+ slot: int, -- Integer value from store.finalized_header.slot
+ root: string, -- Encoded 32-byte value from store.finalized_header.hash_tree_root()
+}
+optimistic_header: {
+ slot: int, -- Integer value from store.optimistic_header.slot
+ root: string, -- Encoded 32-byte value from store.optimistic_header.hash_tree_root()
+}
+```
+
+#### `force_update` execution step
+
+The function `process_light_client_store_force_update(store, current_slot)`
+should be executed with the specified parameters:
+
+```yaml
+{
+ current_slot: int -- integer, decimal
+    checks: {<store_attribute>: value}    -- the assertions.
+}
+```
+
+After this step, the `store` object may have been updated.
+
+#### `process_update` execution step
+
+The function `process_light_client_update(store, update, current_slot, genesis_validators_root)` should be executed with the specified parameters:
+
+```yaml
+{
+ update: string -- name of the `*.ssz_snappy` file to load
+ as a `LightClientUpdate` object
+ current_slot: int -- integer, decimal
+    checks: {<store_attribute>: value}    -- the assertions.
+}
+```
+
+After this step, the `store` object may have been updated.
+
+## Condition
+
+A test-runner should initialize a local `LightClientStore` using the provided `bootstrap` object. It should then proceed to execute all the test steps in sequence. After each step, it should verify that the resulting `store` object matches the provided `checks`.
diff --git a/tests/formats/light_client/update_ranking.md b/tests/formats/light_client/update_ranking.md
new file mode 100644
index 000000000..fe73fb9df
--- /dev/null
+++ b/tests/formats/light_client/update_ranking.md
@@ -0,0 +1,21 @@
+# `LightClientUpdate` ranking tests
+
+This series of tests provides reference test vectors for validating that `LightClientUpdate` instances are ranked in a canonical order.
+
+## Test case format
+
+### `meta.yaml`
+
+```yaml
+updates_count: int -- integer, decimal
+```
+
+### `updates_<index>.ssz_snappy`
+
+A series of files, with `<index>` in range `[0, updates_count)`, ordered by descending precedence according to `is_better_update` (best update at index 0).
+
+Each file is a SSZ-snappy encoded `LightClientUpdate`.
+
+## Condition
+
+A test-runner should load the provided `update` objects and verify that the local implementation ranks them in the same order. Note that the `update` objects are not restricted to a single sync committee period for the scope of this test.
diff --git a/tests/formats/merkle/README.md b/tests/formats/merkle/README.md
deleted file mode 100644
index c0f0a205b..000000000
--- a/tests/formats/merkle/README.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# Merkle tests
-
-This series of tests provides reference test vectors for validating correct
-generation and verification of merkle proofs based on static data.
-
-Handlers:
-- `single_proof`: see [Single leaf proof test format](./single_proof.md)
-- Different types of merkle proofs may be supported in the future.
diff --git a/tests/formats/transition/README.md b/tests/formats/transition/README.md
index 37df65539..41a5bc01d 100644
--- a/tests/formats/transition/README.md
+++ b/tests/formats/transition/README.md
@@ -8,6 +8,7 @@ Clients should assume forks happen sequentially in the following manner:
0. `phase0`
1. `altair`
+2. `bellatrix`
For example, if a test case has `post_fork` of `altair`, the test consumer should assume the test begins in `phase0` and use that specification to process the initial state and any blocks up until the fork epoch. After the fork happens, the test consumer should use the specification according to the `altair` fork to process the remaining data.
diff --git a/tests/generators/bls/main.py b/tests/generators/bls/main.py
index 6fc86d10f..60c31d9d9 100644
--- a/tests/generators/bls/main.py
+++ b/tests/generators/bls/main.py
@@ -513,7 +513,6 @@ def create_provider(fork_name: SpecForkName,
def cases_fn() -> Iterable[gen_typing.TestCase]:
for data in test_case_fn():
- print(data)
(case_name, case_content) = data
yield gen_typing.TestCase(
fork_name=fork_name,
diff --git a/tests/generators/light_client/README.md b/tests/generators/light_client/README.md
new file mode 100644
index 000000000..7eabc2520
--- /dev/null
+++ b/tests/generators/light_client/README.md
@@ -0,0 +1,5 @@
+# Light client tests
+
+The purpose of this test-generator is to provide test-vectors for validating the correct implementation of the light client sync protocol.
+
+Test-format documentation can be found [here](../../formats/light_client/README.md).
diff --git a/tests/generators/light_client/__init__.py b/tests/generators/light_client/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/generators/merkle/main.py b/tests/generators/light_client/main.py
similarity index 55%
rename from tests/generators/merkle/main.py
rename to tests/generators/light_client/main.py
index 5d07264ef..68a0da529 100644
--- a/tests/generators/merkle/main.py
+++ b/tests/generators/light_client/main.py
@@ -3,8 +3,10 @@ from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
if __name__ == "__main__":
- altair_mods = {key: 'eth2spec.test.altair.merkle.test_' + key for key in [
- 'single_proof',
+ altair_mods = {key: 'eth2spec.test.altair.light_client.test_' + key for key in [
+ 'single_merkle_proof',
+ 'sync',
+ 'update_ranking',
]}
bellatrix_mods = altair_mods
@@ -13,4 +15,4 @@ if __name__ == "__main__":
BELLATRIX: bellatrix_mods,
}
- run_state_test_generators(runner_name="merkle", all_mods=all_mods)
+ run_state_test_generators(runner_name="light_client", all_mods=all_mods)
diff --git a/tests/generators/merkle/requirements.txt b/tests/generators/light_client/requirements.txt
similarity index 100%
rename from tests/generators/merkle/requirements.txt
rename to tests/generators/light_client/requirements.txt
diff --git a/tests/generators/merkle/README.md b/tests/generators/merkle/README.md
deleted file mode 100644
index a19a67d9e..000000000
--- a/tests/generators/merkle/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# Merkle
-
-The purpose of this test-generator is to provide test-vectors for validating the
-correct merkleization of objects and corresponding merkle proofs.
-
-Test-format documentation can be found [here](../../formats/merkle/README.md).
diff --git a/tests/generators/transition/main.py b/tests/generators/transition/main.py
index 5c3b43b4f..7de7213bd 100644
--- a/tests/generators/transition/main.py
+++ b/tests/generators/transition/main.py
@@ -16,9 +16,6 @@ from eth2spec.test.altair.transition import (
test_slashing as test_altair_slashing,
test_operations as test_altair_operations,
)
-from eth2spec.test.bellatrix.transition import (
- test_transition as test_bellatrix_transition,
-)
def create_provider(tests_src, preset_name: str, pre_fork_name: str, post_fork_name: str) -> gen_typing.TestProvider:
@@ -47,10 +44,7 @@ if __name__ == "__main__":
test_altair_slashing,
test_altair_operations,
)
- bellatrix_tests = (
- test_bellatrix_transition,
- )
- all_tests = altair_tests + bellatrix_tests
+ all_tests = altair_tests
for transition_test_module in all_tests:
for pre_fork, post_fork in ALL_PRE_POST_FORKS:
gen_runner.run_generator("transition", [