mirror of
https://github.com/ethereum/consensus-specs.git
synced 2026-02-01 21:14:54 -05:00
Merge branch 'dev' into deposits
This commit is contained in:
@@ -142,7 +142,7 @@ jobs:
|
||||
command: make citest fork=capella
|
||||
- store_test_results:
|
||||
path: tests/core/pyspec/test-reports
|
||||
test-eip4844:
|
||||
test-deneb:
|
||||
docker:
|
||||
- image: circleci/python:3.8
|
||||
working_directory: ~/specs-repo
|
||||
@@ -152,7 +152,7 @@ jobs:
|
||||
- restore_pyspec_cached_venv
|
||||
- run:
|
||||
name: Run py-tests
|
||||
command: make citest fork=eip4844
|
||||
command: make citest fork=deneb
|
||||
- store_test_results:
|
||||
path: tests/core/pyspec/test-reports
|
||||
table_of_contents:
|
||||
@@ -272,7 +272,7 @@ workflows:
|
||||
- test-capella:
|
||||
requires:
|
||||
- install_pyspec_test
|
||||
- test-eip4844:
|
||||
- test-deneb:
|
||||
requires:
|
||||
- install_pyspec_test
|
||||
- table_of_contents
|
||||
|
||||
128
.github/workflows/run-tests.yml
vendored
Normal file
128
.github/workflows/run-tests.yml
vendored
Normal file
@@ -0,0 +1,128 @@
|
||||
name: Run spec tests and linter
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: zsh {0}
|
||||
|
||||
env:
|
||||
TEST_PRESET_TYPE: "minimal"
|
||||
DEFAULT_BRANCH: "dev"
|
||||
|
||||
# Run tests on workflow_Dispatch
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- dev
|
||||
- master
|
||||
pull_request:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
test_preset_type:
|
||||
default: minimal
|
||||
description: Type of test to run, either mainnet or minimal
|
||||
type: string
|
||||
required: true
|
||||
commitRef:
|
||||
description: The branch, tag or SHA to checkout and build from
|
||||
default: dev
|
||||
required: true
|
||||
schedule:
|
||||
- cron: '0 0 * * *'
|
||||
|
||||
jobs:
|
||||
preclear:
|
||||
runs-on: self-hosted
|
||||
if: always()
|
||||
steps:
|
||||
- name: 'Cleanup build folder'
|
||||
run: |
|
||||
ls -la ./
|
||||
rm -rf ./* || true
|
||||
rm -rf ./.??* || true
|
||||
ls -la ./
|
||||
|
||||
table_of_contents:
|
||||
runs-on: self-hosted
|
||||
needs: preclear
|
||||
steps:
|
||||
- name: Checkout this repo
|
||||
uses: actions/checkout@v3.2.0
|
||||
with:
|
||||
ref: ${{ github.event.inputs.commitRef || env.DEFAULT_BRANCH }}
|
||||
- name: Check table of contents
|
||||
run: sudo npm install -g doctoc@2 && make check_toc
|
||||
|
||||
codespell:
|
||||
runs-on: self-hosted
|
||||
needs: preclear
|
||||
steps:
|
||||
- name: Checkout this repo
|
||||
uses: actions/checkout@v3.2.0
|
||||
with:
|
||||
ref: ${{ github.event.inputs.commitRef || env.DEFAULT_BRANCH }}
|
||||
- name: Check codespell
|
||||
run: pip install 'codespell<3.0.0,>=2.0.0' --user && make codespell
|
||||
|
||||
lint:
|
||||
runs-on: self-hosted
|
||||
needs: preclear
|
||||
steps:
|
||||
- name: Checkout this repo
|
||||
uses: actions/checkout@v3.2.0
|
||||
with:
|
||||
ref: ${{ github.event.inputs.commitRef || env.DEFAULT_BRANCH }}
|
||||
- name: Install pyspec requirements
|
||||
run: make install_test
|
||||
- name: Run linter for pyspec
|
||||
run: make lint
|
||||
- name: Run linter for test generators
|
||||
run: make lint_generators
|
||||
|
||||
pyspec-tests:
|
||||
runs-on: self-hosted
|
||||
needs: [preclear,lint,codespell,table_of_contents]
|
||||
strategy:
|
||||
matrix:
|
||||
version: ["phase0", "altair", "bellatrix", "capella", "deneb"]
|
||||
steps:
|
||||
- name: Checkout this repo
|
||||
uses: actions/checkout@v3.2.0
|
||||
with:
|
||||
ref: ${{ github.event.inputs.commitRef || env.DEFAULT_BRANCH }}
|
||||
- name: set TEST_PRESET_TYPE
|
||||
if: github.event.inputs.test_preset_type != ''
|
||||
run: |
|
||||
echo "spec_test_preset_type=${{ github.event.inputs.test_preset_type || env.TEST_PRESET_TYPE }}" >> $GITHUB_ENV
|
||||
- name: set TEST_PRESET_TYPE
|
||||
if: ${{ (github.event_name == 'push' && github.ref_name != 'master') || github.event_name == 'pull_request' }}
|
||||
run: |
|
||||
echo "spec_test_preset_type=${{ env.TEST_PRESET_TYPE}}" >> $GITHUB_ENV
|
||||
- name: set TEST_PRESET_TYPE
|
||||
if: ${{ github.event_name == 'push' && github.ref_name == 'master' }}
|
||||
run: |
|
||||
echo "spec_test_preset_type=mainnet" >> $GITHUB_ENV
|
||||
- name: set TEST_PRESET_TYPE
|
||||
if: github.event.schedule=='0 0 * * *'
|
||||
run: |
|
||||
echo "spec_test_preset_type=mainnet" >> $GITHUB_ENV
|
||||
- name: Install pyspec requirements
|
||||
run: make install_test
|
||||
- name: test-${{ matrix.version }}
|
||||
run: make citest fork=${{ matrix.version }} TEST_PRESET_TYPE=${{env.spec_test_preset_type}}
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: always()
|
||||
with:
|
||||
name: test-${{ matrix.version }}
|
||||
path: tests/core/pyspec/test-reports
|
||||
|
||||
cleanup:
|
||||
runs-on: self-hosted
|
||||
needs: [preclear,pyspec-tests,codespell,lint,table_of_contents]
|
||||
if: always()
|
||||
steps:
|
||||
- name: 'Cleanup build folder'
|
||||
run: |
|
||||
ls -la ./
|
||||
rm -rf ./* || true
|
||||
rm -rf ./.??* || true
|
||||
ls -la ./
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -19,7 +19,7 @@ tests/core/pyspec/eth2spec/phase0/
|
||||
tests/core/pyspec/eth2spec/altair/
|
||||
tests/core/pyspec/eth2spec/bellatrix/
|
||||
tests/core/pyspec/eth2spec/capella/
|
||||
tests/core/pyspec/eth2spec/eip4844/
|
||||
tests/core/pyspec/eth2spec/deneb/
|
||||
|
||||
# coverage reports
|
||||
.htmlcov
|
||||
|
||||
34
Makefile
34
Makefile
@@ -13,7 +13,7 @@ SOLIDITY_DEPOSIT_CONTRACT_SOURCE = ${SOLIDITY_DEPOSIT_CONTRACT_DIR}/deposit_cont
|
||||
SOLIDITY_FILE_NAME = deposit_contract.json
|
||||
DEPOSIT_CONTRACT_TESTER_DIR = ${SOLIDITY_DEPOSIT_CONTRACT_DIR}/web3_tester
|
||||
CONFIGS_DIR = ./configs
|
||||
|
||||
TEST_PRESET_TYPE ?= minimal
|
||||
# Collect a list of generator names
|
||||
GENERATORS = $(sort $(dir $(wildcard $(GENERATOR_DIR)/*/.)))
|
||||
# Map this list of generator paths to "gen_{generator name}" entries
|
||||
@@ -26,11 +26,11 @@ GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%venv, $(GENER
|
||||
MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/phase0/*.md) \
|
||||
$(wildcard $(SPEC_DIR)/altair/*.md) $(wildcard $(SPEC_DIR)/altair/**/*.md) \
|
||||
$(wildcard $(SPEC_DIR)/bellatrix/*.md) \
|
||||
$(wildcard $(SPEC_DIR)/capella/*.md) \
|
||||
$(wildcard $(SPEC_DIR)/custody/*.md) \
|
||||
$(wildcard $(SPEC_DIR)/das/*.md) \
|
||||
$(wildcard $(SPEC_DIR)/sharding/*.md) \
|
||||
$(wildcard $(SPEC_DIR)/eip4844/*.md) \
|
||||
$(wildcard $(SPEC_DIR)/capella/*.md) $(wildcard $(SPEC_DIR)/capella/**/*.md) \
|
||||
$(wildcard $(SPEC_DIR)/deneb/*.md) $(wildcard $(SPEC_DIR)/deneb/**/*.md) \
|
||||
$(wildcard $(SPEC_DIR)/_features/custody/*.md) \
|
||||
$(wildcard $(SPEC_DIR)/_features/das/*.md) \
|
||||
$(wildcard $(SPEC_DIR)/_features/sharding/*.md) \
|
||||
$(wildcard $(SSZ_DIR)/*.md)
|
||||
|
||||
COV_HTML_OUT=.htmlcov
|
||||
@@ -67,7 +67,7 @@ partial_clean:
|
||||
rm -rf $(ETH2SPEC_MODULE_DIR)/altair
|
||||
rm -rf $(ETH2SPEC_MODULE_DIR)/bellatrix
|
||||
rm -rf $(ETH2SPEC_MODULE_DIR)/capella
|
||||
rm -rf $(ETH2SPEC_MODULE_DIR)/eip4844
|
||||
rm -rf $(ETH2SPEC_MODULE_DIR)/deneb
|
||||
rm -rf $(COV_HTML_OUT_DIR)
|
||||
rm -rf $(TEST_REPORT_DIR)
|
||||
rm -rf eth2spec.egg-info dist build
|
||||
@@ -96,30 +96,30 @@ generate_tests: $(GENERATOR_TARGETS)
|
||||
|
||||
# "make pyspec" to create the pyspec for all phases.
|
||||
pyspec:
|
||||
. venv/bin/activate; python3 setup.py pyspecdev
|
||||
python3 -m venv venv; . venv/bin/activate; python3 setup.py pyspecdev
|
||||
|
||||
# installs the packages to run pyspec tests
|
||||
install_test:
|
||||
python3 -m venv venv; . venv/bin/activate; python3 -m pip install -e .[lint]; python3 -m pip install -e .[test]
|
||||
|
||||
# Testing against `minimal` config by default
|
||||
# Testing against `minimal` or `mainnet` config by default
|
||||
test: pyspec
|
||||
. venv/bin/activate; cd $(PY_SPEC_DIR); \
|
||||
python3 -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.minimal --cov=eth2spec.altair.minimal --cov=eth2spec.bellatrix.minimal --cov=eth2spec.capella.minimal --cov=eth2spec.eip4844.minimal --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
|
||||
python3 -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.$(TEST_PRESET_TYPE) --cov=eth2spec.altair.$(TEST_PRESET_TYPE) --cov=eth2spec.bellatrix.$(TEST_PRESET_TYPE) --cov=eth2spec.capella.$(TEST_PRESET_TYPE) --cov=eth2spec.deneb.$(TEST_PRESET_TYPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
|
||||
|
||||
# Testing against `minimal` config by default
|
||||
# Testing against `minimal` or `mainnet` config by default
|
||||
find_test: pyspec
|
||||
. venv/bin/activate; cd $(PY_SPEC_DIR); \
|
||||
python3 -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.minimal --cov=eth2spec.altair.minimal --cov=eth2spec.bellatrix.minimal --cov=eth2spec.capella.minimal --cov=eth2spec.eip4844.minimal --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
|
||||
python3 -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.$(TEST_PRESET_TYPE) --cov=eth2spec.altair.$(TEST_PRESET_TYPE) --cov=eth2spec.bellatrix.$(TEST_PRESET_TYPE) --cov=eth2spec.capella.$(TEST_PRESET_TYPE) --cov=eth2spec.deneb.$(TEST_PRESET_TYPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
|
||||
|
||||
citest: pyspec
|
||||
mkdir -p $(TEST_REPORT_DIR);
|
||||
ifdef fork
|
||||
. venv/bin/activate; cd $(PY_SPEC_DIR); \
|
||||
python3 -m pytest -n 4 --bls-type=milagro --fork=$(fork) --junitxml=test-reports/test_results.xml eth2spec
|
||||
python3 -m pytest -n 16 --bls-type=milagro --preset=$(TEST_PRESET_TYPE) --fork=$(fork) --junitxml=test-reports/test_results.xml eth2spec
|
||||
else
|
||||
. venv/bin/activate; cd $(PY_SPEC_DIR); \
|
||||
python3 -m pytest -n 4 --bls-type=milagro --junitxml=test-reports/test_results.xml eth2spec
|
||||
python3 -m pytest -n 16 --bls-type=milagro --preset=$(TEST_PRESET_TYPE) --junitxml=test-reports/test_results.xml eth2spec
|
||||
endif
|
||||
|
||||
|
||||
@@ -135,15 +135,15 @@ check_toc: $(MARKDOWN_FILES:=.toc)
|
||||
rm $*.tmp
|
||||
|
||||
codespell:
|
||||
codespell . --skip ./.git -I .codespell-whitelist
|
||||
codespell . --skip "./.git,./venv,$(PY_SPEC_DIR)/.mypy_cache" -I .codespell-whitelist
|
||||
|
||||
# TODO: add future protocol upgrade patch packages to linting.
|
||||
# NOTE: we use `pylint` just for catching unused arguments in spec code
|
||||
lint: pyspec
|
||||
. venv/bin/activate; cd $(PY_SPEC_DIR); \
|
||||
flake8 --config $(LINTER_CONFIG_FILE) ./eth2spec \
|
||||
&& pylint --rcfile $(LINTER_CONFIG_FILE) ./eth2spec/phase0 ./eth2spec/altair ./eth2spec/bellatrix ./eth2spec/capella ./eth2spec/eip4844 \
|
||||
&& mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.altair -p eth2spec.bellatrix -p eth2spec.capella -p eth2spec.eip4844
|
||||
&& pylint --rcfile $(LINTER_CONFIG_FILE) ./eth2spec/phase0 ./eth2spec/altair ./eth2spec/bellatrix ./eth2spec/capella ./eth2spec/deneb \
|
||||
&& mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.altair -p eth2spec.bellatrix -p eth2spec.capella -p eth2spec.deneb
|
||||
|
||||
lint_generators: pyspec
|
||||
. venv/bin/activate; cd $(TEST_GENERATORS_DIR); \
|
||||
|
||||
10
README.md
10
README.md
@@ -24,11 +24,11 @@ Features are researched and developed in parallel, and then consolidated into se
|
||||
### In-development Specifications
|
||||
| Code Name or Topic | Specs | Notes |
|
||||
| - | - | - |
|
||||
| Capella (tentative) | <ul><li>Core</li><ul><li>[Beacon chain changes](specs/capella/beacon-chain.md)</li><li>[Capella fork](specs/capella/fork.md)</li></ul><li>Additions</li><ul><li>[Validator additions](specs/capella/validator.md)</li><li>[P2P networking](specs/capella/p2p-interface.md)</li></ul></ul> |
|
||||
| EIP4844 (tentative) | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/eip4844/beacon-chain.md)</li><li>[EIP-4844 fork](specs/eip4844/fork.md)</li><li>[Polynomial commitments](specs/eip4844/polynomial-commitments.md)</li></ul><li>Additions</li><ul><li>[Honest validator guide changes](specs/eip4844/validator.md)</li><li>[P2P networking](specs/eip4844/p2p-interface.md)</li></ul></ul> |
|
||||
| Sharding (outdated) | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/sharding/beacon-chain.md)</li></ul><li>Additions</li><ul><li>[P2P networking](specs/sharding/p2p-interface.md)</li></ul></ul> |
|
||||
| Custody Game (outdated) | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/custody_game/beacon-chain.md)</li></ul><li>Additions</li><ul><li>[Honest validator guide changes](specs/custody_game/validator.md)</li></ul></ul> | Dependent on sharding |
|
||||
| Data Availability Sampling (outdated) | <ul><li>Core</li><ul><li>[Core types and functions](specs/das/das-core.md)</li><li>[Fork choice changes](specs/das/fork-choice.md)</li></ul><li>Additions</li><ul><li>[P2P Networking](specs/das/p2p-interface.md)</li><li>[Sampling process](specs/das/sampling.md)</li></ul></ul> | <ul><li> Dependent on sharding</li><li>[Technical explainer](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD)</li></ul> |
|
||||
| Capella (tentative) | <ul><li>Core</li><ul><li>[Beacon chain changes](specs/capella/beacon-chain.md)</li><li>[Capella fork](specs/capella/fork.md)</li></ul><li>Additions</li><ul><li>[Light client sync protocol changes](specs/capella/light-client/sync-protocol.md) ([fork](specs/capella/light-client/fork.md), [full node](specs/capella/light-client/full-node.md), [networking](specs/capella/light-client/p2p-interface.md))</li></ul><ul><li>[Validator additions](specs/capella/validator.md)</li><li>[P2P networking](specs/capella/p2p-interface.md)</li></ul></ul> |
|
||||
| Deneb (tentative) | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/deneb/beacon-chain.md)</li><li>[Deneb fork](specs/deneb/fork.md)</li><li>[Polynomial commitments](specs/deneb/polynomial-commitments.md)</li><li>[Fork choice changes](specs/deneb/fork-choice.md)</li></ul><li>Additions</li><ul><li>[Light client sync protocol changes](specs/deneb/light-client/sync-protocol.md) ([fork](specs/deneb/light-client/fork.md), [full node](specs/deneb/light-client/full-node.md), [networking](specs/deneb/light-client/p2p-interface.md))</li></ul><ul><li>[Honest validator guide changes](specs/deneb/validator.md)</li><li>[P2P networking](specs/deneb/p2p-interface.md)</li></ul></ul> |
|
||||
| Sharding (outdated) | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/_features/sharding/beacon-chain.md)</li></ul><li>Additions</li><ul><li>[P2P networking](specs/_features/sharding/p2p-interface.md)</li></ul></ul> |
|
||||
| Custody Game (outdated) | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/_features/custody_game/beacon-chain.md)</li></ul><li>Additions</li><ul><li>[Honest validator guide changes](specs/_features/custody_game/validator.md)</li></ul></ul> | Dependent on sharding |
|
||||
| Data Availability Sampling (outdated) | <ul><li>Core</li><ul><li>[Core types and functions](specs/_features/das/das-core.md)</li><li>[Fork choice changes](specs/_features/das/fork-choice.md)</li></ul><li>Additions</li><ul><li>[P2P Networking](specs/_features/das/p2p-interface.md)</li><li>[Sampling process](specs/_features/das/sampling.md)</li></ul></ul> | <ul><li> Dependent on sharding</li><li>[Technical explainer](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD)</li></ul> |
|
||||
|
||||
### Accompanying documents can be found in [specs](specs) and include:
|
||||
|
||||
|
||||
@@ -47,9 +47,9 @@ BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC
|
||||
# Capella
|
||||
CAPELLA_FORK_VERSION: 0x03000000
|
||||
CAPELLA_FORK_EPOCH: 18446744073709551615
|
||||
# EIP4844
|
||||
EIP4844_FORK_VERSION: 0x04000000
|
||||
EIP4844_FORK_EPOCH: 18446744073709551615
|
||||
# Deneb
|
||||
DENEB_FORK_VERSION: 0x04000000
|
||||
DENEB_FORK_EPOCH: 18446744073709551615
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -46,9 +46,9 @@ BELLATRIX_FORK_EPOCH: 18446744073709551615
|
||||
# Capella
|
||||
CAPELLA_FORK_VERSION: 0x03000001
|
||||
CAPELLA_FORK_EPOCH: 18446744073709551615
|
||||
# EIP4844
|
||||
EIP4844_FORK_VERSION: 0x04000001
|
||||
EIP4844_FORK_EPOCH: 18446744073709551615
|
||||
# DENEB
|
||||
DENEB_FORK_VERSION: 0x04000001
|
||||
DENEB_FORK_EPOCH: 18446744073709551615
|
||||
|
||||
|
||||
# Time parameters
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Mainnet preset - Phase0
|
||||
# Mainnet preset - Deneb
|
||||
|
||||
# Misc
|
||||
# ---------------------------------------------------------------
|
||||
@@ -1,4 +1,4 @@
|
||||
# Minimal preset - Phase0
|
||||
# Minimal preset - Deneb
|
||||
|
||||
# Misc
|
||||
# ---------------------------------------------------------------
|
||||
89
setup.py
89
setup.py
@@ -46,7 +46,7 @@ PHASE0 = 'phase0'
|
||||
ALTAIR = 'altair'
|
||||
BELLATRIX = 'bellatrix'
|
||||
CAPELLA = 'capella'
|
||||
EIP4844 = 'eip4844'
|
||||
DENEB = 'deneb'
|
||||
|
||||
|
||||
# The helper functions that are used when defining constants
|
||||
@@ -616,11 +616,26 @@ from eth2spec.bellatrix import {preset_name} as bellatrix
|
||||
'''
|
||||
|
||||
|
||||
@classmethod
|
||||
def sundry_functions(cls) -> str:
|
||||
return super().sundry_functions() + '\n\n' + '''
|
||||
def compute_merkle_proof_for_block_body(body: BeaconBlockBody,
|
||||
index: GeneralizedIndex) -> Sequence[Bytes32]:
|
||||
return build_proof(body.get_backing(), index)'''
|
||||
|
||||
|
||||
@classmethod
|
||||
def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:
|
||||
constants = {
|
||||
'EXECUTION_PAYLOAD_INDEX': 'GeneralizedIndex(25)',
|
||||
}
|
||||
return {**super().hardcoded_ssz_dep_constants(), **constants}
|
||||
|
||||
#
|
||||
# EIP4844SpecBuilder
|
||||
# DenebSpecBuilder
|
||||
#
|
||||
class EIP4844SpecBuilder(CapellaSpecBuilder):
|
||||
fork: str = EIP4844
|
||||
class DenebSpecBuilder(CapellaSpecBuilder):
|
||||
fork: str = DENEB
|
||||
|
||||
@classmethod
|
||||
def imports(cls, preset_name: str):
|
||||
@@ -638,37 +653,9 @@ T = TypeVar('T') # For generic function
|
||||
@classmethod
|
||||
def sundry_functions(cls) -> str:
|
||||
return super().sundry_functions() + '\n\n' + '''
|
||||
#
|
||||
# Temporarily disable Withdrawals functions for EIP4844 testnets
|
||||
#
|
||||
|
||||
|
||||
def no_op(fn): # type: ignore
|
||||
def retrieve_blobs_and_proofs(beacon_block_root: Root) -> PyUnion[Tuple[Blob, KZGProof], Tuple[str, str]]:
|
||||
# pylint: disable=unused-argument
|
||||
def wrapper(*args, **kw): # type: ignore
|
||||
return None
|
||||
return wrapper
|
||||
|
||||
|
||||
def get_empty_list_result(fn): # type: ignore
|
||||
# pylint: disable=unused-argument
|
||||
def wrapper(*args, **kw): # type: ignore
|
||||
return []
|
||||
return wrapper
|
||||
|
||||
|
||||
process_withdrawals = no_op(process_withdrawals)
|
||||
process_bls_to_execution_change = no_op(process_bls_to_execution_change)
|
||||
get_expected_withdrawals = get_empty_list_result(get_expected_withdrawals)
|
||||
|
||||
|
||||
#
|
||||
# End
|
||||
#
|
||||
|
||||
def retrieve_blobs_sidecar(slot: Slot, beacon_block_root: Root) -> PyUnion[BlobsSidecar, str]:
|
||||
# pylint: disable=unused-argument
|
||||
return "TEST"'''
|
||||
return ("TEST", "TEST")'''
|
||||
|
||||
@classmethod
|
||||
def hardcoded_custom_type_dep_constants(cls, spec_object) -> str:
|
||||
@@ -682,7 +669,7 @@ def retrieve_blobs_sidecar(slot: Slot, beacon_block_root: Root) -> PyUnion[Blobs
|
||||
|
||||
spec_builders = {
|
||||
builder.fork: builder
|
||||
for builder in (Phase0SpecBuilder, AltairSpecBuilder, BellatrixSpecBuilder, CapellaSpecBuilder, EIP4844SpecBuilder)
|
||||
for builder in (Phase0SpecBuilder, AltairSpecBuilder, BellatrixSpecBuilder, CapellaSpecBuilder, DenebSpecBuilder)
|
||||
}
|
||||
|
||||
|
||||
@@ -718,6 +705,7 @@ def objects_to_spec(preset_name: str,
|
||||
if k in [
|
||||
"ceillog2",
|
||||
"floorlog2",
|
||||
"compute_merkle_proof_for_block_body",
|
||||
"compute_merkle_proof_for_state",
|
||||
]:
|
||||
del spec_object.functions[k]
|
||||
@@ -980,14 +968,14 @@ class PySpecCommand(Command):
|
||||
if len(self.md_doc_paths) == 0:
|
||||
print("no paths were specified, using default markdown file paths for pyspec"
|
||||
" build (spec fork: %s)" % self.spec_fork)
|
||||
if self.spec_fork in (PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844):
|
||||
if self.spec_fork in (PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB):
|
||||
self.md_doc_paths = """
|
||||
specs/phase0/beacon-chain.md
|
||||
specs/phase0/fork-choice.md
|
||||
specs/phase0/validator.md
|
||||
specs/phase0/weak-subjectivity.md
|
||||
"""
|
||||
if self.spec_fork in (ALTAIR, BELLATRIX, CAPELLA, EIP4844):
|
||||
if self.spec_fork in (ALTAIR, BELLATRIX, CAPELLA, DENEB):
|
||||
self.md_doc_paths += """
|
||||
specs/altair/light-client/full-node.md
|
||||
specs/altair/light-client/light-client.md
|
||||
@@ -999,7 +987,7 @@ class PySpecCommand(Command):
|
||||
specs/altair/validator.md
|
||||
specs/altair/p2p-interface.md
|
||||
"""
|
||||
if self.spec_fork in (BELLATRIX, CAPELLA, EIP4844):
|
||||
if self.spec_fork in (BELLATRIX, CAPELLA, DENEB):
|
||||
self.md_doc_paths += """
|
||||
specs/bellatrix/beacon-chain.md
|
||||
specs/bellatrix/fork.md
|
||||
@@ -1008,21 +996,30 @@ class PySpecCommand(Command):
|
||||
specs/bellatrix/p2p-interface.md
|
||||
sync/optimistic.md
|
||||
"""
|
||||
if self.spec_fork in (CAPELLA, EIP4844):
|
||||
if self.spec_fork in (CAPELLA, DENEB):
|
||||
self.md_doc_paths += """
|
||||
specs/capella/light-client/fork.md
|
||||
specs/capella/light-client/full-node.md
|
||||
specs/capella/light-client/p2p-interface.md
|
||||
specs/capella/light-client/sync-protocol.md
|
||||
specs/capella/beacon-chain.md
|
||||
specs/capella/fork.md
|
||||
specs/capella/fork-choice.md
|
||||
specs/capella/validator.md
|
||||
specs/capella/p2p-interface.md
|
||||
"""
|
||||
if self.spec_fork == EIP4844:
|
||||
if self.spec_fork == DENEB:
|
||||
self.md_doc_paths += """
|
||||
specs/eip4844/beacon-chain.md
|
||||
specs/eip4844/fork.md
|
||||
specs/eip4844/polynomial-commitments.md
|
||||
specs/eip4844/p2p-interface.md
|
||||
specs/eip4844/validator.md
|
||||
specs/deneb/light-client/fork.md
|
||||
specs/deneb/light-client/full-node.md
|
||||
specs/deneb/light-client/p2p-interface.md
|
||||
specs/deneb/light-client/sync-protocol.md
|
||||
specs/deneb/beacon-chain.md
|
||||
specs/deneb/fork.md
|
||||
specs/deneb/fork-choice.md
|
||||
specs/deneb/polynomial-commitments.md
|
||||
specs/deneb/p2p-interface.md
|
||||
specs/deneb/validator.md
|
||||
"""
|
||||
if len(self.md_doc_paths) == 0:
|
||||
raise Exception('no markdown files specified, and spec fork "%s" is unknown', self.spec_fork)
|
||||
@@ -1172,7 +1169,7 @@ setup(
|
||||
"pycryptodome==3.15.0",
|
||||
"py_ecc==6.0.0",
|
||||
"milagro_bls_binding==1.9.0",
|
||||
"remerkleable==0.1.25",
|
||||
"remerkleable==0.1.27",
|
||||
"trie==2.0.2",
|
||||
RUAMEL_YAML_VERSION,
|
||||
"lru-dict==1.1.8",
|
||||
|
||||
@@ -36,11 +36,11 @@ docs are requisite for this document and used throughout. Please see the Custody
|
||||
|
||||
## Becoming a validator
|
||||
|
||||
Becoming a validator in Custody Game is unchanged from Phase 0. See the [Phase 0 validator guide](../phase0/validator.md#becoming-a-validator) for details.
|
||||
Becoming a validator in Custody Game is unchanged from Phase 0. See the [Phase 0 validator guide](../../phase0/validator.md#becoming-a-validator) for details.
|
||||
|
||||
## Beacon chain validator assignments
|
||||
|
||||
Beacon chain validator assignments to beacon committees and beacon block proposal are unchanged from Phase 0. See the [Phase 0 validator guide](../phase0/validator.md#validator-assignments) for details.
|
||||
Beacon chain validator assignments to beacon committees and beacon block proposal are unchanged from Phase 0. See the [Phase 0 validator guide](../../phase0/validator.md#validator-assignments) for details.
|
||||
|
||||
##### Custody slashings
|
||||
|
||||
@@ -143,7 +143,7 @@ If the node does not already have connected peers on the topic it needs to sampl
|
||||
|
||||
### Topics and messages
|
||||
|
||||
Following the same scheme as the [Phase0 gossip topics](../phase0/p2p-interface.md#topics-and-messages), names and payload types are:
|
||||
Following the same scheme as the [Phase0 gossip topics](../../phase0/p2p-interface.md#topics-and-messages), names and payload types are:
|
||||
| Name | Message Type |
|
||||
|----------------------------------|---------------------------|
|
||||
| `das_sample_{subnet_index}` | `DASSample` |
|
||||
@@ -192,7 +192,7 @@ This is to serve other peers that may have missed it.
|
||||
|
||||
To pull samples from nodes, in case of network instability when samples are unavailable, a new query method is added to the Req-Resp domain.
|
||||
|
||||
This builds on top of the protocol identification and encoding spec which was introduced in [the Phase0 network spec](../phase0/p2p-interface.md).
|
||||
This builds on top of the protocol identification and encoding spec which was introduced in [the Phase0 network spec](../../phase0/p2p-interface.md).
|
||||
|
||||
Note that DAS networking uses a different protocol prefix: `/eth2/das/req`
|
||||
|
||||
@@ -39,7 +39,7 @@ The adjustments and additions for Shards are outlined in this document.
|
||||
|
||||
### Topics and messages
|
||||
|
||||
Following the same scheme as the [Phase0 gossip topics](../phase0/p2p-interface.md#topics-and-messages), names and payload types are:
|
||||
Following the same scheme as the [Phase0 gossip topics](../../phase0/p2p-interface.md#topics-and-messages), names and payload types are:
|
||||
|
||||
| Name | Message Type |
|
||||
|---------------------------------|--------------------------|
|
||||
@@ -33,7 +33,7 @@ This document represents the changes to be made in the code of an "honest valida
|
||||
|
||||
## Prerequisites
|
||||
|
||||
This document is an extension of the [Bellatrix -- Honest Validator](../bellatrix/validator.md) guide.
|
||||
This document is an extension of the [Bellatrix -- Honest Validator](../../bellatrix/validator.md) guide.
|
||||
All behaviors and definitions defined in this document, and documents it extends, carry over unless explicitly noted or overridden.
|
||||
|
||||
All terminology, constants, functions, and protocol mechanics defined in the updated Beacon Chain doc of [Sharding](./beacon-chain.md) are requisite for this document and used throughout.
|
||||
@@ -11,6 +11,7 @@
|
||||
- [Introduction](#introduction)
|
||||
- [Helper functions](#helper-functions)
|
||||
- [`compute_merkle_proof_for_state`](#compute_merkle_proof_for_state)
|
||||
- [`block_to_light_client_header`](#block_to_light_client_header)
|
||||
- [Deriving light client data](#deriving-light-client-data)
|
||||
- [`create_light_client_bootstrap`](#create_light_client_bootstrap)
|
||||
- [`create_light_client_update`](#create_light_client_update)
|
||||
@@ -34,6 +35,21 @@ def compute_merkle_proof_for_state(state: BeaconState,
|
||||
...
|
||||
```
|
||||
|
||||
### `block_to_light_client_header`
|
||||
|
||||
```python
|
||||
def block_to_light_client_header(block: SignedBeaconBlock) -> LightClientHeader:
|
||||
return LightClientHeader(
|
||||
beacon=BeaconBlockHeader(
|
||||
slot=block.message.slot,
|
||||
proposer_index=block.message.proposer_index,
|
||||
parent_root=block.message.parent_root,
|
||||
state_root=block.message.state_root,
|
||||
body_root=hash_tree_root(block.message.body),
|
||||
),
|
||||
)
|
||||
```
|
||||
|
||||
## Deriving light client data
|
||||
|
||||
Full nodes are expected to derive light client data from historic blocks and states and provide it to other clients.
|
||||
@@ -55,13 +71,7 @@ def create_light_client_bootstrap(state: BeaconState,
|
||||
assert hash_tree_root(header) == hash_tree_root(block.message)
|
||||
|
||||
return LightClientBootstrap(
|
||||
header=BeaconBlockHeader(
|
||||
slot=state.latest_block_header.slot,
|
||||
proposer_index=state.latest_block_header.proposer_index,
|
||||
parent_root=state.latest_block_header.parent_root,
|
||||
state_root=hash_tree_root(state),
|
||||
body_root=state.latest_block_header.body_root,
|
||||
),
|
||||
header=block_to_light_client_header(block),
|
||||
current_sync_committee=state.current_sync_committee,
|
||||
current_sync_committee_branch=compute_merkle_proof_for_state(state, CURRENT_SYNC_COMMITTEE_INDEX),
|
||||
)
|
||||
@@ -103,48 +113,36 @@ def create_light_client_update(state: BeaconState,
|
||||
assert hash_tree_root(attested_header) == hash_tree_root(attested_block.message) == block.message.parent_root
|
||||
update_attested_period = compute_sync_committee_period_at_slot(attested_block.message.slot)
|
||||
|
||||
update = LightClientUpdate()
|
||||
|
||||
update.attested_header = block_to_light_client_header(attested_block)
|
||||
|
||||
# `next_sync_committee` is only useful if the message is signed by the current sync committee
|
||||
if update_attested_period == update_signature_period:
|
||||
next_sync_committee = attested_state.next_sync_committee
|
||||
next_sync_committee_branch = compute_merkle_proof_for_state(attested_state, NEXT_SYNC_COMMITTEE_INDEX)
|
||||
else:
|
||||
next_sync_committee = SyncCommittee()
|
||||
next_sync_committee_branch = [Bytes32() for _ in range(floorlog2(NEXT_SYNC_COMMITTEE_INDEX))]
|
||||
update.next_sync_committee = attested_state.next_sync_committee
|
||||
update.next_sync_committee_branch = compute_merkle_proof_for_state(
|
||||
attested_state, NEXT_SYNC_COMMITTEE_INDEX)
|
||||
|
||||
# Indicate finality whenever possible
|
||||
if finalized_block is not None:
|
||||
if finalized_block.message.slot != GENESIS_SLOT:
|
||||
finalized_header = BeaconBlockHeader(
|
||||
slot=finalized_block.message.slot,
|
||||
proposer_index=finalized_block.message.proposer_index,
|
||||
parent_root=finalized_block.message.parent_root,
|
||||
state_root=finalized_block.message.state_root,
|
||||
body_root=hash_tree_root(finalized_block.message.body),
|
||||
)
|
||||
assert hash_tree_root(finalized_header) == attested_state.finalized_checkpoint.root
|
||||
update.finalized_header = block_to_light_client_header(finalized_block)
|
||||
assert hash_tree_root(update.finalized_header.beacon) == attested_state.finalized_checkpoint.root
|
||||
else:
|
||||
assert attested_state.finalized_checkpoint.root == Bytes32()
|
||||
finalized_header = BeaconBlockHeader()
|
||||
finality_branch = compute_merkle_proof_for_state(attested_state, FINALIZED_ROOT_INDEX)
|
||||
else:
|
||||
finalized_header = BeaconBlockHeader()
|
||||
finality_branch = [Bytes32() for _ in range(floorlog2(FINALIZED_ROOT_INDEX))]
|
||||
update.finality_branch = compute_merkle_proof_for_state(
|
||||
attested_state, FINALIZED_ROOT_INDEX)
|
||||
|
||||
return LightClientUpdate(
|
||||
attested_header=attested_header,
|
||||
next_sync_committee=next_sync_committee,
|
||||
next_sync_committee_branch=next_sync_committee_branch,
|
||||
finalized_header=finalized_header,
|
||||
finality_branch=finality_branch,
|
||||
sync_aggregate=block.message.body.sync_aggregate,
|
||||
signature_slot=block.message.slot,
|
||||
)
|
||||
update.sync_aggregate = block.message.body.sync_aggregate
|
||||
update.signature_slot = block.message.slot
|
||||
|
||||
return update
|
||||
```
|
||||
|
||||
Full nodes SHOULD provide the best derivable `LightClientUpdate` (according to `is_better_update`) for each sync committee period covering any epochs in range `[max(ALTAIR_FORK_EPOCH, current_epoch - MIN_EPOCHS_FOR_BLOCK_REQUESTS), current_epoch]` where `current_epoch` is defined by the current wall-clock time. Full nodes MAY also provide `LightClientUpdate` for other sync committee periods.
|
||||
|
||||
- `LightClientUpdate` are assigned to sync committee periods based on their `attested_header.slot`
|
||||
- `LightClientUpdate` are only considered if `compute_sync_committee_period_at_slot(update.attested_header.slot) == compute_sync_committee_period_at_slot(update.signature_slot)`
|
||||
- `LightClientUpdate` are assigned to sync committee periods based on their `attested_header.beacon.slot`
|
||||
- `LightClientUpdate` are only considered if `compute_sync_committee_period_at_slot(update.attested_header.beacon.slot) == compute_sync_committee_period_at_slot(update.signature_slot)`
|
||||
- Only `LightClientUpdate` with `next_sync_committee` as selected by fork choice are provided, regardless of ranking by `is_better_update`. To uniquely identify a non-finalized sync committee fork, all of `period`, `current_sync_committee` and `next_sync_committee` need to be incorporated, as sync committees may reappear over time.
|
||||
|
||||
### `create_light_client_finality_update`
|
||||
@@ -160,7 +158,7 @@ def create_light_client_finality_update(update: LightClientUpdate) -> LightClien
|
||||
)
|
||||
```
|
||||
|
||||
Full nodes SHOULD provide the `LightClientFinalityUpdate` with the highest `attested_header.slot` (if multiple, highest `signature_slot`) as selected by fork choice, and SHOULD support a push mechanism to deliver new `LightClientFinalityUpdate` whenever `finalized_header` changes.
|
||||
Full nodes SHOULD provide the `LightClientFinalityUpdate` with the highest `attested_header.beacon.slot` (if multiple, highest `signature_slot`) as selected by fork choice, and SHOULD support a push mechanism to deliver new `LightClientFinalityUpdate` whenever `finalized_header` changes.
|
||||
|
||||
### `create_light_client_optimistic_update`
|
||||
|
||||
@@ -173,4 +171,4 @@ def create_light_client_optimistic_update(update: LightClientUpdate) -> LightCli
|
||||
)
|
||||
```
|
||||
|
||||
Full nodes SHOULD provide the `LightClientOptimisticUpdate` with the highest `attested_header.slot` (if multiple, highest `signature_slot`) as selected by fork choice, and SHOULD support a push mechanism to deliver new `LightClientOptimisticUpdate` whenever `attested_header` changes.
|
||||
Full nodes SHOULD provide the `LightClientOptimisticUpdate` with the highest `attested_header.beacon.slot` (if multiple, highest `signature_slot`) as selected by fork choice, and SHOULD support a push mechanism to deliver new `LightClientOptimisticUpdate` whenever `attested_header` changes.
|
||||
|
||||
@@ -23,7 +23,7 @@ This document explains how light clients MAY obtain light client data to sync wi
|
||||
1. The light client MUST be configured out-of-band with a spec/preset (including fork schedule), with `genesis_state` (including `genesis_time` and `genesis_validators_root`), and with a trusted block root. The trusted block SHOULD be within the weak subjectivity period, and its root SHOULD be from a finalized `Checkpoint`.
|
||||
2. The local clock is initialized based on the configured `genesis_time`, and the current fork digest is determined to browse for and connect to relevant light client data providers.
|
||||
3. The light client fetches a [`LightClientBootstrap`](./sync-protocol.md#lightclientbootstrap) object for the configured trusted block root. The `bootstrap` object is passed to [`initialize_light_client_store`](./sync-protocol.md#initialize_light_client_store) to obtain a local [`LightClientStore`](./sync-protocol.md#lightclientstore).
|
||||
4. The light client tracks the sync committee periods `finalized_period` from `store.finalized_header.slot`, `optimistic_period` from `store.optimistic_header.slot`, and `current_period` from `current_slot` based on the local clock.
|
||||
4. The light client tracks the sync committee periods `finalized_period` from `store.finalized_header.beacon.slot`, `optimistic_period` from `store.optimistic_header.beacon.slot`, and `current_period` from `current_slot` based on the local clock.
|
||||
1. When `finalized_period == optimistic_period` and [`is_next_sync_committee_known`](./sync-protocol.md#is_next_sync_committee_known) indicates `False`, the light client fetches a [`LightClientUpdate`](./sync-protocol.md#lightclientupdate) for `finalized_period`. If `finalized_period == current_period`, this fetch SHOULD be scheduled at a random time before `current_period` advances.
|
||||
2. When `finalized_period + 1 < current_period`, the light client fetches a `LightClientUpdate` for each sync committee period in range `[finalized_period + 1, current_period)` (current period excluded)
|
||||
3. When `finalized_period + 1 >= current_period`, the light client keeps observing [`LightClientFinalityUpdate`](./sync-protocol.md#lightclientfinalityupdate) and [`LightClientOptimisticUpdate`](./sync-protocol.md#lightclientoptimisticupdate). Received objects are passed to [`process_light_client_finality_update`](./sync-protocol.md#process_light_client_finality_update) and [`process_light_client_optimistic_update`](./sync-protocol.md#process_light_client_optimistic_update). This ensures that `finalized_header` and `optimistic_header` reflect the latest blocks.
|
||||
|
||||
@@ -59,7 +59,7 @@ New global topics are added to provide light clients with the latest updates.
|
||||
This topic is used to propagate the latest `LightClientFinalityUpdate` to light clients, allowing them to keep track of the latest `finalized_header`.
|
||||
|
||||
The following validations MUST pass before forwarding the `finality_update` on the network.
|
||||
- _[IGNORE]_ The `finalized_header.slot` is greater than that of all previously forwarded `finality_update`s
|
||||
- _[IGNORE]_ The `finalized_header.beacon.slot` is greater than that of all previously forwarded `finality_update`s
|
||||
- _[IGNORE]_ The `finality_update` is received after the block at `signature_slot` was given enough time to propagate through the network -- i.e. validate that one-third of `finality_update.signature_slot` has transpired (`SECONDS_PER_SLOT / INTERVALS_PER_SLOT` seconds after the start of the slot, with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance)
|
||||
|
||||
For full nodes, the following validations MUST additionally pass before forwarding the `finality_update` on the network.
|
||||
@@ -67,11 +67,11 @@ For full nodes, the following validations MUST additionally pass before forwardi
|
||||
|
||||
For light clients, the following validations MUST additionally pass before forwarding the `finality_update` on the network.
|
||||
- _[REJECT]_ The `finality_update` is valid -- i.e. validate that `process_light_client_finality_update` does not indicate errors
|
||||
- _[IGNORE]_ The `finality_update` advances the `finalized_header` of the local `LightClientStore` -- i.e. validate that processing `finality_update` increases `store.finalized_header.slot`
|
||||
- _[IGNORE]_ The `finality_update` advances the `finalized_header` of the local `LightClientStore` -- i.e. validate that processing `finality_update` increases `store.finalized_header.beacon.slot`
|
||||
|
||||
Light clients SHOULD call `process_light_client_finality_update` even if the message is ignored.
|
||||
|
||||
The gossip `ForkDigest`-context is determined based on `compute_fork_version(compute_epoch_at_slot(finality_update.attested_header.slot))`.
|
||||
The gossip `ForkDigest`-context is determined based on `compute_fork_version(compute_epoch_at_slot(finality_update.attested_header.beacon.slot))`.
|
||||
|
||||
Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||
|
||||
@@ -87,7 +87,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||
This topic is used to propagate the latest `LightClientOptimisticUpdate` to light clients, allowing them to keep track of the latest `optimistic_header`.
|
||||
|
||||
The following validations MUST pass before forwarding the `optimistic_update` on the network.
|
||||
- _[IGNORE]_ The `attested_header.slot` is greater than that of all previously forwarded `optimistic_update`s
|
||||
- _[IGNORE]_ The `attested_header.beacon.slot` is greater than that of all previously forwarded `optimistic_update`s
|
||||
- _[IGNORE]_ The `optimistic_update` is received after the block at `signature_slot` was given enough time to propagate through the network -- i.e. validate that one-third of `optimistic_update.signature_slot` has transpired (`SECONDS_PER_SLOT / INTERVALS_PER_SLOT` seconds after the start of the slot, with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance)
|
||||
|
||||
For full nodes, the following validations MUST additionally pass before forwarding the `optimistic_update` on the network.
|
||||
@@ -95,11 +95,11 @@ For full nodes, the following validations MUST additionally pass before forwardi
|
||||
|
||||
For light clients, the following validations MUST additionally pass before forwarding the `optimistic_update` on the network.
|
||||
- _[REJECT]_ The `optimistic_update` is valid -- i.e. validate that `process_light_client_optimistic_update` does not indicate errors
|
||||
- _[IGNORE]_ The `optimistic_update` either matches corresponding fields of the most recently forwarded `LightClientFinalityUpdate` (if any), or it advances the `optimistic_header` of the local `LightClientStore` -- i.e. validate that processing `optimistic_update` increases `store.optimistic_header.slot`
|
||||
- _[IGNORE]_ The `optimistic_update` either matches corresponding fields of the most recently forwarded `LightClientFinalityUpdate` (if any), or it advances the `optimistic_header` of the local `LightClientStore` -- i.e. validate that processing `optimistic_update` increases `store.optimistic_header.beacon.slot`
|
||||
|
||||
Light clients SHOULD call `process_light_client_optimistic_update` even if the message is ignored.
|
||||
|
||||
The gossip `ForkDigest`-context is determined based on `compute_fork_version(compute_epoch_at_slot(optimistic_update.attested_header.slot))`.
|
||||
The gossip `ForkDigest`-context is determined based on `compute_fork_version(compute_epoch_at_slot(optimistic_update.attested_header.beacon.slot))`.
|
||||
|
||||
Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||
|
||||
@@ -142,7 +142,7 @@ Peers SHOULD provide results as defined in [`create_light_client_bootstrap`](./f
|
||||
|
||||
When a `LightClientBootstrap` instance cannot be produced for a given block root, peers SHOULD respond with error code `3: ResourceUnavailable`.
|
||||
|
||||
A `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(bootstrap.header.slot))` is used to select the fork namespace of the Response type.
|
||||
A `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(bootstrap.header.beacon.slot))` is used to select the fork namespace of the Response type.
|
||||
|
||||
Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||
|
||||
@@ -180,7 +180,7 @@ The response MUST consist of zero or more `response_chunk`. Each _successful_ `r
|
||||
|
||||
Peers SHOULD provide results as defined in [`create_light_client_update`](./full-node.md#create_light_client_update). They MUST respond with at least the earliest known result within the requested range, and MUST send results in consecutive order (by period). The response MUST NOT contain more than `min(MAX_REQUEST_LIGHT_CLIENT_UPDATES, count)` results.
|
||||
|
||||
For each `response_chunk`, a `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(update.attested_header.slot))` is used to select the fork namespace of the Response type. Note that this `fork_version` may be different from the one used to verify the `update.sync_aggregate`, which is based on `update.signature_slot`.
|
||||
For each `response_chunk`, a `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(update.attested_header.beacon.slot))` is used to select the fork namespace of the Response type. Note that this `fork_version` may be different from the one used to verify the `update.sync_aggregate`, which is based on `update.signature_slot`.
|
||||
|
||||
Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||
|
||||
@@ -211,7 +211,7 @@ Peers SHOULD provide results as defined in [`create_light_client_finality_update
|
||||
|
||||
When no `LightClientFinalityUpdate` is available, peers SHOULD respond with error code `3: ResourceUnavailable`.
|
||||
|
||||
A `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(finality_update.attested_header.slot))` is used to select the fork namespace of the Response type. Note that this `fork_version` may be different from the one used to verify the `finality_update.sync_aggregate`, which is based on `finality_update.signature_slot`.
|
||||
A `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(finality_update.attested_header.beacon.slot))` is used to select the fork namespace of the Response type. Note that this `fork_version` may be different from the one used to verify the `finality_update.sync_aggregate`, which is based on `finality_update.signature_slot`.
|
||||
|
||||
Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||
|
||||
@@ -242,7 +242,7 @@ Peers SHOULD provide results as defined in [`create_light_client_optimistic_upda
|
||||
|
||||
When no `LightClientOptimisticUpdate` is available, peers SHOULD respond with error code `3: ResourceUnavailable`.
|
||||
|
||||
A `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(optimistic_update.attested_header.slot))` is used to select the fork namespace of the Response type. Note that this `fork_version` may be different from the one used to verify the `optimistic_update.sync_aggregate`, which is based on `optimistic_update.signature_slot`.
|
||||
A `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(optimistic_update.attested_header.beacon.slot))` is used to select the fork namespace of the Response type. Note that this `fork_version` may be different from the one used to verify the `optimistic_update.sync_aggregate`, which is based on `optimistic_update.signature_slot`.
|
||||
|
||||
Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||
|
||||
@@ -255,7 +255,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||
|
||||
## Light clients
|
||||
|
||||
Light clients using libp2p to stay in sync with the network SHOULD subscribe to the [`light_client_finality_update`](#light_client_finality_update) and [`light_client_optimistic_update`](#light_client_optimistic_update) pubsub topics and validate all received messages while the [light client sync process](./light-client.md#light-client-sync-process) supports processing `LightClientFinalityUpdate` and `LightClientOptimistic` structures.
|
||||
Light clients using libp2p to stay in sync with the network SHOULD subscribe to the [`light_client_finality_update`](#light_client_finality_update) and [`light_client_optimistic_update`](#light_client_optimistic_update) pubsub topics and validate all received messages while the [light client sync process](./light-client.md#light-client-sync-process) supports processing `LightClientFinalityUpdate` and `LightClientOptimisticUpdate` structures.
|
||||
|
||||
Light clients MAY also collect historic light client data and make it available to other peers. If they do, they SHOULD advertise supported message endpoints in [the Req/Resp domain](#the-reqresp-domain), and MAY also update the contents of their [`Status`](../../phase0/p2p-interface.md#status) message to reflect the locally available light client data.
|
||||
|
||||
@@ -273,7 +273,7 @@ All full nodes SHOULD subscribe to and provide stability on the [`light_client_f
|
||||
|
||||
Whenever fork choice selects a new head block with a sync aggregate participation `>= MIN_SYNC_COMMITTEE_PARTICIPANTS` and a post-Altair parent block, full nodes with at least one validator assigned to the current sync committee at the block's `slot` SHOULD broadcast derived light client data as follows:
|
||||
|
||||
- If `finalized_header.slot` increased, a `LightClientFinalityUpdate` SHOULD be broadcasted to the pubsub topic `light_client_finality_update` if no matching message has not yet been forwarded as part of gossip validation.
|
||||
- If `attested_header.slot` increased, a `LightClientOptimisticUpdate` SHOULD be broadcasted to the pubsub topic `light_client_optimistic_update` if no matching message has not yet been forwarded as part of gossip validation.
|
||||
- If `finalized_header.beacon.slot` increased, a `LightClientFinalityUpdate` SHOULD be broadcasted to the pubsub topic `light_client_finality_update` if no matching message has not yet been forwarded as part of gossip validation.
|
||||
- If `attested_header.beacon.slot` increased, a `LightClientOptimisticUpdate` SHOULD be broadcasted to the pubsub topic `light_client_optimistic_update` if no matching message has not yet been forwarded as part of gossip validation.
|
||||
|
||||
These messages SHOULD be broadcasted after one-third of `slot` has transpired (`SECONDS_PER_SLOT / INTERVALS_PER_SLOT` seconds after the start of the slot). To ensure that the corresponding block was given enough time to propagate through the network, they SHOULD NOT be sent earlier. Note that this is different from how other messages are handled, e.g., attestations, which may be sent early.
|
||||
|
||||
@@ -13,12 +13,14 @@
|
||||
- [Preset](#preset)
|
||||
- [Misc](#misc)
|
||||
- [Containers](#containers)
|
||||
- [`LightClientHeader`](#lightclientheader)
|
||||
- [`LightClientBootstrap`](#lightclientbootstrap)
|
||||
- [`LightClientUpdate`](#lightclientupdate)
|
||||
- [`LightClientFinalityUpdate`](#lightclientfinalityupdate)
|
||||
- [`LightClientOptimisticUpdate`](#lightclientoptimisticupdate)
|
||||
- [`LightClientStore`](#lightclientstore)
|
||||
- [Helper functions](#helper-functions)
|
||||
- [`is_valid_light_client_header`](#is_valid_light_client_header)
|
||||
- [`is_sync_committee_update`](#is_sync_committee_update)
|
||||
- [`is_finality_update`](#is_finality_update)
|
||||
- [`is_better_update`](#is_better_update)
|
||||
@@ -73,13 +75,23 @@ Additional documents describe how the light client sync protocol can be used:
|
||||
|
||||
## Containers
|
||||
|
||||
### `LightClientHeader`
|
||||
|
||||
```python
|
||||
class LightClientHeader(Container):
|
||||
# Beacon block header
|
||||
beacon: BeaconBlockHeader
|
||||
```
|
||||
|
||||
Future upgrades may introduce additional fields to this structure, and validate them by extending [`is_valid_light_client_header`](#is_valid_light_client_header).
|
||||
|
||||
### `LightClientBootstrap`
|
||||
|
||||
```python
|
||||
class LightClientBootstrap(Container):
|
||||
# Header matching the requested beacon block root
|
||||
header: BeaconBlockHeader
|
||||
# Current sync committee corresponding to `header.state_root`
|
||||
header: LightClientHeader
|
||||
# Current sync committee corresponding to `header.beacon.state_root`
|
||||
current_sync_committee: SyncCommittee
|
||||
current_sync_committee_branch: Vector[Bytes32, floorlog2(CURRENT_SYNC_COMMITTEE_INDEX)]
|
||||
```
|
||||
@@ -89,12 +101,12 @@ class LightClientBootstrap(Container):
|
||||
```python
|
||||
class LightClientUpdate(Container):
|
||||
# Header attested to by the sync committee
|
||||
attested_header: BeaconBlockHeader
|
||||
# Next sync committee corresponding to `attested_header.state_root`
|
||||
attested_header: LightClientHeader
|
||||
# Next sync committee corresponding to `attested_header.beacon.state_root`
|
||||
next_sync_committee: SyncCommittee
|
||||
next_sync_committee_branch: Vector[Bytes32, floorlog2(NEXT_SYNC_COMMITTEE_INDEX)]
|
||||
# Finalized header corresponding to `attested_header.state_root`
|
||||
finalized_header: BeaconBlockHeader
|
||||
# Finalized header corresponding to `attested_header.beacon.state_root`
|
||||
finalized_header: LightClientHeader
|
||||
finality_branch: Vector[Bytes32, floorlog2(FINALIZED_ROOT_INDEX)]
|
||||
# Sync committee aggregate signature
|
||||
sync_aggregate: SyncAggregate
|
||||
@@ -107,9 +119,9 @@ class LightClientUpdate(Container):
|
||||
```python
|
||||
class LightClientFinalityUpdate(Container):
|
||||
# Header attested to by the sync committee
|
||||
attested_header: BeaconBlockHeader
|
||||
# Finalized header corresponding to `attested_header.state_root`
|
||||
finalized_header: BeaconBlockHeader
|
||||
attested_header: LightClientHeader
|
||||
# Finalized header corresponding to `attested_header.beacon.state_root`
|
||||
finalized_header: LightClientHeader
|
||||
finality_branch: Vector[Bytes32, floorlog2(FINALIZED_ROOT_INDEX)]
|
||||
# Sync committee aggregate signature
|
||||
sync_aggregate: SyncAggregate
|
||||
@@ -122,7 +134,7 @@ class LightClientFinalityUpdate(Container):
|
||||
```python
|
||||
class LightClientOptimisticUpdate(Container):
|
||||
# Header attested to by the sync committee
|
||||
attested_header: BeaconBlockHeader
|
||||
attested_header: LightClientHeader
|
||||
# Sync committee aggregate signature
|
||||
sync_aggregate: SyncAggregate
|
||||
# Slot at which the aggregate signature was created (untrusted)
|
||||
@@ -135,14 +147,14 @@ class LightClientOptimisticUpdate(Container):
|
||||
@dataclass
|
||||
class LightClientStore(object):
|
||||
# Header that is finalized
|
||||
finalized_header: BeaconBlockHeader
|
||||
finalized_header: LightClientHeader
|
||||
# Sync committees corresponding to the finalized header
|
||||
current_sync_committee: SyncCommittee
|
||||
next_sync_committee: SyncCommittee
|
||||
# Best available header to switch finalized head to if we see nothing else
|
||||
best_valid_update: Optional[LightClientUpdate]
|
||||
# Most recent available reasonably-safe header
|
||||
optimistic_header: BeaconBlockHeader
|
||||
optimistic_header: LightClientHeader
|
||||
# Max number of active participants in a sync committee (used to calculate safety threshold)
|
||||
previous_max_active_participants: uint64
|
||||
current_max_active_participants: uint64
|
||||
@@ -150,6 +162,14 @@ class LightClientStore(object):
|
||||
|
||||
## Helper functions
|
||||
|
||||
### `is_valid_light_client_header`
|
||||
|
||||
```python
|
||||
def is_valid_light_client_header(header: LightClientHeader) -> bool:
|
||||
# pylint: disable=unused-argument
|
||||
return True
|
||||
```
|
||||
|
||||
### `is_sync_committee_update`
|
||||
|
||||
```python
|
||||
@@ -181,11 +201,11 @@ def is_better_update(new_update: LightClientUpdate, old_update: LightClientUpdat
|
||||
|
||||
# Compare presence of relevant sync committee
|
||||
new_has_relevant_sync_committee = is_sync_committee_update(new_update) and (
|
||||
compute_sync_committee_period_at_slot(new_update.attested_header.slot)
|
||||
compute_sync_committee_period_at_slot(new_update.attested_header.beacon.slot)
|
||||
== compute_sync_committee_period_at_slot(new_update.signature_slot)
|
||||
)
|
||||
old_has_relevant_sync_committee = is_sync_committee_update(old_update) and (
|
||||
compute_sync_committee_period_at_slot(old_update.attested_header.slot)
|
||||
compute_sync_committee_period_at_slot(old_update.attested_header.beacon.slot)
|
||||
== compute_sync_committee_period_at_slot(old_update.signature_slot)
|
||||
)
|
||||
if new_has_relevant_sync_committee != old_has_relevant_sync_committee:
|
||||
@@ -200,12 +220,12 @@ def is_better_update(new_update: LightClientUpdate, old_update: LightClientUpdat
|
||||
# Compare sync committee finality
|
||||
if new_has_finality:
|
||||
new_has_sync_committee_finality = (
|
||||
compute_sync_committee_period_at_slot(new_update.finalized_header.slot)
|
||||
== compute_sync_committee_period_at_slot(new_update.attested_header.slot)
|
||||
compute_sync_committee_period_at_slot(new_update.finalized_header.beacon.slot)
|
||||
== compute_sync_committee_period_at_slot(new_update.attested_header.beacon.slot)
|
||||
)
|
||||
old_has_sync_committee_finality = (
|
||||
compute_sync_committee_period_at_slot(old_update.finalized_header.slot)
|
||||
== compute_sync_committee_period_at_slot(old_update.attested_header.slot)
|
||||
compute_sync_committee_period_at_slot(old_update.finalized_header.beacon.slot)
|
||||
== compute_sync_committee_period_at_slot(old_update.attested_header.beacon.slot)
|
||||
)
|
||||
if new_has_sync_committee_finality != old_has_sync_committee_finality:
|
||||
return new_has_sync_committee_finality
|
||||
@@ -215,8 +235,8 @@ def is_better_update(new_update: LightClientUpdate, old_update: LightClientUpdat
|
||||
return new_num_active_participants > old_num_active_participants
|
||||
|
||||
# Tiebreaker 2: Prefer older data (fewer changes to best)
|
||||
if new_update.attested_header.slot != old_update.attested_header.slot:
|
||||
return new_update.attested_header.slot < old_update.attested_header.slot
|
||||
if new_update.attested_header.beacon.slot != old_update.attested_header.beacon.slot:
|
||||
return new_update.attested_header.beacon.slot < old_update.attested_header.beacon.slot
|
||||
return new_update.signature_slot < old_update.signature_slot
|
||||
```
|
||||
|
||||
@@ -260,14 +280,15 @@ A light client maintains its state in a `store` object of type `LightClientStore
|
||||
```python
|
||||
def initialize_light_client_store(trusted_block_root: Root,
|
||||
bootstrap: LightClientBootstrap) -> LightClientStore:
|
||||
assert hash_tree_root(bootstrap.header) == trusted_block_root
|
||||
assert is_valid_light_client_header(bootstrap.header)
|
||||
assert hash_tree_root(bootstrap.header.beacon) == trusted_block_root
|
||||
|
||||
assert is_valid_merkle_branch(
|
||||
leaf=hash_tree_root(bootstrap.current_sync_committee),
|
||||
branch=bootstrap.current_sync_committee_branch,
|
||||
depth=floorlog2(CURRENT_SYNC_COMMITTEE_INDEX),
|
||||
index=get_subtree_index(CURRENT_SYNC_COMMITTEE_INDEX),
|
||||
root=bootstrap.header.state_root,
|
||||
root=bootstrap.header.beacon.state_root,
|
||||
)
|
||||
|
||||
return LightClientStore(
|
||||
@@ -301,8 +322,11 @@ def validate_light_client_update(store: LightClientStore,
|
||||
assert sum(sync_aggregate.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS
|
||||
|
||||
# Verify update does not skip a sync committee period
|
||||
assert current_slot >= update.signature_slot > update.attested_header.slot >= update.finalized_header.slot
|
||||
store_period = compute_sync_committee_period_at_slot(store.finalized_header.slot)
|
||||
assert is_valid_light_client_header(update.attested_header)
|
||||
update_attested_slot = update.attested_header.beacon.slot
|
||||
update_finalized_slot = update.finalized_header.beacon.slot
|
||||
assert current_slot >= update.signature_slot > update_attested_slot >= update_finalized_slot
|
||||
store_period = compute_sync_committee_period_at_slot(store.finalized_header.beacon.slot)
|
||||
update_signature_period = compute_sync_committee_period_at_slot(update.signature_slot)
|
||||
if is_next_sync_committee_known(store):
|
||||
assert update_signature_period in (store_period, store_period + 1)
|
||||
@@ -310,12 +334,12 @@ def validate_light_client_update(store: LightClientStore,
|
||||
assert update_signature_period == store_period
|
||||
|
||||
# Verify update is relevant
|
||||
update_attested_period = compute_sync_committee_period_at_slot(update.attested_header.slot)
|
||||
update_attested_period = compute_sync_committee_period_at_slot(update_attested_slot)
|
||||
update_has_next_sync_committee = not is_next_sync_committee_known(store) and (
|
||||
is_sync_committee_update(update) and update_attested_period == store_period
|
||||
)
|
||||
assert (
|
||||
update.attested_header.slot > store.finalized_header.slot
|
||||
update_attested_slot > store.finalized_header.beacon.slot
|
||||
or update_has_next_sync_committee
|
||||
)
|
||||
|
||||
@@ -323,19 +347,20 @@ def validate_light_client_update(store: LightClientStore,
|
||||
# to match the finalized checkpoint root saved in the state of `attested_header`.
|
||||
# Note that the genesis finalized checkpoint root is represented as a zero hash.
|
||||
if not is_finality_update(update):
|
||||
assert update.finalized_header == BeaconBlockHeader()
|
||||
assert update.finalized_header == LightClientHeader()
|
||||
else:
|
||||
if update.finalized_header.slot == GENESIS_SLOT:
|
||||
assert update.finalized_header == BeaconBlockHeader()
|
||||
if update_finalized_slot == GENESIS_SLOT:
|
||||
assert update.finalized_header == LightClientHeader()
|
||||
finalized_root = Bytes32()
|
||||
else:
|
||||
finalized_root = hash_tree_root(update.finalized_header)
|
||||
assert is_valid_light_client_header(update.finalized_header)
|
||||
finalized_root = hash_tree_root(update.finalized_header.beacon)
|
||||
assert is_valid_merkle_branch(
|
||||
leaf=finalized_root,
|
||||
branch=update.finality_branch,
|
||||
depth=floorlog2(FINALIZED_ROOT_INDEX),
|
||||
index=get_subtree_index(FINALIZED_ROOT_INDEX),
|
||||
root=update.attested_header.state_root,
|
||||
root=update.attested_header.beacon.state_root,
|
||||
)
|
||||
|
||||
# Verify that the `next_sync_committee`, if present, actually is the next sync committee saved in the
|
||||
@@ -350,7 +375,7 @@ def validate_light_client_update(store: LightClientStore,
|
||||
branch=update.next_sync_committee_branch,
|
||||
depth=floorlog2(NEXT_SYNC_COMMITTEE_INDEX),
|
||||
index=get_subtree_index(NEXT_SYNC_COMMITTEE_INDEX),
|
||||
root=update.attested_header.state_root,
|
||||
root=update.attested_header.beacon.state_root,
|
||||
)
|
||||
|
||||
# Verify sync committee aggregate signature
|
||||
@@ -364,7 +389,7 @@ def validate_light_client_update(store: LightClientStore,
|
||||
]
|
||||
fork_version = compute_fork_version(compute_epoch_at_slot(update.signature_slot))
|
||||
domain = compute_domain(DOMAIN_SYNC_COMMITTEE, fork_version, genesis_validators_root)
|
||||
signing_root = compute_signing_root(update.attested_header, domain)
|
||||
signing_root = compute_signing_root(update.attested_header.beacon, domain)
|
||||
assert bls.FastAggregateVerify(participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature)
|
||||
```
|
||||
|
||||
@@ -372,8 +397,8 @@ def validate_light_client_update(store: LightClientStore,
|
||||
|
||||
```python
|
||||
def apply_light_client_update(store: LightClientStore, update: LightClientUpdate) -> None:
|
||||
store_period = compute_sync_committee_period_at_slot(store.finalized_header.slot)
|
||||
update_finalized_period = compute_sync_committee_period_at_slot(update.finalized_header.slot)
|
||||
store_period = compute_sync_committee_period_at_slot(store.finalized_header.beacon.slot)
|
||||
update_finalized_period = compute_sync_committee_period_at_slot(update.finalized_header.beacon.slot)
|
||||
if not is_next_sync_committee_known(store):
|
||||
assert update_finalized_period == store_period
|
||||
store.next_sync_committee = update.next_sync_committee
|
||||
@@ -382,9 +407,9 @@ def apply_light_client_update(store: LightClientStore, update: LightClientUpdate
|
||||
store.next_sync_committee = update.next_sync_committee
|
||||
store.previous_max_active_participants = store.current_max_active_participants
|
||||
store.current_max_active_participants = 0
|
||||
if update.finalized_header.slot > store.finalized_header.slot:
|
||||
if update.finalized_header.beacon.slot > store.finalized_header.beacon.slot:
|
||||
store.finalized_header = update.finalized_header
|
||||
if store.finalized_header.slot > store.optimistic_header.slot:
|
||||
if store.finalized_header.beacon.slot > store.optimistic_header.beacon.slot:
|
||||
store.optimistic_header = store.finalized_header
|
||||
```
|
||||
|
||||
@@ -393,14 +418,14 @@ def apply_light_client_update(store: LightClientStore, update: LightClientUpdate
|
||||
```python
|
||||
def process_light_client_store_force_update(store: LightClientStore, current_slot: Slot) -> None:
|
||||
if (
|
||||
current_slot > store.finalized_header.slot + UPDATE_TIMEOUT
|
||||
current_slot > store.finalized_header.beacon.slot + UPDATE_TIMEOUT
|
||||
and store.best_valid_update is not None
|
||||
):
|
||||
# Forced best update when the update timeout has elapsed.
|
||||
# Because the apply logic waits for `finalized_header.slot` to indicate sync committee finality,
|
||||
# Because the apply logic waits for `finalized_header.beacon.slot` to indicate sync committee finality,
|
||||
# the `attested_header` may be treated as `finalized_header` in extended periods of non-finality
|
||||
# to guarantee progression into later sync committee periods according to `is_better_update`.
|
||||
if store.best_valid_update.finalized_header.slot <= store.finalized_header.slot:
|
||||
if store.best_valid_update.finalized_header.beacon.slot <= store.finalized_header.beacon.slot:
|
||||
store.best_valid_update.finalized_header = store.best_valid_update.attested_header
|
||||
apply_light_client_update(store, store.best_valid_update)
|
||||
store.best_valid_update = None
|
||||
@@ -433,7 +458,7 @@ def process_light_client_update(store: LightClientStore,
|
||||
# Update the optimistic header
|
||||
if (
|
||||
sum(sync_committee_bits) > get_safety_threshold(store)
|
||||
and update.attested_header.slot > store.optimistic_header.slot
|
||||
and update.attested_header.beacon.slot > store.optimistic_header.beacon.slot
|
||||
):
|
||||
store.optimistic_header = update.attested_header
|
||||
|
||||
@@ -441,14 +466,14 @@ def process_light_client_update(store: LightClientStore,
|
||||
update_has_finalized_next_sync_committee = (
|
||||
not is_next_sync_committee_known(store)
|
||||
and is_sync_committee_update(update) and is_finality_update(update) and (
|
||||
compute_sync_committee_period_at_slot(update.finalized_header.slot)
|
||||
== compute_sync_committee_period_at_slot(update.attested_header.slot)
|
||||
compute_sync_committee_period_at_slot(update.finalized_header.beacon.slot)
|
||||
== compute_sync_committee_period_at_slot(update.attested_header.beacon.slot)
|
||||
)
|
||||
)
|
||||
if (
|
||||
sum(sync_committee_bits) * 3 >= len(sync_committee_bits) * 2
|
||||
and (
|
||||
update.finalized_header.slot > store.finalized_header.slot
|
||||
update.finalized_header.beacon.slot > store.finalized_header.beacon.slot
|
||||
or update_has_finalized_next_sync_committee
|
||||
)
|
||||
):
|
||||
@@ -487,7 +512,7 @@ def process_light_client_optimistic_update(store: LightClientStore,
|
||||
attested_header=optimistic_update.attested_header,
|
||||
next_sync_committee=SyncCommittee(),
|
||||
next_sync_committee_branch=[Bytes32() for _ in range(floorlog2(NEXT_SYNC_COMMITTEE_INDEX))],
|
||||
finalized_header=BeaconBlockHeader(),
|
||||
finalized_header=LightClientHeader(),
|
||||
finality_branch=[Bytes32() for _ in range(floorlog2(FINALIZED_ROOT_INDEX))],
|
||||
sync_aggregate=optimistic_update.sync_aggregate,
|
||||
signature_slot=optimistic_update.signature_slot,
|
||||
|
||||
@@ -18,6 +18,7 @@
|
||||
- [`Withdrawal`](#withdrawal)
|
||||
- [`BLSToExecutionChange`](#blstoexecutionchange)
|
||||
- [`SignedBLSToExecutionChange`](#signedblstoexecutionchange)
|
||||
- [`HistoricalSummary`](#historicalsummary)
|
||||
- [Extended Containers](#extended-containers)
|
||||
- [`ExecutionPayload`](#executionpayload)
|
||||
- [`ExecutionPayloadHeader`](#executionpayloadheader)
|
||||
@@ -29,6 +30,8 @@
|
||||
- [`is_fully_withdrawable_validator`](#is_fully_withdrawable_validator)
|
||||
- [`is_partially_withdrawable_validator`](#is_partially_withdrawable_validator)
|
||||
- [Beacon chain state transition function](#beacon-chain-state-transition-function)
|
||||
- [Epoch processing](#epoch-processing)
|
||||
- [Historical summaries updates](#historical-summaries-updates)
|
||||
- [Block processing](#block-processing)
|
||||
- [New `get_expected_withdrawals`](#new-get_expected_withdrawals)
|
||||
- [New `process_withdrawals`](#new-process_withdrawals)
|
||||
@@ -50,6 +53,11 @@ to validator withdrawals. Including:
|
||||
* Operation to change from `BLS_WITHDRAWAL_PREFIX` to
|
||||
`ETH1_ADDRESS_WITHDRAWAL_PREFIX` versioned withdrawal credentials to enable withdrawals for a validator.
|
||||
|
||||
Another new feature is the new independent state and block historical accumulators
|
||||
that replace the original singular historical roots. With these accumulators, it becomes possible to validate
|
||||
the entire block history that led up to that particular state without any additional information
|
||||
beyond the state and the blocks.
|
||||
|
||||
## Custom types
|
||||
|
||||
We define the following Python custom types for type hinting and readability:
|
||||
@@ -115,6 +123,18 @@ class SignedBLSToExecutionChange(Container):
|
||||
signature: BLSSignature
|
||||
```
|
||||
|
||||
#### `HistoricalSummary`
|
||||
|
||||
```python
|
||||
class HistoricalSummary(Container):
|
||||
"""
|
||||
`HistoricalSummary` matches the components of the phase0 `HistoricalBatch`
|
||||
making the two hash_tree_root-compatible.
|
||||
"""
|
||||
block_summary_root: Root
|
||||
state_summary_root: Root
|
||||
```
|
||||
|
||||
### Extended Containers
|
||||
|
||||
#### `ExecutionPayload`
|
||||
@@ -196,7 +216,7 @@ class BeaconState(Container):
|
||||
latest_block_header: BeaconBlockHeader
|
||||
block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
|
||||
state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
|
||||
historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT]
|
||||
historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT] # Frozen in Capella, replaced by historical_summaries
|
||||
# Eth1
|
||||
eth1_data: Eth1Data
|
||||
eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH]
|
||||
@@ -222,10 +242,12 @@ class BeaconState(Container):
|
||||
current_sync_committee: SyncCommittee
|
||||
next_sync_committee: SyncCommittee
|
||||
# Execution
|
||||
latest_execution_payload_header: ExecutionPayloadHeader
|
||||
latest_execution_payload_header: ExecutionPayloadHeader # [Modified in Capella]
|
||||
# Withdrawals
|
||||
next_withdrawal_index: WithdrawalIndex # [New in Capella]
|
||||
next_withdrawal_validator_index: ValidatorIndex # [New in Capella]
|
||||
# Deep history valid from Capella onwards
|
||||
historical_summaries: List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT] # [New in Capella]
|
||||
```
|
||||
|
||||
## Helpers
|
||||
@@ -270,6 +292,40 @@ def is_partially_withdrawable_validator(validator: Validator, balance: Gwei) ->
|
||||
|
||||
## Beacon chain state transition function
|
||||
|
||||
### Epoch processing
|
||||
|
||||
*Note*: The function `process_historical_summaries_update` replaces `process_historical_roots_update` in Bellatrix.
|
||||
|
||||
```python
|
||||
def process_epoch(state: BeaconState) -> None:
|
||||
process_justification_and_finalization(state)
|
||||
process_inactivity_updates(state)
|
||||
process_rewards_and_penalties(state)
|
||||
process_registry_updates(state)
|
||||
process_slashings(state)
|
||||
process_eth1_data_reset(state)
|
||||
process_effective_balance_updates(state)
|
||||
process_slashings_reset(state)
|
||||
process_randao_mixes_reset(state)
|
||||
process_historical_summaries_update(state) # [Modified in Capella]
|
||||
process_participation_flag_updates(state)
|
||||
process_sync_committee_updates(state)
|
||||
```
|
||||
|
||||
#### Historical summaries updates
|
||||
|
||||
```python
|
||||
def process_historical_summaries_update(state: BeaconState) -> None:
|
||||
# Set historical block root accumulator.
|
||||
next_epoch = Epoch(get_current_epoch(state) + 1)
|
||||
if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0:
|
||||
historical_summary = HistoricalSummary(
|
||||
block_summary_root=hash_tree_root(state.block_roots),
|
||||
state_summary_root=hash_tree_root(state.state_roots),
|
||||
)
|
||||
state.historical_summaries.append(historical_summary)
|
||||
```
|
||||
|
||||
### Block processing
|
||||
|
||||
```python
|
||||
@@ -416,7 +472,8 @@ def process_bls_to_execution_change(state: BeaconState,
|
||||
assert validator.withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX
|
||||
assert validator.withdrawal_credentials[1:] == hash(address_change.from_bls_pubkey)[1:]
|
||||
|
||||
domain = get_domain(state, DOMAIN_BLS_TO_EXECUTION_CHANGE)
|
||||
# Fork-agnostic domain since address changes are valid across forks
|
||||
domain = compute_domain(DOMAIN_BLS_TO_EXECUTION_CHANGE, genesis_validators_root=state.genesis_validators_root)
|
||||
signing_root = compute_signing_root(address_change, domain)
|
||||
assert bls.Verify(address_change.from_bls_pubkey, signing_root, signed_address_change.signature)
|
||||
|
||||
|
||||
@@ -129,8 +129,10 @@ def upgrade_to_capella(pre: bellatrix.BeaconState) -> BeaconState:
|
||||
# Execution-layer
|
||||
latest_execution_payload_header=latest_execution_payload_header,
|
||||
# Withdrawals
|
||||
next_withdrawal_index=WithdrawalIndex(0),
|
||||
next_withdrawal_validator_index=ValidatorIndex(0),
|
||||
next_withdrawal_index=WithdrawalIndex(0), # [New in Capella]
|
||||
next_withdrawal_validator_index=ValidatorIndex(0), # [New in Capella]
|
||||
# Deep history valid from Capella onwards
|
||||
historical_summaries=List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT]([]), # [New in Capella]
|
||||
)
|
||||
|
||||
return post
|
||||
|
||||
92
specs/capella/light-client/fork.md
Normal file
92
specs/capella/light-client/fork.md
Normal file
@@ -0,0 +1,92 @@
|
||||
# Capella Light Client -- Fork Logic
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Upgrading light client data](#upgrading-light-client-data)
|
||||
- [Upgrading the store](#upgrading-the-store)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This document describes how to upgrade existing light client objects based on the [Altair specification](../../altair/light-client/sync-protocol.md) to Capella. This is necessary when processing pre-Capella data with a post-Capella `LightClientStore`. Note that the data being exchanged over the network protocols uses the original format.
|
||||
|
||||
### Upgrading light client data
|
||||
|
||||
A Capella `LightClientStore` can still process earlier light client data. In order to do so, that pre-Capella data needs to be locally upgraded to Capella before processing.
|
||||
|
||||
```python
|
||||
def upgrade_lc_header_to_capella(pre: bellatrix.LightClientHeader) -> LightClientHeader:
|
||||
return LightClientHeader(
|
||||
beacon=pre.beacon,
|
||||
)
|
||||
```
|
||||
|
||||
```python
|
||||
def upgrade_lc_bootstrap_to_capella(pre: bellatrix.LightClientBootstrap) -> LightClientBootstrap:
|
||||
return LightClientBootstrap(
|
||||
header=upgrade_lc_header_to_capella(pre.header),
|
||||
current_sync_committee=pre.current_sync_committee,
|
||||
current_sync_committee_branch=pre.current_sync_committee_branch,
|
||||
)
|
||||
```
|
||||
|
||||
```python
|
||||
def upgrade_lc_update_to_capella(pre: bellatrix.LightClientUpdate) -> LightClientUpdate:
|
||||
return LightClientUpdate(
|
||||
attested_header=upgrade_lc_header_to_capella(pre.attested_header),
|
||||
next_sync_committee=pre.next_sync_committee,
|
||||
next_sync_committee_branch=pre.next_sync_committee_branch,
|
||||
finalized_header=upgrade_lc_header_to_capella(pre.finalized_header),
|
||||
finality_branch=pre.finality_branch,
|
||||
sync_aggregate=pre.sync_aggregate,
|
||||
signature_slot=pre.signature_slot,
|
||||
)
|
||||
```
|
||||
|
||||
```python
|
||||
def upgrade_lc_finality_update_to_capella(pre: bellatrix.LightClientFinalityUpdate) -> LightClientFinalityUpdate:
|
||||
return LightClientFinalityUpdate(
|
||||
attested_header=upgrade_lc_header_to_capella(pre.attested_header),
|
||||
finalized_header=upgrade_lc_header_to_capella(pre.finalized_header),
|
||||
finality_branch=pre.finality_branch,
|
||||
sync_aggregate=pre.sync_aggregate,
|
||||
signature_slot=pre.signature_slot,
|
||||
)
|
||||
```
|
||||
|
||||
```python
|
||||
def upgrade_lc_optimistic_update_to_capella(pre: bellatrix.LightClientOptimisticUpdate) -> LightClientOptimisticUpdate:
|
||||
return LightClientOptimisticUpdate(
|
||||
attested_header=upgrade_lc_header_to_capella(pre.attested_header),
|
||||
sync_aggregate=pre.sync_aggregate,
|
||||
signature_slot=pre.signature_slot,
|
||||
)
|
||||
```
|
||||
|
||||
### Upgrading the store
|
||||
|
||||
Existing `LightClientStore` objects based on Altair MUST be upgraded to Capella before Capella based light client data can be processed. The `LightClientStore` upgrade MAY be performed before `CAPELLA_FORK_EPOCH`.
|
||||
|
||||
```python
|
||||
def upgrade_lc_store_to_capella(pre: bellatrix.LightClientStore) -> LightClientStore:
|
||||
if pre.best_valid_update is None:
|
||||
best_valid_update = None
|
||||
else:
|
||||
best_valid_update = upgrade_lc_update_to_capella(pre.best_valid_update)
|
||||
return LightClientStore(
|
||||
finalized_header=upgrade_lc_header_to_capella(pre.finalized_header),
|
||||
current_sync_committee=pre.current_sync_committee,
|
||||
next_sync_committee=pre.next_sync_committee,
|
||||
best_valid_update=best_valid_update,
|
||||
optimistic_header=upgrade_lc_header_to_capella(pre.optimistic_header),
|
||||
previous_max_active_participants=pre.previous_max_active_participants,
|
||||
current_max_active_participants=pre.current_max_active_participants,
|
||||
)
|
||||
```
|
||||
78
specs/capella/light-client/full-node.md
Normal file
78
specs/capella/light-client/full-node.md
Normal file
@@ -0,0 +1,78 @@
|
||||
# Capella Light Client -- Full Node
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Helper functions](#helper-functions)
|
||||
- [`compute_merkle_proof_for_block_body`](#compute_merkle_proof_for_block_body)
|
||||
- [Modified `block_to_light_client_header`](#modified-block_to_light_client_header)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This upgrade adds information about the execution payload to light client data as part of the Capella upgrade.
|
||||
|
||||
## Helper functions
|
||||
|
||||
### `compute_merkle_proof_for_block_body`
|
||||
|
||||
```python
|
||||
def compute_merkle_proof_for_block_body(body: BeaconBlockBody,
|
||||
index: GeneralizedIndex) -> Sequence[Bytes32]:
|
||||
...
|
||||
```
|
||||
|
||||
### Modified `block_to_light_client_header`
|
||||
|
||||
```python
|
||||
def block_to_light_client_header(block: SignedBeaconBlock) -> LightClientHeader:
|
||||
epoch = compute_epoch_at_slot(block.message.slot)
|
||||
|
||||
if epoch >= CAPELLA_FORK_EPOCH:
|
||||
payload = block.message.body.execution_payload
|
||||
execution_header = ExecutionPayloadHeader(
|
||||
parent_hash=payload.parent_hash,
|
||||
fee_recipient=payload.fee_recipient,
|
||||
state_root=payload.state_root,
|
||||
receipts_root=payload.receipts_root,
|
||||
logs_bloom=payload.logs_bloom,
|
||||
prev_randao=payload.prev_randao,
|
||||
block_number=payload.block_number,
|
||||
gas_limit=payload.gas_limit,
|
||||
gas_used=payload.gas_used,
|
||||
timestamp=payload.timestamp,
|
||||
extra_data=payload.extra_data,
|
||||
base_fee_per_gas=payload.base_fee_per_gas,
|
||||
block_hash=payload.block_hash,
|
||||
transactions_root=hash_tree_root(payload.transactions),
|
||||
withdrawals_root=hash_tree_root(payload.withdrawals),
|
||||
)
|
||||
execution_branch = compute_merkle_proof_for_block_body(block.message.body, EXECUTION_PAYLOAD_INDEX)
|
||||
else:
|
||||
# Note that during fork transitions, `finalized_header` may still point to earlier forks.
|
||||
# While Bellatrix blocks also contain an `ExecutionPayload` (minus `withdrawals_root`),
|
||||
# it was not included in the corresponding light client data. To ensure compatibility
|
||||
# with legacy data going through `upgrade_lc_header_to_capella`, leave out execution data.
|
||||
execution_header = ExecutionPayloadHeader()
|
||||
execution_branch = [Bytes32() for _ in range(floorlog2(EXECUTION_PAYLOAD_INDEX))]
|
||||
|
||||
return LightClientHeader(
|
||||
beacon=BeaconBlockHeader(
|
||||
slot=block.message.slot,
|
||||
proposer_index=block.message.proposer_index,
|
||||
parent_root=block.message.parent_root,
|
||||
state_root=block.message.state_root,
|
||||
body_root=hash_tree_root(block.message.body),
|
||||
),
|
||||
execution=execution_header,
|
||||
execution_branch=execution_branch,
|
||||
)
|
||||
```
|
||||
99
specs/capella/light-client/p2p-interface.md
Normal file
99
specs/capella/light-client/p2p-interface.md
Normal file
@@ -0,0 +1,99 @@
|
||||
# Capella Light Client -- Networking
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Networking](#networking)
|
||||
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
|
||||
- [Topics and messages](#topics-and-messages)
|
||||
- [Global topics](#global-topics)
|
||||
- [`light_client_finality_update`](#light_client_finality_update)
|
||||
- [`light_client_optimistic_update`](#light_client_optimistic_update)
|
||||
- [The Req/Resp domain](#the-reqresp-domain)
|
||||
- [Messages](#messages)
|
||||
- [GetLightClientBootstrap](#getlightclientbootstrap)
|
||||
- [LightClientUpdatesByRange](#lightclientupdatesbyrange)
|
||||
- [GetLightClientFinalityUpdate](#getlightclientfinalityupdate)
|
||||
- [GetLightClientOptimisticUpdate](#getlightclientoptimisticupdate)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Networking
|
||||
|
||||
The [Altair light client networking specification](../../altair/light-client/p2p-interface.md) is extended to exchange [Capella light client data](./sync-protocol.md).
|
||||
|
||||
### The gossip domain: gossipsub
|
||||
|
||||
#### Topics and messages
|
||||
|
||||
##### Global topics
|
||||
|
||||
###### `light_client_finality_update`
|
||||
|
||||
[0]: # (eth2spec: skip)
|
||||
|
||||
| `fork_version` | Message SSZ type |
|
||||
| ------------------------------------------------------ | ------------------------------------- |
|
||||
| `GENESIS_FORK_VERSION` | n/a |
|
||||
| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientFinalityUpdate` |
|
||||
| `CAPELLA_FORK_VERSION` and later | `capella.LightClientFinalityUpdate` |
|
||||
|
||||
###### `light_client_optimistic_update`
|
||||
|
||||
[0]: # (eth2spec: skip)
|
||||
|
||||
| `fork_version` | Message SSZ type |
|
||||
| ------------------------------------------------------ | ------------------------------------- |
|
||||
| `GENESIS_FORK_VERSION` | n/a |
|
||||
| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientOptimisticUpdate` |
|
||||
| `CAPELLA_FORK_VERSION` and later | `capella.LightClientOptimisticUpdate` |
|
||||
|
||||
### The Req/Resp domain
|
||||
|
||||
#### Messages
|
||||
|
||||
##### GetLightClientBootstrap
|
||||
|
||||
[0]: # (eth2spec: skip)
|
||||
|
||||
| `fork_version` | Response SSZ type |
|
||||
| ------------------------------------------------------ | ------------------------------------- |
|
||||
| `GENESIS_FORK_VERSION` | n/a |
|
||||
| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientBootstrap` |
|
||||
| `CAPELLA_FORK_VERSION` and later | `capella.LightClientBootstrap` |
|
||||
|
||||
##### LightClientUpdatesByRange
|
||||
|
||||
[0]: # (eth2spec: skip)
|
||||
|
||||
| `fork_version` | Response chunk SSZ type |
|
||||
| ------------------------------------------------------ | ------------------------------------- |
|
||||
| `GENESIS_FORK_VERSION` | n/a |
|
||||
| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientUpdate` |
|
||||
| `CAPELLA_FORK_VERSION` and later | `capella.LightClientUpdate` |
|
||||
|
||||
##### GetLightClientFinalityUpdate
|
||||
|
||||
[0]: # (eth2spec: skip)
|
||||
|
||||
| `fork_version` | Response SSZ type |
|
||||
| ------------------------------------------------------ | ------------------------------------- |
|
||||
| `GENESIS_FORK_VERSION` | n/a |
|
||||
| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientFinalityUpdate` |
|
||||
| `CAPELLA_FORK_VERSION` and later | `capella.LightClientFinalityUpdate` |
|
||||
|
||||
##### GetLightClientOptimisticUpdate
|
||||
|
||||
[0]: # (eth2spec: skip)
|
||||
|
||||
| `fork_version` | Response SSZ type |
|
||||
| ------------------------------------------------------ | ------------------------------------- |
|
||||
| `GENESIS_FORK_VERSION` | n/a |
|
||||
| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientOptimisticUpdate` |
|
||||
| `CAPELLA_FORK_VERSION` and later | `capella.LightClientOptimisticUpdate` |
|
||||
82
specs/capella/light-client/sync-protocol.md
Normal file
82
specs/capella/light-client/sync-protocol.md
Normal file
@@ -0,0 +1,82 @@
|
||||
# Capella Light Client -- Sync Protocol
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Constants](#constants)
|
||||
- [Containers](#containers)
|
||||
- [Modified `LightClientHeader`](#modified-lightclientheader)
|
||||
- [Helper functions](#helper-functions)
|
||||
- [`get_lc_execution_root`](#get_lc_execution_root)
|
||||
- [Modified `is_valid_light_client_header`](#modified-is_valid_light_client_header)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This upgrade adds information about the execution payload to light client data as part of the Capella upgrade. It extends the [Altair Light Client specifications](../../altair/light-client/sync-protocol.md). The [fork document](./fork.md) explains how to upgrade existing Altair based deployments to Capella.
|
||||
|
||||
Additional documents describes the impact of the upgrade on certain roles:
|
||||
- [Full node](./full-node.md)
|
||||
- [Networking](./p2p-interface.md)
|
||||
|
||||
## Constants
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `EXECUTION_PAYLOAD_INDEX` | `get_generalized_index(BeaconBlockBody, 'execution_payload')` (= 25) |
|
||||
|
||||
## Containers
|
||||
|
||||
### Modified `LightClientHeader`
|
||||
|
||||
```python
|
||||
class LightClientHeader(Container):
|
||||
# Beacon block header
|
||||
beacon: BeaconBlockHeader
|
||||
# Execution payload header corresponding to `beacon.body_root` (from Capella onward)
|
||||
execution: ExecutionPayloadHeader
|
||||
execution_branch: Vector[Bytes32, floorlog2(EXECUTION_PAYLOAD_INDEX)]
|
||||
```
|
||||
|
||||
## Helper functions
|
||||
|
||||
### `get_lc_execution_root`
|
||||
|
||||
```python
|
||||
def get_lc_execution_root(header: LightClientHeader) -> Root:
|
||||
epoch = compute_epoch_at_slot(header.beacon.slot)
|
||||
|
||||
if epoch >= CAPELLA_FORK_EPOCH:
|
||||
return hash_tree_root(header.execution)
|
||||
|
||||
return Root()
|
||||
```
|
||||
|
||||
### Modified `is_valid_light_client_header`
|
||||
|
||||
```python
|
||||
def is_valid_light_client_header(header: LightClientHeader) -> bool:
|
||||
epoch = compute_epoch_at_slot(header.beacon.slot)
|
||||
|
||||
if epoch < CAPELLA_FORK_EPOCH:
|
||||
return (
|
||||
header.execution == ExecutionPayloadHeader()
|
||||
and header.execution_branch == [Bytes32() for _ in range(floorlog2(EXECUTION_PAYLOAD_INDEX))]
|
||||
)
|
||||
|
||||
return is_valid_merkle_branch(
|
||||
leaf=get_lc_execution_root(header),
|
||||
branch=header.execution_branch,
|
||||
depth=floorlog2(EXECUTION_PAYLOAD_INDEX),
|
||||
index=get_subtree_index(EXECUTION_PAYLOAD_INDEX),
|
||||
root=header.beacon.body_root,
|
||||
)
|
||||
```
|
||||
@@ -61,6 +61,8 @@ This topic is used to propagate signed bls to execution change messages to be in
|
||||
|
||||
The following validations MUST pass before forwarding the `signed_bls_to_execution_change` on the network:
|
||||
|
||||
- _[IGNORE]_ `current_epoch >= CAPELLA_FORK_EPOCH`,
|
||||
where `current_epoch` is defined by the current wall-clock time.
|
||||
- _[IGNORE]_ The `signed_bls_to_execution_change` is the first valid signed bls to execution change received
|
||||
for the validator with index `signed_bls_to_execution_change.message.validator_index`.
|
||||
- _[REJECT]_ All of the conditions within `process_bls_to_execution_change` pass validation.
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# EIP-4844 -- The Beacon Chain
|
||||
# Deneb -- The Beacon Chain
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
- [Introduction](#introduction)
|
||||
- [Custom types](#custom-types)
|
||||
- [Constants](#constants)
|
||||
- [Domain types](#domain-types)
|
||||
- [Blob](#blob)
|
||||
- [Preset](#preset)
|
||||
- [Execution](#execution)
|
||||
@@ -22,8 +23,6 @@
|
||||
- [`ExecutionPayloadHeader`](#executionpayloadheader)
|
||||
- [Helper functions](#helper-functions)
|
||||
- [Misc](#misc)
|
||||
- [`validate_blobs_sidecar`](#validate_blobs_sidecar)
|
||||
- [`is_data_available`](#is_data_available)
|
||||
- [`kzg_commitment_to_versioned_hash`](#kzg_commitment_to_versioned_hash)
|
||||
- [`tx_peek_blob_versioned_hashes`](#tx_peek_blob_versioned_hashes)
|
||||
- [`verify_kzg_commitments_against_transactions`](#verify_kzg_commitments_against_transactions)
|
||||
@@ -33,29 +32,35 @@
|
||||
- [`process_execution_payload`](#process_execution_payload)
|
||||
- [Blob KZG commitments](#blob-kzg-commitments)
|
||||
- [Testing](#testing)
|
||||
- [Disabling Withdrawals](#disabling-withdrawals)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This upgrade adds blobs to the beacon chain as part of EIP-4844. This is an extension of the Capella upgrade.
|
||||
This upgrade adds blobs to the beacon chain as part of Deneb. This is an extension of the Capella upgrade.
|
||||
|
||||
## Custom types
|
||||
|
||||
| Name | SSZ equivalent | Description |
|
||||
| - | - | - |
|
||||
| `VersionedHash` | `Bytes32` | |
|
||||
| `BlobIndex` | `uint64` | |
|
||||
|
||||
## Constants
|
||||
|
||||
### Domain types
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `DOMAIN_BLOB_SIDECAR` | `DomainType('0x0B000000')` |
|
||||
|
||||
### Blob
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `BLOB_TX_TYPE` | `uint8(0x05)` |
|
||||
| `VERSIONED_HASH_VERSION_KZG` | `Bytes1('0x01')` |
|
||||
| `VERSIONED_HASH_VERSION_KZG` | `Bytes1('0x01')` |
|
||||
|
||||
## Preset
|
||||
|
||||
@@ -89,9 +94,9 @@ class BeaconBlockBody(Container):
|
||||
voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]
|
||||
sync_aggregate: SyncAggregate
|
||||
# Execution
|
||||
execution_payload: ExecutionPayload # [Modified in EIP-4844]
|
||||
execution_payload: ExecutionPayload # [Modified in Deneb]
|
||||
bls_to_execution_changes: List[SignedBLSToExecutionChange, MAX_BLS_TO_EXECUTION_CHANGES]
|
||||
blob_kzg_commitments: List[KZGCommitment, MAX_BLOBS_PER_BLOCK] # [New in EIP-4844]
|
||||
blob_kzg_commitments: List[KZGCommitment, MAX_BLOBS_PER_BLOCK] # [New in Deneb]
|
||||
```
|
||||
|
||||
#### `ExecutionPayload`
|
||||
@@ -111,11 +116,11 @@ class ExecutionPayload(Container):
|
||||
timestamp: uint64
|
||||
extra_data: ByteList[MAX_EXTRA_DATA_BYTES]
|
||||
base_fee_per_gas: uint256
|
||||
excess_data_gas: uint256 # [New in EIP-4844]
|
||||
# Extra payload fields
|
||||
block_hash: Hash32 # Hash of execution block
|
||||
transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD]
|
||||
withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]
|
||||
excess_data_gas: uint256 # [New in Deneb]
|
||||
```
|
||||
|
||||
#### `ExecutionPayloadHeader`
|
||||
@@ -135,57 +140,17 @@ class ExecutionPayloadHeader(Container):
|
||||
timestamp: uint64
|
||||
extra_data: ByteList[MAX_EXTRA_DATA_BYTES]
|
||||
base_fee_per_gas: uint256
|
||||
excess_data_gas: uint256 # [New in EIP-4844]
|
||||
# Extra payload fields
|
||||
block_hash: Hash32 # Hash of execution block
|
||||
transactions_root: Root
|
||||
withdrawals_root: Root
|
||||
excess_data_gas: uint256 # [New in Deneb]
|
||||
```
|
||||
|
||||
## Helper functions
|
||||
|
||||
### Misc
|
||||
|
||||
#### `validate_blobs_sidecar`
|
||||
|
||||
```python
|
||||
def validate_blobs_sidecar(slot: Slot,
|
||||
beacon_block_root: Root,
|
||||
expected_kzg_commitments: Sequence[KZGCommitment],
|
||||
blobs_sidecar: BlobsSidecar) -> None:
|
||||
assert slot == blobs_sidecar.beacon_block_slot
|
||||
assert beacon_block_root == blobs_sidecar.beacon_block_root
|
||||
blobs = blobs_sidecar.blobs
|
||||
kzg_aggregated_proof = blobs_sidecar.kzg_aggregated_proof
|
||||
assert len(expected_kzg_commitments) == len(blobs)
|
||||
|
||||
assert verify_aggregate_kzg_proof(blobs, expected_kzg_commitments, kzg_aggregated_proof)
|
||||
```
|
||||
|
||||
#### `is_data_available`
|
||||
|
||||
The implementation of `is_data_available` will become more sophisticated during later sharding upgrades.
|
||||
Initially, it requires every verifying actor to retrieve the matching `BlobsSidecar`,
|
||||
and validate the sidecar with `validate_blobs_sidecar`.
|
||||
|
||||
The block MUST NOT be considered valid until a valid `BlobsSidecar` has been downloaded. Blocks that have been previously validated as available SHOULD be considered available even if the associated `BlobsSidecar` has subsequently been pruned.
|
||||
|
||||
```python
|
||||
def is_data_available(slot: Slot, beacon_block_root: Root, blob_kzg_commitments: Sequence[KZGCommitment]) -> bool:
|
||||
# `retrieve_blobs_sidecar` is implementation and context dependent, raises an exception if not available.
|
||||
# Note: the p2p network does not guarantee sidecar retrieval outside of `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS`
|
||||
sidecar = retrieve_blobs_sidecar(slot, beacon_block_root)
|
||||
|
||||
# For testing, `retrieve_blobs_sidecar` returns "TEST".
|
||||
# TODO: Remove it once we have a way to inject `BlobsSidecar` into tests.
|
||||
if isinstance(sidecar, str):
|
||||
return True
|
||||
|
||||
validate_blobs_sidecar(slot, beacon_block_root, blob_kzg_commitments, sidecar)
|
||||
return True
|
||||
```
|
||||
|
||||
|
||||
#### `kzg_commitment_to_versioned_hash`
|
||||
|
||||
```python
|
||||
@@ -195,7 +160,7 @@ def kzg_commitment_to_versioned_hash(kzg_commitment: KZGCommitment) -> Versioned
|
||||
|
||||
#### `tx_peek_blob_versioned_hashes`
|
||||
|
||||
This function retrieves the hashes from the `SignedBlobTransaction` as defined in EIP-4844, using SSZ offsets.
|
||||
This function retrieves the hashes from the `SignedBlobTransaction` as defined in Deneb, using SSZ offsets.
|
||||
Offsets are little-endian `uint32` values, as defined in the [SSZ specification](../../ssz/simple-serialize.md).
|
||||
See [the full details of `blob_versioned_hashes` offset calculation](https://gist.github.com/protolambda/23bd106b66f6d4bb854ce46044aa3ca3).
|
||||
|
||||
@@ -235,15 +200,12 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:
|
||||
process_block_header(state, block)
|
||||
if is_execution_enabled(state, block.body):
|
||||
process_withdrawals(state, block.body.execution_payload)
|
||||
process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE) # [Modified in EIP-4844]
|
||||
process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE) # [Modified in Deneb]
|
||||
process_randao(state, block.body)
|
||||
process_eth1_data(state, block.body)
|
||||
process_operations(state, block.body)
|
||||
process_sync_aggregate(state, block.body.sync_aggregate)
|
||||
process_blob_kzg_commitments(state, block.body) # [New in EIP-4844]
|
||||
|
||||
# New in EIP-4844
|
||||
assert is_data_available(block.slot, hash_tree_root(block), block.body.blob_kzg_commitments)
|
||||
process_blob_kzg_commitments(state, block.body) # [New in Deneb]
|
||||
```
|
||||
|
||||
#### Execution payload
|
||||
@@ -276,10 +238,10 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe
|
||||
timestamp=payload.timestamp,
|
||||
extra_data=payload.extra_data,
|
||||
base_fee_per_gas=payload.base_fee_per_gas,
|
||||
excess_data_gas=payload.excess_data_gas, # [New in EIP-4844]
|
||||
block_hash=payload.block_hash,
|
||||
transactions_root=hash_tree_root(payload.transactions),
|
||||
withdrawals_root=hash_tree_root(payload.withdrawals),
|
||||
excess_data_gas=payload.excess_data_gas, # [New in Deneb]
|
||||
)
|
||||
```
|
||||
|
||||
@@ -293,9 +255,9 @@ def process_blob_kzg_commitments(state: BeaconState, body: BeaconBlockBody) -> N
|
||||
|
||||
## Testing
|
||||
|
||||
*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure EIP-4844 testing only.
|
||||
*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure Deneb testing only.
|
||||
|
||||
The `BeaconState` initialization is unchanged, except for the use of the updated `eip4844.BeaconBlockBody` type
|
||||
The `BeaconState` initialization is unchanged, except for the use of the updated `deneb.BeaconBlockBody` type
|
||||
when initializing the first body-root.
|
||||
|
||||
```python
|
||||
@@ -305,8 +267,8 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
|
||||
execution_payload_header: ExecutionPayloadHeader=ExecutionPayloadHeader()
|
||||
) -> BeaconState:
|
||||
fork = Fork(
|
||||
previous_version=EIP4844_FORK_VERSION, # [Modified in EIP-4844] for testing only
|
||||
current_version=EIP4844_FORK_VERSION, # [Modified in EIP-4844]
|
||||
previous_version=DENEB_FORK_VERSION, # [Modified in Deneb] for testing only
|
||||
current_version=DENEB_FORK_VERSION, # [Modified in Deneb]
|
||||
epoch=GENESIS_EPOCH,
|
||||
)
|
||||
state = BeaconState(
|
||||
@@ -346,11 +308,3 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
|
||||
|
||||
return state
|
||||
```
|
||||
|
||||
### Disabling Withdrawals
|
||||
|
||||
During testing we avoid Capella-specific updates to the state transition. We do this by replacing the following functions with a no-op implementation:
|
||||
- `process_withdrawals`
|
||||
- `process_bls_to_execution_change`
|
||||
|
||||
The `get_expected_withdrawals` function is also modified to return an empty withdrawals list. As such, the `PayloadAttributes` used to update forkchoice does not contain withdrawals.
|
||||
122
specs/deneb/fork-choice.md
Normal file
122
specs/deneb/fork-choice.md
Normal file
@@ -0,0 +1,122 @@
|
||||
# Deneb -- Fork Choice
|
||||
|
||||
## Table of contents
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Containers](#containers)
|
||||
- [Helpers](#helpers)
|
||||
- [`validate_blobs`](#validate_blobs)
|
||||
- [`is_data_available`](#is_data_available)
|
||||
- [Updated fork-choice handlers](#updated-fork-choice-handlers)
|
||||
- [`on_block`](#on_block)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This is the modification of the fork choice accompanying the Deneb upgrade.
|
||||
|
||||
## Containers
|
||||
|
||||
## Helpers
|
||||
|
||||
#### `validate_blobs`
|
||||
|
||||
```python
|
||||
def validate_blobs(expected_kzg_commitments: Sequence[KZGCommitment],
|
||||
blobs: Sequence[Blob],
|
||||
proofs: Sequence[KZGProof]) -> None:
|
||||
assert len(expected_kzg_commitments) == len(blobs)
|
||||
assert len(blobs) == len(proofs)
|
||||
|
||||
assert verify_blob_kzg_proof_batch(blobs, expected_kzg_commitments, proofs)
|
||||
```
|
||||
|
||||
#### `is_data_available`
|
||||
|
||||
The implementation of `is_data_available` will become more sophisticated during later scaling upgrades.
|
||||
Initially, verification requires every verifying actor to retrieve all matching `Blob`s and `KZGProof`s, and validate them with `validate_blobs`.
|
||||
|
||||
The block MUST NOT be considered valid until all valid `Blob`s have been downloaded. Blocks that have been previously validated as available SHOULD be considered available even if the associated `Blob`s have subsequently been pruned.
|
||||
|
||||
```python
|
||||
def is_data_available(beacon_block_root: Root, blob_kzg_commitments: Sequence[KZGCommitment]) -> bool:
|
||||
# `retrieve_blobs_and_proofs` is implementation and context dependent
|
||||
# It returns all the blobs for the given block root, and raises an exception if not available
|
||||
# Note: the p2p network does not guarantee sidecar retrieval outside of `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS`
|
||||
blobs, proofs = retrieve_blobs_and_proofs(beacon_block_root)
|
||||
|
||||
# For testing, `retrieve_blobs_and_proofs` returns ("TEST", "TEST").
|
||||
# TODO: Remove it once we have a way to inject `BlobSidecar` into tests.
|
||||
if isinstance(blobs, str) or isinstance(proofs, str):
|
||||
return True
|
||||
|
||||
validate_blobs(blob_kzg_commitments, blobs, proofs)
|
||||
return True
|
||||
```
|
||||
|
||||
## Updated fork-choice handlers
|
||||
|
||||
### `on_block`
|
||||
|
||||
*Note*: The only modification is the addition of the verification of transition block conditions.
|
||||
|
||||
```python
|
||||
def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
|
||||
"""
|
||||
Run ``on_block`` upon receiving a new block.
|
||||
"""
|
||||
block = signed_block.message
|
||||
# Parent block must be known
|
||||
assert block.parent_root in store.block_states
|
||||
# Make a copy of the state to avoid mutability issues
|
||||
pre_state = copy(store.block_states[block.parent_root])
|
||||
# Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past.
|
||||
assert get_current_slot(store) >= block.slot
|
||||
|
||||
# Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor)
|
||||
finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
|
||||
assert block.slot > finalized_slot
|
||||
# Check block is a descendant of the finalized block at the checkpoint finalized slot
|
||||
assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root
|
||||
|
||||
# [New in Deneb]
|
||||
# Check if blob data is available
|
||||
# If not, this block MAY be queued and subsequently considered when blob data becomes available
|
||||
assert is_data_available(hash_tree_root(block), block.body.blob_kzg_commitments)
|
||||
|
||||
# Check the block is valid and compute the post-state
|
||||
state = pre_state.copy()
|
||||
state_transition(state, signed_block, True)
|
||||
|
||||
# Check the merge transition
|
||||
if is_merge_transition_block(pre_state, block.body):
|
||||
validate_merge_block(block)
|
||||
|
||||
# Add new block to the store
|
||||
store.blocks[hash_tree_root(block)] = block
|
||||
# Add new state for this block to the store
|
||||
store.block_states[hash_tree_root(block)] = state
|
||||
|
||||
# Add proposer score boost if the block is timely
|
||||
time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
|
||||
is_before_attesting_interval = time_into_slot < SECONDS_PER_SLOT // INTERVALS_PER_SLOT
|
||||
if get_current_slot(store) == block.slot and is_before_attesting_interval:
|
||||
store.proposer_boost_root = hash_tree_root(block)
|
||||
|
||||
# Update justified checkpoint
|
||||
if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
|
||||
if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
|
||||
store.best_justified_checkpoint = state.current_justified_checkpoint
|
||||
if should_update_justified_checkpoint(store, state.current_justified_checkpoint):
|
||||
store.justified_checkpoint = state.current_justified_checkpoint
|
||||
|
||||
# Update finalized checkpoint
|
||||
if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
|
||||
store.finalized_checkpoint = state.finalized_checkpoint
|
||||
store.justified_checkpoint = state.current_justified_checkpoint
|
||||
```
|
||||
@@ -1,4 +1,4 @@
|
||||
# EIP-4844 -- Fork Logic
|
||||
# Deneb -- Fork Logic
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
- [Helper functions](#helper-functions)
|
||||
- [Misc](#misc)
|
||||
- [Modified `compute_fork_version`](#modified-compute_fork_version)
|
||||
- [Fork to EIP-4844](#fork-to-eip-4844)
|
||||
- [Fork to Deneb](#fork-to-deneb)
|
||||
- [Fork trigger](#fork-trigger)
|
||||
- [Upgrading the state](#upgrading-the-state)
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
|
||||
## Introduction
|
||||
|
||||
This document describes the process of EIP-4844 upgrade.
|
||||
This document describes the process of the Deneb upgrade.
|
||||
|
||||
## Configuration
|
||||
|
||||
@@ -28,8 +28,8 @@ Warning: this configuration is not definitive.
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `EIP4844_FORK_VERSION` | `Version('0x04000000')` |
|
||||
| `EIP4844_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** |
|
||||
| `DENEB_FORK_VERSION` | `Version('0x04000000')` |
|
||||
| `DENEB_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** |
|
||||
|
||||
## Helper functions
|
||||
|
||||
@@ -42,8 +42,8 @@ def compute_fork_version(epoch: Epoch) -> Version:
|
||||
"""
|
||||
Return the fork version at the given ``epoch``.
|
||||
"""
|
||||
if epoch >= EIP4844_FORK_EPOCH:
|
||||
return EIP4844_FORK_VERSION
|
||||
if epoch >= DENEB_FORK_EPOCH:
|
||||
return DENEB_FORK_VERSION
|
||||
if epoch >= CAPELLA_FORK_EPOCH:
|
||||
return CAPELLA_FORK_VERSION
|
||||
if epoch >= BELLATRIX_FORK_EPOCH:
|
||||
@@ -53,21 +53,21 @@ def compute_fork_version(epoch: Epoch) -> Version:
|
||||
return GENESIS_FORK_VERSION
|
||||
```
|
||||
|
||||
## Fork to EIP-4844
|
||||
## Fork to Deneb
|
||||
|
||||
### Fork trigger
|
||||
|
||||
TBD. This fork is defined for testing purposes; the EIP may be combined with another consensus-layer upgrade.
|
||||
For now, we assume the condition will be triggered at epoch `EIP4844_FORK_EPOCH`.
|
||||
For now, we assume the condition will be triggered at epoch `DENEB_FORK_EPOCH`.
|
||||
|
||||
Note that for the pure EIP-4844 networks, we don't apply `upgrade_to_eip4844` since it starts with EIP-4844 version logic.
|
||||
Note that for the pure Deneb networks, we don't apply `upgrade_to_deneb` since it starts with Deneb version logic.
|
||||
|
||||
### Upgrading the state
|
||||
|
||||
Since the `eip4844.BeaconState` format is equal to the `capella.BeaconState` format, we only have to update `BeaconState.fork`.
|
||||
Since the `deneb.BeaconState` format is equal to the `capella.BeaconState` format, we only have to update `BeaconState.fork`.
|
||||
|
||||
```python
|
||||
def upgrade_to_eip4844(pre: capella.BeaconState) -> BeaconState:
|
||||
def upgrade_to_deneb(pre: capella.BeaconState) -> BeaconState:
|
||||
epoch = capella.get_current_epoch(pre)
|
||||
latest_execution_payload_header = ExecutionPayloadHeader(
|
||||
parent_hash=pre.latest_execution_payload_header.parent_hash,
|
||||
@@ -82,7 +82,7 @@ def upgrade_to_eip4844(pre: capella.BeaconState) -> BeaconState:
|
||||
timestamp=pre.latest_execution_payload_header.timestamp,
|
||||
extra_data=pre.latest_execution_payload_header.extra_data,
|
||||
base_fee_per_gas=pre.latest_execution_payload_header.base_fee_per_gas,
|
||||
excess_data_gas=uint256(0), # [New in EIP-4844]
|
||||
excess_data_gas=uint256(0), # [New in Deneb]
|
||||
block_hash=pre.latest_execution_payload_header.block_hash,
|
||||
transactions_root=pre.latest_execution_payload_header.transactions_root,
|
||||
withdrawals_root=pre.latest_execution_payload_header.withdrawals_root,
|
||||
@@ -94,7 +94,7 @@ def upgrade_to_eip4844(pre: capella.BeaconState) -> BeaconState:
|
||||
slot=pre.slot,
|
||||
fork=Fork(
|
||||
previous_version=pre.fork.current_version,
|
||||
current_version=EIP4844_FORK_VERSION, # [Modified in EIP4844]
|
||||
current_version=DENEB_FORK_VERSION, # [Modified in Deneb]
|
||||
epoch=epoch,
|
||||
),
|
||||
# History
|
||||
@@ -127,10 +127,12 @@ def upgrade_to_eip4844(pre: capella.BeaconState) -> BeaconState:
|
||||
current_sync_committee=pre.current_sync_committee,
|
||||
next_sync_committee=pre.next_sync_committee,
|
||||
# Execution-layer
|
||||
latest_execution_payload_header=latest_execution_payload_header, # [Modified in EIP4844]
|
||||
latest_execution_payload_header=latest_execution_payload_header, # [Modified in Deneb]
|
||||
# Withdrawals
|
||||
next_withdrawal_index=pre.next_withdrawal_index,
|
||||
next_withdrawal_validator_index=pre.next_withdrawal_validator_index,
|
||||
# Deep history valid from Capella onwards
|
||||
historical_summaries=pre.historical_summaries,
|
||||
)
|
||||
|
||||
return post
|
||||
110
specs/deneb/light-client/fork.md
Normal file
110
specs/deneb/light-client/fork.md
Normal file
@@ -0,0 +1,110 @@
|
||||
# Deneb Light Client -- Fork Logic
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Upgrading light client data](#upgrading-light-client-data)
|
||||
- [Upgrading the store](#upgrading-the-store)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This document describes how to upgrade existing light client objects based on the [Capella specification](../../capella/light-client/sync-protocol.md) to Deneb. This is necessary when processing pre-Deneb data with a post-Deneb `LightClientStore`. Note that the data being exchanged over the network protocols uses the original format.
|
||||
|
||||
### Upgrading light client data
|
||||
|
||||
A Deneb `LightClientStore` can still process earlier light client data. In order to do so, that pre-Deneb data needs to be locally upgraded to Deneb before processing.
|
||||
|
||||
```python
|
||||
def upgrade_lc_header_to_deneb(pre: capella.LightClientHeader) -> LightClientHeader:
|
||||
return LightClientHeader(
|
||||
beacon=pre.beacon,
|
||||
execution=ExecutionPayloadHeader(
|
||||
parent_hash=pre.execution.parent_hash,
|
||||
fee_recipient=pre.execution.fee_recipient,
|
||||
state_root=pre.execution.state_root,
|
||||
receipts_root=pre.execution.receipts_root,
|
||||
logs_bloom=pre.execution.logs_bloom,
|
||||
prev_randao=pre.execution.prev_randao,
|
||||
block_number=pre.execution.block_number,
|
||||
gas_limit=pre.execution.gas_limit,
|
||||
gas_used=pre.execution.gas_used,
|
||||
timestamp=pre.execution.timestamp,
|
||||
extra_data=pre.execution.extra_data,
|
||||
base_fee_per_gas=pre.execution.base_fee_per_gas,
|
||||
block_hash=pre.execution.block_hash,
|
||||
transactions_root=pre.execution.transactions_root,
|
||||
withdrawals_root=pre.execution.withdrawals_root,
|
||||
),
|
||||
execution_branch=pre.execution_branch,
|
||||
)
|
||||
```
|
||||
|
||||
```python
|
||||
def upgrade_lc_bootstrap_to_deneb(pre: capella.LightClientBootstrap) -> LightClientBootstrap:
|
||||
return LightClientBootstrap(
|
||||
header=upgrade_lc_header_to_deneb(pre.header),
|
||||
current_sync_committee=pre.current_sync_committee,
|
||||
current_sync_committee_branch=pre.current_sync_committee_branch,
|
||||
)
|
||||
```
|
||||
|
||||
```python
|
||||
def upgrade_lc_update_to_deneb(pre: capella.LightClientUpdate) -> LightClientUpdate:
|
||||
return LightClientUpdate(
|
||||
attested_header=upgrade_lc_header_to_deneb(pre.attested_header),
|
||||
next_sync_committee=pre.next_sync_committee,
|
||||
next_sync_committee_branch=pre.next_sync_committee_branch,
|
||||
finalized_header=upgrade_lc_header_to_deneb(pre.finalized_header),
|
||||
finality_branch=pre.finality_branch,
|
||||
sync_aggregate=pre.sync_aggregate,
|
||||
signature_slot=pre.signature_slot,
|
||||
)
|
||||
```
|
||||
|
||||
```python
|
||||
def upgrade_lc_finality_update_to_deneb(pre: capella.LightClientFinalityUpdate) -> LightClientFinalityUpdate:
|
||||
return LightClientFinalityUpdate(
|
||||
attested_header=upgrade_lc_header_to_deneb(pre.attested_header),
|
||||
finalized_header=upgrade_lc_header_to_deneb(pre.finalized_header),
|
||||
finality_branch=pre.finality_branch,
|
||||
sync_aggregate=pre.sync_aggregate,
|
||||
signature_slot=pre.signature_slot,
|
||||
)
|
||||
```
|
||||
|
||||
```python
|
||||
def upgrade_lc_optimistic_update_to_deneb(pre: capella.LightClientOptimisticUpdate) -> LightClientOptimisticUpdate:
|
||||
return LightClientOptimisticUpdate(
|
||||
attested_header=upgrade_lc_header_to_deneb(pre.attested_header),
|
||||
sync_aggregate=pre.sync_aggregate,
|
||||
signature_slot=pre.signature_slot,
|
||||
)
|
||||
```
|
||||
|
||||
### Upgrading the store
|
||||
|
||||
Existing `LightClientStore` objects based on Capella MUST be upgraded to Deneb before Deneb based light client data can be processed. The `LightClientStore` upgrade MAY be performed before `DENEB_FORK_EPOCH`.
|
||||
|
||||
```python
|
||||
def upgrade_lc_store_to_deneb(pre: capella.LightClientStore) -> LightClientStore:
|
||||
if pre.best_valid_update is None:
|
||||
best_valid_update = None
|
||||
else:
|
||||
best_valid_update = upgrade_lc_update_to_deneb(pre.best_valid_update)
|
||||
return LightClientStore(
|
||||
finalized_header=upgrade_lc_header_to_deneb(pre.finalized_header),
|
||||
current_sync_committee=pre.current_sync_committee,
|
||||
next_sync_committee=pre.next_sync_committee,
|
||||
best_valid_update=best_valid_update,
|
||||
optimistic_header=upgrade_lc_header_to_deneb(pre.optimistic_header),
|
||||
previous_max_active_participants=pre.previous_max_active_participants,
|
||||
current_max_active_participants=pre.current_max_active_participants,
|
||||
)
|
||||
```
|
||||
74
specs/deneb/light-client/full-node.md
Normal file
74
specs/deneb/light-client/full-node.md
Normal file
@@ -0,0 +1,74 @@
|
||||
# Deneb Light Client -- Full Node
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Helper functions](#helper-functions)
|
||||
- [Modified `block_to_light_client_header`](#modified-block_to_light_client_header)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This upgrade adds information about the execution payload to light client data as part of the Deneb upgrade.
|
||||
|
||||
## Helper functions
|
||||
|
||||
### Modified `block_to_light_client_header`
|
||||
|
||||
```python
|
||||
def block_to_light_client_header(block: SignedBeaconBlock) -> LightClientHeader:
|
||||
epoch = compute_epoch_at_slot(block.message.slot)
|
||||
|
||||
if epoch >= CAPELLA_FORK_EPOCH:
|
||||
payload = block.message.body.execution_payload
|
||||
execution_header = ExecutionPayloadHeader(
|
||||
parent_hash=payload.parent_hash,
|
||||
fee_recipient=payload.fee_recipient,
|
||||
state_root=payload.state_root,
|
||||
receipts_root=payload.receipts_root,
|
||||
logs_bloom=payload.logs_bloom,
|
||||
prev_randao=payload.prev_randao,
|
||||
block_number=payload.block_number,
|
||||
gas_limit=payload.gas_limit,
|
||||
gas_used=payload.gas_used,
|
||||
timestamp=payload.timestamp,
|
||||
extra_data=payload.extra_data,
|
||||
base_fee_per_gas=payload.base_fee_per_gas,
|
||||
block_hash=payload.block_hash,
|
||||
transactions_root=hash_tree_root(payload.transactions),
|
||||
withdrawals_root=hash_tree_root(payload.withdrawals),
|
||||
)
|
||||
|
||||
# [New in Deneb]
|
||||
if epoch >= DENEB_FORK_EPOCH:
|
||||
execution_header.excess_data_gas = payload.excess_data_gas
|
||||
|
||||
execution_branch = compute_merkle_proof_for_block_body(block.message.body, EXECUTION_PAYLOAD_INDEX)
|
||||
else:
|
||||
# Note that during fork transitions, `finalized_header` may still point to earlier forks.
|
||||
# While Bellatrix blocks also contain an `ExecutionPayload` (minus `withdrawals_root`),
|
||||
# it was not included in the corresponding light client data. To ensure compatibility
|
||||
# with legacy data going through `upgrade_lc_header_to_capella`, leave out execution data.
|
||||
execution_header = ExecutionPayloadHeader()
|
||||
execution_branch = [Bytes32() for _ in range(floorlog2(EXECUTION_PAYLOAD_INDEX))]
|
||||
|
||||
return LightClientHeader(
|
||||
beacon=BeaconBlockHeader(
|
||||
slot=block.message.slot,
|
||||
proposer_index=block.message.proposer_index,
|
||||
parent_root=block.message.parent_root,
|
||||
state_root=block.message.state_root,
|
||||
body_root=hash_tree_root(block.message.body),
|
||||
),
|
||||
execution=execution_header,
|
||||
execution_branch=execution_branch,
|
||||
)
|
||||
```
|
||||
105
specs/deneb/light-client/p2p-interface.md
Normal file
105
specs/deneb/light-client/p2p-interface.md
Normal file
@@ -0,0 +1,105 @@
|
||||
# Deneb Light Client -- Networking
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Networking](#networking)
|
||||
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
|
||||
- [Topics and messages](#topics-and-messages)
|
||||
- [Global topics](#global-topics)
|
||||
- [`light_client_finality_update`](#light_client_finality_update)
|
||||
- [`light_client_optimistic_update`](#light_client_optimistic_update)
|
||||
- [The Req/Resp domain](#the-reqresp-domain)
|
||||
- [Messages](#messages)
|
||||
- [GetLightClientBootstrap](#getlightclientbootstrap)
|
||||
- [LightClientUpdatesByRange](#lightclientupdatesbyrange)
|
||||
- [GetLightClientFinalityUpdate](#getlightclientfinalityupdate)
|
||||
- [GetLightClientOptimisticUpdate](#getlightclientoptimisticupdate)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Networking
|
||||
|
||||
The [Capella light client networking specification](../../capella/light-client/p2p-interface.md) is extended to exchange [Deneb light client data](./sync-protocol.md).
|
||||
|
||||
### The gossip domain: gossipsub
|
||||
|
||||
#### Topics and messages
|
||||
|
||||
##### Global topics
|
||||
|
||||
###### `light_client_finality_update`
|
||||
|
||||
[0]: # (eth2spec: skip)
|
||||
|
||||
| `fork_version` | Message SSZ type |
|
||||
|--------------------------------------------------------|-------------------------------------|
|
||||
| `GENESIS_FORK_VERSION` | n/a |
|
||||
| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientFinalityUpdate` |
|
||||
| `CAPELLA_FORK_VERSION` | `capella.LightClientFinalityUpdate` |
|
||||
| `DENEB_FORK_VERSION` and later | `deneb.LightClientFinalityUpdate` |
|
||||
|
||||
###### `light_client_optimistic_update`
|
||||
|
||||
[0]: # (eth2spec: skip)
|
||||
|
||||
| `fork_version` | Message SSZ type |
|
||||
|--------------------------------------------------------|---------------------------------------|
|
||||
| `GENESIS_FORK_VERSION` | n/a |
|
||||
| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientOptimisticUpdate` |
|
||||
| `CAPELLA_FORK_VERSION` | `capella.LightClientOptimisticUpdate` |
|
||||
| `DENEB_FORK_VERSION` and later | `deneb.LightClientOptimisticUpdate` |
|
||||
|
||||
### The Req/Resp domain
|
||||
|
||||
#### Messages
|
||||
|
||||
##### GetLightClientBootstrap
|
||||
|
||||
[0]: # (eth2spec: skip)
|
||||
|
||||
| `fork_version` | Response SSZ type |
|
||||
|--------------------------------------------------------|------------------------------------|
|
||||
| `GENESIS_FORK_VERSION` | n/a |
|
||||
| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientBootstrap` |
|
||||
| `CAPELLA_FORK_VERSION` | `capella.LightClientBootstrap` |
|
||||
| `DENEB_FORK_VERSION` and later | `deneb.LightClientBootstrap` |
|
||||
|
||||
##### LightClientUpdatesByRange
|
||||
|
||||
[0]: # (eth2spec: skip)
|
||||
|
||||
| `fork_version` | Response chunk SSZ type |
|
||||
|--------------------------------------------------------|----------------------------------|
|
||||
| `GENESIS_FORK_VERSION` | n/a |
|
||||
| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientUpdate` |
|
||||
| `CAPELLA_FORK_VERSION` | `capella.LightClientUpdate` |
|
||||
| `DENEB_FORK_VERSION` and later | `deneb.LightClientUpdate` |
|
||||
|
||||
##### GetLightClientFinalityUpdate
|
||||
|
||||
[0]: # (eth2spec: skip)
|
||||
|
||||
| `fork_version` | Response SSZ type |
|
||||
|--------------------------------------------------------|-------------------------------------|
|
||||
| `GENESIS_FORK_VERSION` | n/a |
|
||||
| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientFinalityUpdate` |
|
||||
| `CAPELLA_FORK_VERSION` | `capella.LightClientFinalityUpdate` |
|
||||
| `DENEB_FORK_VERSION` and later | `deneb.LightClientFinalityUpdate` |
|
||||
|
||||
##### GetLightClientOptimisticUpdate
|
||||
|
||||
[0]: # (eth2spec: skip)
|
||||
|
||||
| `fork_version` | Response SSZ type |
|
||||
|--------------------------------------------------------|---------------------------------------|
|
||||
| `GENESIS_FORK_VERSION` | n/a |
|
||||
| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientOptimisticUpdate` |
|
||||
| `CAPELLA_FORK_VERSION` | `capella.LightClientOptimisticUpdate` |
|
||||
| `DENEB_FORK_VERSION` and later | `deneb.LightClientOptimisticUpdate` |
|
||||
87
specs/deneb/light-client/sync-protocol.md
Normal file
87
specs/deneb/light-client/sync-protocol.md
Normal file
@@ -0,0 +1,87 @@
|
||||
# Deneb Light Client -- Sync Protocol
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Helper functions](#helper-functions)
|
||||
- [Modified `get_lc_execution_root`](#modified-get_lc_execution_root)
|
||||
- [Modified `is_valid_light_client_header`](#modified-is_valid_light_client_header)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This upgrade updates light client data to include the Deneb changes to the [`ExecutionPayload`](../beacon-chain.md) structure. It extends the [Capella Light Client specifications](../../capella/light-client/sync-protocol.md). The [fork document](./fork.md) explains how to upgrade existing Capella based deployments to Deneb.
|
||||
|
||||
Additional documents describe the impact of the upgrade on certain roles:
|
||||
- [Full node](./full-node.md)
|
||||
- [Networking](./p2p-interface.md)
|
||||
|
||||
## Helper functions
|
||||
|
||||
### Modified `get_lc_execution_root`
|
||||
|
||||
```python
|
||||
def get_lc_execution_root(header: LightClientHeader) -> Root:
|
||||
epoch = compute_epoch_at_slot(header.beacon.slot)
|
||||
|
||||
# [New in Deneb]
|
||||
if epoch >= DENEB_FORK_EPOCH:
|
||||
return hash_tree_root(header.execution)
|
||||
|
||||
# [Modified in Deneb]
|
||||
if epoch >= CAPELLA_FORK_EPOCH:
|
||||
execution_header = capella.ExecutionPayloadHeader(
|
||||
parent_hash=header.execution.parent_hash,
|
||||
fee_recipient=header.execution.fee_recipient,
|
||||
state_root=header.execution.state_root,
|
||||
receipts_root=header.execution.receipts_root,
|
||||
logs_bloom=header.execution.logs_bloom,
|
||||
prev_randao=header.execution.prev_randao,
|
||||
block_number=header.execution.block_number,
|
||||
gas_limit=header.execution.gas_limit,
|
||||
gas_used=header.execution.gas_used,
|
||||
timestamp=header.execution.timestamp,
|
||||
extra_data=header.execution.extra_data,
|
||||
base_fee_per_gas=header.execution.base_fee_per_gas,
|
||||
block_hash=header.execution.block_hash,
|
||||
transactions_root=header.execution.transactions_root,
|
||||
withdrawals_root=header.execution.withdrawals_root,
|
||||
)
|
||||
return hash_tree_root(execution_header)
|
||||
|
||||
return Root()
|
||||
```
|
||||
|
||||
### Modified `is_valid_light_client_header`
|
||||
|
||||
```python
|
||||
def is_valid_light_client_header(header: LightClientHeader) -> bool:
|
||||
epoch = compute_epoch_at_slot(header.beacon.slot)
|
||||
|
||||
# [New in Deneb]
|
||||
if epoch < DENEB_FORK_EPOCH:
|
||||
if header.execution.excess_data_gas != uint256(0):
|
||||
return False
|
||||
|
||||
if epoch < CAPELLA_FORK_EPOCH:
|
||||
return (
|
||||
header.execution == ExecutionPayloadHeader()
|
||||
and header.execution_branch == [Bytes32() for _ in range(floorlog2(EXECUTION_PAYLOAD_INDEX))]
|
||||
)
|
||||
|
||||
return is_valid_merkle_branch(
|
||||
leaf=get_lc_execution_root(header),
|
||||
branch=header.execution_branch,
|
||||
depth=floorlog2(EXECUTION_PAYLOAD_INDEX),
|
||||
index=get_subtree_index(EXECUTION_PAYLOAD_INDEX),
|
||||
root=header.beacon.body_root,
|
||||
)
|
||||
```
|
||||
304
specs/deneb/p2p-interface.md
Normal file
304
specs/deneb/p2p-interface.md
Normal file
@@ -0,0 +1,304 @@
|
||||
# Deneb -- Networking
|
||||
|
||||
This document contains the consensus-layer networking specification for Deneb.
|
||||
|
||||
The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite.
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Configuration](#configuration)
|
||||
- [Containers](#containers)
|
||||
- [`BlobSidecar`](#blobsidecar)
|
||||
- [`SignedBlobSidecar`](#signedblobsidecar)
|
||||
- [`BlobIdentifier`](#blobidentifier)
|
||||
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
|
||||
- [Topics and messages](#topics-and-messages)
|
||||
- [Global topics](#global-topics)
|
||||
- [`beacon_block`](#beacon_block)
|
||||
- [`blob_sidecar_{index}`](#blob_sidecar_index)
|
||||
- [Transitioning the gossip](#transitioning-the-gossip)
|
||||
- [The Req/Resp domain](#the-reqresp-domain)
|
||||
- [Messages](#messages)
|
||||
- [BeaconBlocksByRange v2](#beaconblocksbyrange-v2)
|
||||
- [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2)
|
||||
- [BlobSidecarsByRoot v1](#blobsidecarsbyroot-v1)
|
||||
- [BlobSidecarsByRange v1](#blobsidecarsbyrange-v1)
|
||||
- [Design decision rationale](#design-decision-rationale)
|
||||
- [Why are blobs relayed as a sidecar, separate from beacon blocks?](#why-are-blobs-relayed-as-a-sidecar-separate-from-beacon-blocks)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Configuration
|
||||
|
||||
| Name | Value | Description |
|
||||
|------------------------------------------|-----------------------------------|---------------------------------------------------------------------|
|
||||
| `MAX_REQUEST_BLOCKS_DENEB` | `2**7` (= 128) | Maximum number of blocks in a single request |
|
||||
| `MAX_REQUEST_BLOB_SIDECARS` | `2**7` (= 128) | Maximum number of blob sidecars in a single request |
|
||||
| `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` | `2**12` (= 4096 epochs, ~18 days) | The minimum epoch range over which a node must serve blob sidecars |
|
||||
|
||||
## Containers
|
||||
|
||||
### `BlobSidecar`
|
||||
|
||||
```python
|
||||
class BlobSidecar(Container):
    # Root of the beacon block this sidecar belongs to (see `BlobIdentifier`
    # and the gossip uniqueness tuple `(sidecar.block_root, sidecar.index)`)
    block_root: Root
    index: BlobIndex  # Index of blob in block
    slot: Slot
    block_parent_root: Root  # Proposer shuffling determinant
    proposer_index: ValidatorIndex
    blob: Blob
    kzg_commitment: KZGCommitment
    kzg_proof: KZGProof  # Allows for quick verification of kzg_commitment
|
||||
```
|
||||
|
||||
### `SignedBlobSidecar`
|
||||
|
||||
```python
|
||||
class SignedBlobSidecar(Container):
    message: BlobSidecar
    # Proposer signature over ``message`` (validated against
    # `sidecar.proposer_index` per the gossip rules above)
    signature: BLSSignature
|
||||
```
|
||||
|
||||
### `BlobIdentifier`
|
||||
|
||||
```python
|
||||
class BlobIdentifier(Container):
    # Uniquely identifies one blob sidecar by (block root, blob index)
    block_root: Root
    index: BlobIndex
|
||||
```
|
||||
|
||||
## The gossip domain: gossipsub
|
||||
|
||||
Some gossip meshes are upgraded in the fork of Deneb to support upgraded types.
|
||||
|
||||
### Topics and messages
|
||||
|
||||
Topics follow the same specification as in prior upgrades.
|
||||
|
||||
The `beacon_block` topic is modified to also support deneb blocks and new topics are added per table below. All other topics remain stable.
|
||||
|
||||
The specification around the creation, validation, and dissemination of messages has not changed from the Capella document unless explicitly noted here.
|
||||
|
||||
The derivation of the `message-id` remains stable.
|
||||
|
||||
The new topics along with the type of the `data` field of a gossipsub message are given in this table:
|
||||
|
||||
| Name | Message Type |
|
||||
| - | - |
|
||||
| `blob_sidecar_{index}` | `SignedBlobSidecar` (new) |
|
||||
|
||||
#### Global topics
|
||||
|
||||
Deneb introduces new global topics for blob sidecars.
|
||||
|
||||
##### `beacon_block`
|
||||
|
||||
The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in deneb.
|
||||
|
||||
##### `blob_sidecar_{index}`
|
||||
|
||||
This topic is used to propagate signed blob sidecars, one for each sidecar index. The number of indices is defined by `MAX_BLOBS_PER_BLOCK`.
|
||||
|
||||
The following validations MUST pass before forwarding the `sidecar` on the network, assuming the alias `sidecar = signed_blob_sidecar.message`:
|
||||
|
||||
- _[REJECT]_ The sidecar is for the correct topic -- i.e. `sidecar.index` matches the topic `{index}`.
|
||||
- _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `sidecar.slot <= current_slot` (a client MAY queue future blocks for processing at the appropriate slot).
|
||||
- _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that `sidecar.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)`
|
||||
- _[IGNORE]_ The blob's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue blocks for processing once the parent block is retrieved).
|
||||
- _[REJECT]_ The blob's block's parent (defined by `sidecar.block_parent_root`) passes validation.
|
||||
- _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid with respect to the `sidecar.proposer_index` pubkey.
|
||||
- _[IGNORE]_ The sidecar is the only sidecar with valid signature received for the tuple `(sidecar.block_root, sidecar.index)`.
|
||||
- _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `block_parent_root`/`slot`).
|
||||
If the `proposer_index` cannot immediately be verified against the expected shuffling, the sidecar MAY be queued for later processing while proposers for the block's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message.
|
||||
|
||||
|
||||
### Transitioning the gossip
|
||||
|
||||
See gossip transition details found in the [Altair document](../altair/p2p-interface.md#transitioning-the-gossip) for
|
||||
details on how to handle transitioning gossip topics for this upgrade.
|
||||
|
||||
## The Req/Resp domain
|
||||
|
||||
### Messages
|
||||
|
||||
#### BeaconBlocksByRange v2
|
||||
|
||||
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/`
|
||||
|
||||
The Deneb fork-digest is introduced to the `context` enum to specify Deneb beacon block type.
|
||||
|
||||
Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||
|
||||
[0]: # (eth2spec: skip)
|
||||
|
||||
| `fork_version` | Chunk SSZ type |
|
||||
|--------------------------|-------------------------------|
|
||||
| `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` |
|
||||
| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
|
||||
| `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` |
|
||||
| `CAPELLA_FORK_VERSION` | `capella.SignedBeaconBlock` |
|
||||
| `DENEB_FORK_VERSION` | `deneb.SignedBeaconBlock` |
|
||||
|
||||
No more than `MAX_REQUEST_BLOCKS_DENEB` may be requested at a time.
|
||||
|
||||
#### BeaconBlocksByRoot v2
|
||||
|
||||
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/`
|
||||
|
||||
Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||
|
||||
[1]: # (eth2spec: skip)
|
||||
|
||||
| `fork_version` | Chunk SSZ type |
|
||||
|--------------------------|-------------------------------|
|
||||
| `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` |
|
||||
| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
|
||||
| `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` |
|
||||
| `CAPELLA_FORK_VERSION` | `capella.SignedBeaconBlock` |
|
||||
| `DENEB_FORK_VERSION` | `deneb.SignedBeaconBlock` |
|
||||
|
||||
No more than `MAX_REQUEST_BLOCKS_DENEB` may be requested at a time.
|
||||
|
||||
#### BlobSidecarsByRoot v1
|
||||
|
||||
**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_root/1/`
|
||||
|
||||
New in deneb.
|
||||
|
||||
The `<context-bytes>` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||
|
||||
[1]: # (eth2spec: skip)
|
||||
|
||||
| `fork_version` | Chunk SSZ type |
|
||||
|--------------------------|-------------------------------|
|
||||
| `DENEB_FORK_VERSION` | `deneb.BlobSidecar` |
|
||||
|
||||
Request Content:
|
||||
|
||||
```
|
||||
(
|
||||
  List[BlobIdentifier, MAX_REQUEST_BLOB_SIDECARS * MAX_BLOBS_PER_BLOCK]
|
||||
)
|
||||
```
|
||||
|
||||
Response Content:
|
||||
|
||||
```
|
||||
(
|
||||
  List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS * MAX_BLOBS_PER_BLOCK]
|
||||
)
|
||||
```
|
||||
|
||||
Requests sidecars by block root and index.
|
||||
The response is a list of `BlobSidecar` whose length is less than or equal to the number of requests.
|
||||
It may be less in the case that the responding peer is missing blocks or sidecars.
|
||||
|
||||
The response is unsigned, i.e. `BlobSidecar`, as the signature of the beacon block proposer
|
||||
may not be available beyond the initial distribution via gossip.
|
||||
|
||||
No more than `MAX_REQUEST_BLOB_SIDECARS * MAX_BLOBS_PER_BLOCK` may be requested at a time.
|
||||
|
||||
`BlobSidecarsByRoot` is primarily used to recover recent blobs (e.g. when receiving a block with a transaction whose corresponding blob is missing).
|
||||
|
||||
The response MUST consist of zero or more `response_chunk`.
|
||||
Each _successful_ `response_chunk` MUST contain a single `BlobSidecar` payload.
|
||||
|
||||
Clients MUST support requesting sidecars since `minimum_request_epoch`, where `minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH)`. If any root in the request content references a block earlier than `minimum_request_epoch`, peers MAY respond with error code `3: ResourceUnavailable` or not include the blob in the response.
|
||||
|
||||
Clients MUST respond with at least one sidecar, if they have it.
|
||||
Clients MAY limit the number of blocks and sidecars in the response.
|
||||
|
||||
#### BlobSidecarsByRange v1
|
||||
|
||||
**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_range/1/`
|
||||
|
||||
New in deneb.
|
||||
|
||||
The `<context-bytes>` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||
|
||||
[1]: # (eth2spec: skip)
|
||||
|
||||
| `fork_version` | Chunk SSZ type |
|
||||
|--------------------------|-------------------------------|
|
||||
| `DENEB_FORK_VERSION` | `deneb.BlobSidecar` |
|
||||
|
||||
Request Content:
|
||||
```
|
||||
(
|
||||
start_slot: Slot
|
||||
count: uint64
|
||||
)
|
||||
```
|
||||
|
||||
Response Content:
|
||||
```
|
||||
(
|
||||
List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS * MAX_BLOBS_PER_BLOCK]
|
||||
)
|
||||
```
|
||||
|
||||
Requests blob sidecars in the slot range `[start_slot, start_slot + count)`, leading up to the current head block as selected by fork choice.
|
||||
|
||||
The response is unsigned, i.e. `BlobSidecar`, as the signature of the beacon block proposer may not be available beyond the initial distribution via gossip.
|
||||
|
||||
Before consuming the next response chunk, the response reader SHOULD verify the blob sidecar is well-formatted and correct w.r.t. the expected KZG commitments through `validate_blobs`.
|
||||
|
||||
`BlobSidecarsByRange` is primarily used to sync blobs that may have been missed on gossip and to sync within the `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` window.
|
||||
|
||||
The request MUST be encoded as an SSZ-container.
|
||||
|
||||
The response MUST consist of zero or more `response_chunk`.
|
||||
Each _successful_ `response_chunk` MUST contain a single `BlobSidecar` payload.
|
||||
|
||||
Clients MUST keep a record of signed blob sidecars seen on the epoch range
|
||||
`[max(current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH), current_epoch]`
|
||||
where `current_epoch` is defined by the current wall-clock time,
|
||||
and clients MUST support serving requests of blobs on this range.
|
||||
|
||||
Peers that are unable to reply to blob sidecar requests within the `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS`
|
||||
epoch range SHOULD respond with error code `3: ResourceUnavailable`.
|
||||
Such peers that are unable to successfully reply to this range of requests MAY get descored
|
||||
or disconnected at any time.
|
||||
|
||||
*Note*: The above requirement implies that nodes that start from a recent weak subjectivity checkpoint
|
||||
MUST backfill the local blobs database to at least epoch `current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS`
|
||||
to be fully compliant with `BlobSidecarsByRange` requests.
|
||||
|
||||
*Note*: Although clients that bootstrap from a weak subjectivity checkpoint can begin
|
||||
participating in the networking immediately, other peers MAY
|
||||
disconnect and/or temporarily ban such an un-synced or semi-synced client.
|
||||
|
||||
Clients MUST respond with at least the blob sidecars of the first blob-carrying block that exists in the range, if they have it, and no more than `MAX_REQUEST_BLOB_SIDECARS * MAX_BLOBS_PER_BLOCK` sidecars.
|
||||
|
||||
Clients MUST include all blob sidecars of each block from which they include blob sidecars.
|
||||
|
||||
The following blob sidecars, where they exist, MUST be sent in consecutive `(slot, index)` order.
|
||||
|
||||
Clients MAY limit the number of blob sidecars in the response.
|
||||
|
||||
The response MUST contain no more than `count * MAX_BLOBS_PER_BLOCK` blob sidecars.
|
||||
|
||||
Clients MUST respond with blob sidecars from their view of the current fork choice
|
||||
-- that is, blob sidecars as included by blocks from the single chain defined by the current head.
|
||||
Of note, blocks from slots before the finalization MUST lead to the finalized block reported in the `Status` handshake.
|
||||
|
||||
Clients MUST respond with blob sidecars that are consistent from a single chain within the context of the request.
|
||||
|
||||
After the initial blob sidecar, clients MAY stop in the process of responding if their fork choice changes the view of the chain in the context of the request.
|
||||
|
||||
## Design decision rationale
|
||||
|
||||
### Why are blobs relayed as a sidecar, separate from beacon blocks?
|
||||
|
||||
This "sidecar" design provides forward compatibility for further data increases by black-boxing `is_data_available()`:
|
||||
with full sharding `is_data_available()` can be replaced by data-availability-sampling (DAS)
|
||||
thus avoiding all blobs being downloaded by all beacon nodes on the network.
|
||||
|
||||
Such sharding design may introduce an updated `BlobSidecar` to identify the shard,
|
||||
but does not affect the `BeaconBlock` structure.
|
||||
563
specs/deneb/polynomial-commitments.md
Normal file
563
specs/deneb/polynomial-commitments.md
Normal file
@@ -0,0 +1,563 @@
|
||||
# Deneb -- Polynomial Commitments
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Custom types](#custom-types)
|
||||
- [Constants](#constants)
|
||||
- [Preset](#preset)
|
||||
- [Blob](#blob)
|
||||
- [Crypto](#crypto)
|
||||
- [Trusted setup](#trusted-setup)
|
||||
- [Helper functions](#helper-functions)
|
||||
- [Bit-reversal permutation](#bit-reversal-permutation)
|
||||
- [`is_power_of_two`](#is_power_of_two)
|
||||
- [`reverse_bits`](#reverse_bits)
|
||||
- [`bit_reversal_permutation`](#bit_reversal_permutation)
|
||||
- [BLS12-381 helpers](#bls12-381-helpers)
|
||||
- [`hash_to_bls_field`](#hash_to_bls_field)
|
||||
- [`bytes_to_bls_field`](#bytes_to_bls_field)
|
||||
- [`validate_kzg_g1`](#validate_kzg_g1)
|
||||
- [`bytes_to_kzg_commitment`](#bytes_to_kzg_commitment)
|
||||
- [`bytes_to_kzg_proof`](#bytes_to_kzg_proof)
|
||||
- [`blob_to_polynomial`](#blob_to_polynomial)
|
||||
- [`compute_challenge`](#compute_challenge)
|
||||
- [`bls_modular_inverse`](#bls_modular_inverse)
|
||||
- [`div`](#div)
|
||||
- [`g1_lincomb`](#g1_lincomb)
|
||||
- [`compute_powers`](#compute_powers)
|
||||
- [Polynomials](#polynomials)
|
||||
- [`evaluate_polynomial_in_evaluation_form`](#evaluate_polynomial_in_evaluation_form)
|
||||
- [KZG](#kzg)
|
||||
- [`blob_to_kzg_commitment`](#blob_to_kzg_commitment)
|
||||
- [`verify_kzg_proof`](#verify_kzg_proof)
|
||||
- [`verify_kzg_proof_impl`](#verify_kzg_proof_impl)
|
||||
- [`verify_kzg_proof_batch`](#verify_kzg_proof_batch)
|
||||
- [`compute_kzg_proof`](#compute_kzg_proof)
|
||||
- [`compute_quotient_eval_within_domain`](#compute_quotient_eval_within_domain)
|
||||
- [`compute_kzg_proof_impl`](#compute_kzg_proof_impl)
|
||||
- [`compute_blob_kzg_proof`](#compute_blob_kzg_proof)
|
||||
- [`verify_blob_kzg_proof`](#verify_blob_kzg_proof)
|
||||
- [`verify_blob_kzg_proof_batch`](#verify_blob_kzg_proof_batch)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This document specifies basic polynomial operations and KZG polynomial commitment operations as they are needed for the Deneb specification. The implementations are not optimized for performance, but readability. All practical implementations should optimize the polynomial operations.
|
||||
|
||||
Functions flagged as "Public method" MUST be provided by the underlying KZG library as public functions. All other functions are private functions used internally by the KZG library.
|
||||
|
||||
Public functions MUST accept raw bytes as input and perform the required cryptographic normalization before invoking any internal functions.
|
||||
|
||||
## Custom types
|
||||
|
||||
| Name | SSZ equivalent | Description |
|
||||
| - | - | - |
|
||||
| `G1Point` | `Bytes48` | |
|
||||
| `G2Point` | `Bytes96` | |
|
||||
| `BLSFieldElement` | `uint256` | Validation: `x < BLS_MODULUS` |
|
||||
| `KZGCommitment` | `Bytes48` | Validation: Perform [BLS standard's](https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-04#section-2.5) "KeyValidate" check but do allow the identity point |
|
||||
| `KZGProof` | `Bytes48` | Same as for `KZGCommitment` |
|
||||
| `Polynomial` | `Vector[BLSFieldElement, FIELD_ELEMENTS_PER_BLOB]` | A polynomial in evaluation form |
|
||||
| `Blob` | `ByteVector[BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB]` | A basic blob data |
|
||||
|
||||
## Constants
|
||||
|
||||
| Name | Value | Notes |
|
||||
| - | - | - |
|
||||
| `BLS_MODULUS` | `52435875175126190479447740508185965837690552500527637822603658699938581184513` | Scalar field modulus of BLS12-381 |
|
||||
| `BYTES_PER_FIELD_ELEMENT` | `uint64(32)` | Bytes used to encode a BLS scalar field element |
|
||||
| `G1_POINT_AT_INFINITY` | `Bytes48(b'\xc0' + b'\x00' * 47)` | Serialized form of the point at infinity on the G1 group |
|
||||
|
||||
|
||||
## Preset
|
||||
|
||||
### Blob
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `FIELD_ELEMENTS_PER_BLOB` | `uint64(4096)` |
|
||||
| `FIAT_SHAMIR_PROTOCOL_DOMAIN` | `b'FSBLOBVERIFY_V1_'` |
|
||||
| `RANDOM_CHALLENGE_KZG_BATCH_DOMAIN` | `b'RCKZGBATCH___V1_'` |
|
||||
|
||||
### Crypto
|
||||
|
||||
| Name | Value | Notes |
|
||||
| - | - | - |
|
||||
| `ROOTS_OF_UNITY` | `Vector[BLSFieldElement, FIELD_ELEMENTS_PER_BLOB]` | Roots of unity of order FIELD_ELEMENTS_PER_BLOB over the BLS12-381 field |
|
||||
|
||||
### Trusted setup
|
||||
|
||||
The trusted setup is part of the preset: during testing a `minimal` insecure variant may be used,
|
||||
but reusing the `mainnet` settings in public networks is a critical security requirement.
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `KZG_SETUP_G2_LENGTH` | `65` |
|
||||
| `KZG_SETUP_G1` | `Vector[G1Point, FIELD_ELEMENTS_PER_BLOB]`, contents TBD |
|
||||
| `KZG_SETUP_G2` | `Vector[G2Point, KZG_SETUP_G2_LENGTH]`, contents TBD |
|
||||
| `KZG_SETUP_LAGRANGE` | `Vector[KZGCommitment, FIELD_ELEMENTS_PER_BLOB]`, contents TBD |
|
||||
|
||||
## Helper functions
|
||||
|
||||
### Bit-reversal permutation
|
||||
|
||||
All polynomials (which are always given in Lagrange form) should be interpreted as being in
|
||||
bit-reversal permutation. In practice, clients can implement this by storing the lists
|
||||
`KZG_SETUP_LAGRANGE` and `ROOTS_OF_UNITY` in bit-reversal permutation, so these functions only
|
||||
have to be called once at startup.
|
||||
|
||||
#### `is_power_of_two`
|
||||
|
||||
```python
|
||||
def is_power_of_two(value: int) -> bool:
    """
    Check if ``value`` is a power of two integer.
    """
    if value <= 0:
        return False
    # A power of two has exactly one set bit, so clearing the lowest
    # set bit (value & (value - 1)) yields zero.
    return value & (value - 1) == 0
|
||||
```
|
||||
|
||||
#### `reverse_bits`
|
||||
|
||||
```python
|
||||
def reverse_bits(n: int, order: int) -> int:
    """
    Reverse the bit order of an integer ``n``.
    """
    assert is_power_of_two(order)
    # The bit width is log2(order): render ``n`` as a zero-padded binary
    # string of that width, reverse it, and parse it back as an integer.
    bit_width = order.bit_length() - 1
    binary = format(n, '0{}b'.format(bit_width))
    return int(binary[::-1], 2)
|
||||
```
|
||||
|
||||
#### `bit_reversal_permutation`
|
||||
|
||||
```python
|
||||
def bit_reversal_permutation(sequence: Sequence[T]) -> Sequence[T]:
    """
    Return a copy with bit-reversed permutation. The permutation is an involution (inverts itself).

    The input and output are a sequence of generic type ``T`` objects.
    """
    length = len(sequence)
    permuted = []
    for position in range(length):
        # Element at ``position`` comes from the bit-reversed index.
        permuted.append(sequence[reverse_bits(position, length)])
    return permuted
|
||||
```
|
||||
|
||||
### BLS12-381 helpers
|
||||
|
||||
#### `hash_to_bls_field`
|
||||
|
||||
```python
|
||||
def hash_to_bls_field(data: bytes) -> BLSFieldElement:
    """
    Hash ``data`` and convert the output to a BLS scalar field element.
    The output is not uniform over the BLS field.
    """
    digest = hash(data)
    # Interpret the digest as an integer and reduce it into the scalar field.
    field_value = int.from_bytes(digest, ENDIANNESS) % BLS_MODULUS
    return BLSFieldElement(field_value)
|
||||
```
|
||||
|
||||
#### `bytes_to_bls_field`
|
||||
|
||||
```python
|
||||
def bytes_to_bls_field(b: Bytes32) -> BLSFieldElement:
    """
    Convert untrusted bytes to a trusted and validated BLS scalar field element.
    This function does not accept inputs greater than the BLS modulus.
    """
    candidate = int.from_bytes(b, ENDIANNESS)
    # Reject non-canonical encodings: the value must lie inside the scalar field.
    assert candidate < BLS_MODULUS
    return BLSFieldElement(candidate)
|
||||
```
|
||||
|
||||
|
||||
#### `validate_kzg_g1`
|
||||
|
||||
```python
|
||||
def validate_kzg_g1(b: Bytes48) -> None:
    """
    Perform BLS validation required by the types `KZGProof` and `KZGCommitment`.
    """
    # Unlike plain KeyValidate, the serialized point at infinity is allowed.
    if b != G1_POINT_AT_INFINITY:
        assert bls.KeyValidate(b)
|
||||
```
|
||||
|
||||
#### `bytes_to_kzg_commitment`
|
||||
|
||||
```python
|
||||
def bytes_to_kzg_commitment(b: Bytes48) -> KZGCommitment:
    """
    Convert untrusted bytes into a trusted and validated KZGCommitment.
    """
    # ``validate_kzg_g1`` raises on an invalid G1 encoding, so a returned
    # value is always a validated commitment.
    validate_kzg_g1(b)
    commitment = KZGCommitment(b)
    return commitment
|
||||
```
|
||||
|
||||
#### `bytes_to_kzg_proof`
|
||||
|
||||
```python
|
||||
def bytes_to_kzg_proof(b: Bytes48) -> KZGProof:
    """
    Convert untrusted bytes into a trusted and validated KZGProof.
    """
    # ``validate_kzg_g1`` raises on an invalid G1 encoding, so a returned
    # value is always a validated proof.
    validate_kzg_g1(b)
    proof = KZGProof(b)
    return proof
|
||||
```
|
||||
|
||||
#### `blob_to_polynomial`
|
||||
|
||||
```python
|
||||
def blob_to_polynomial(blob: Blob) -> Polynomial:
    """
    Convert a blob to list of BLS field scalars.
    """
    polynomial = Polynomial()
    for field_index in range(FIELD_ELEMENTS_PER_BLOB):
        # Each field element occupies BYTES_PER_FIELD_ELEMENT consecutive bytes.
        start = field_index * BYTES_PER_FIELD_ELEMENT
        chunk = blob[start:start + BYTES_PER_FIELD_ELEMENT]
        polynomial[field_index] = bytes_to_bls_field(chunk)
    return polynomial
|
||||
```
|
||||
|
||||
#### `compute_challenge`
|
||||
|
||||
```python
|
||||
def compute_challenge(blob: Blob,
                      commitment: KZGCommitment) -> BLSFieldElement:
    """
    Return the Fiat-Shamir challenge required by the rest of the protocol.
    """
    # Domain separator: the protocol domain followed by the polynomial degree.
    degree_poly = int.to_bytes(FIELD_ELEMENTS_PER_BLOB, 16, ENDIANNESS)
    # Transcript = domain || degree || blob || commitment, hashed into the field.
    transcript = FIAT_SHAMIR_PROTOCOL_DOMAIN + degree_poly + blob + commitment
    return hash_to_bls_field(transcript)
|
||||
```
|
||||
|
||||
#### `bls_modular_inverse`
|
||||
|
||||
```python
|
||||
def bls_modular_inverse(x: BLSFieldElement) -> BLSFieldElement:
    """
    Compute the modular inverse of x
    i.e. return y such that x * y % BLS_MODULUS == 1 and return 0 for x == 0
    """
    if x == 0:
        return BLSFieldElement(0)
    # pow with exponent -1 computes the modular inverse (Python 3.8+).
    return BLSFieldElement(pow(x, -1, BLS_MODULUS))
|
||||
```
|
||||
|
||||
#### `div`
|
||||
|
||||
```python
|
||||
def div(x: BLSFieldElement, y: BLSFieldElement) -> BLSFieldElement:
    """
    Divide two field elements: ``x`` by ``y``.
    """
    # Field division is multiplication by the modular inverse of the divisor.
    quotient = int(x) * int(bls_modular_inverse(y)) % BLS_MODULUS
    return BLSFieldElement(quotient)
|
||||
```
|
||||
|
||||
#### `g1_lincomb`
|
||||
|
||||
```python
|
||||
def g1_lincomb(points: Sequence[KZGCommitment], scalars: Sequence[BLSFieldElement]) -> KZGCommitment:
    """
    BLS multiscalar multiplication. This function can be optimized using Pippenger's algorithm and variants.
    """
    assert len(points) == len(scalars)
    # Accumulate sum(scalar_i * point_i) starting from the identity element.
    accumulator = bls.Z1
    for point, scalar in zip(points, scalars):
        term = bls.multiply(bls.bytes48_to_G1(point), scalar)
        accumulator = bls.add(accumulator, term)
    return KZGCommitment(bls.G1_to_bytes48(accumulator))
|
||||
```
|
||||
|
||||
#### `compute_powers`
|
||||
|
||||
```python
|
||||
def compute_powers(x: BLSFieldElement, n: uint64) -> Sequence[BLSFieldElement]:
    """
    Return ``x`` to power of [0, n-1], if n > 0. When n==0, an empty array is returned.
    """
    powers = []
    accumulator = 1  # x**0
    for _ in range(n):
        powers.append(BLSFieldElement(accumulator))
        accumulator = accumulator * int(x) % BLS_MODULUS
    return powers
|
||||
```
|
||||
|
||||
|
||||
### Polynomials
|
||||
|
||||
#### `evaluate_polynomial_in_evaluation_form`
|
||||
|
||||
```python
|
||||
def evaluate_polynomial_in_evaluation_form(polynomial: Polynomial,
                                           z: BLSFieldElement) -> BLSFieldElement:
    """
    Evaluate a polynomial (in evaluation form) at an arbitrary point ``z`` that is not in the domain.
    Uses the barycentric formula:
       f(z) = (z**WIDTH - 1) / WIDTH  *  sum_(i=0)^WIDTH  (f(DOMAIN[i]) * DOMAIN[i]) / (z - DOMAIN[i])
    """
    width = len(polynomial)
    assert width == FIELD_ELEMENTS_PER_BLOB
    inverse_width = bls_modular_inverse(BLSFieldElement(width))

    # The domain is the roots of unity in bit-reversal permutation order,
    # matching the ordering of the polynomial's evaluations.
    roots_of_unity_brp = bit_reversal_permutation(ROOTS_OF_UNITY)

    # If we are asked to evaluate within the domain, we already know the answer
    if z in roots_of_unity_brp:
        eval_index = roots_of_unity_brp.index(z)
        return BLSFieldElement(polynomial[eval_index])

    # Barycentric sum: sum_i f(DOMAIN[i]) * DOMAIN[i] / (z - DOMAIN[i])
    result = 0
    for i in range(width):
        a = BLSFieldElement(int(polynomial[i]) * int(roots_of_unity_brp[i]) % BLS_MODULUS)
        # Add BLS_MODULUS before subtracting to keep the difference non-negative.
        b = BLSFieldElement((int(BLS_MODULUS) + int(z) - int(roots_of_unity_brp[i])) % BLS_MODULUS)
        result += int(div(a, b) % BLS_MODULUS)
    # Multiply by the prefactor (z**WIDTH - 1) / WIDTH.
    result = result * int(pow(z, width, BLS_MODULUS) - 1) * int(inverse_width)
    return BLSFieldElement(result % BLS_MODULUS)
|
||||
```
|
||||
|
||||
### KZG
|
||||
|
||||
KZG core functions. These are also defined in Deneb execution specs.
|
||||
|
||||
#### `blob_to_kzg_commitment`
|
||||
|
||||
```python
|
||||
def blob_to_kzg_commitment(blob: Blob) -> KZGCommitment:
    """
    Public method.
    """
    # Commit to the blob's polynomial via a multiscalar multiplication
    # against the bit-reversed Lagrange trusted setup.
    evaluations = blob_to_polynomial(blob)
    setup = bit_reversal_permutation(KZG_SETUP_LAGRANGE)
    return g1_lincomb(setup, evaluations)
|
||||
```
|
||||
|
||||
#### `verify_kzg_proof`
|
||||
|
||||
```python
|
||||
def verify_kzg_proof(commitment_bytes: Bytes48,
                     z: Bytes32,
                     y: Bytes32,
                     proof_bytes: Bytes48) -> bool:
    """
    Verify KZG proof that ``p(z) == y`` where ``p(z)`` is the polynomial represented by ``polynomial_kzg``.
    Receives inputs as bytes.
    Public method.
    """
    # Normalize and validate every untrusted byte input, then delegate
    # to the typed implementation.
    commitment = bytes_to_kzg_commitment(commitment_bytes)
    challenge = bytes_to_bls_field(z)
    evaluation = bytes_to_bls_field(y)
    proof = bytes_to_kzg_proof(proof_bytes)
    return verify_kzg_proof_impl(commitment, challenge, evaluation, proof)
|
||||
```
|
||||
|
||||
|
||||
#### `verify_kzg_proof_impl`
|
||||
|
||||
```python
|
||||
def verify_kzg_proof_impl(commitment: KZGCommitment,
                          z: BLSFieldElement,
                          y: BLSFieldElement,
                          proof: KZGProof) -> bool:
    """
    Verify KZG proof that ``p(z) == y`` where ``p(z)`` is the polynomial represented by ``polynomial_kzg``.
    """
    # Verify: P - y = Q * (X - z)
    # G2 term built from the second trusted-setup point; BLS_MODULUS - z negates z in the field.
    X_minus_z = bls.add(bls.bytes96_to_G2(KZG_SETUP_G2[1]), bls.multiply(bls.G2, BLS_MODULUS - z))
    # G1 term: the commitment shifted by the claimed evaluation y.
    P_minus_y = bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1, BLS_MODULUS - y))
    # Pairing check: e(P - y, -G2) * e(proof, X - z) == 1
    return bls.pairing_check([
        [P_minus_y, bls.neg(bls.G2)],
        [bls.bytes48_to_G1(proof), X_minus_z]
    ])
|
||||
```
|
||||
|
||||
#### `verify_kzg_proof_batch`
|
||||
|
||||
```python
|
||||
def verify_kzg_proof_batch(commitments: Sequence[KZGCommitment],
                           zs: Sequence[BLSFieldElement],
                           ys: Sequence[BLSFieldElement],
                           proofs: Sequence[KZGProof]) -> bool:
    """
    Verify multiple KZG proofs efficiently.

    All four input sequences must have equal length; entry ``i`` claims that
    the polynomial committed to by ``commitments[i]`` evaluates to ``ys[i]``
    at ``zs[i]``, witnessed by ``proofs[i]``.
    """

    assert len(commitments) == len(zs) == len(ys) == len(proofs)

    # Compute a random challenge. Note that it does not have to be computed from a hash,
    # r just has to be random.
    degree_poly = int.to_bytes(FIELD_ELEMENTS_PER_BLOB, 8, ENDIANNESS)
    num_commitments = int.to_bytes(len(commitments), 8, ENDIANNESS)
    data = RANDOM_CHALLENGE_KZG_BATCH_DOMAIN + degree_poly + num_commitments

    # Append all inputs to the transcript before we hash
    for commitment, z, y, proof in zip(commitments, zs, ys, proofs):
        data += commitment \
            + int.to_bytes(z, BYTES_PER_FIELD_ELEMENT, ENDIANNESS) \
            + int.to_bytes(y, BYTES_PER_FIELD_ELEMENT, ENDIANNESS) \
            + proof

    # Fiat-Shamir: derive r from the transcript, then use its powers to
    # combine all proofs into a single pairing check.
    r = hash_to_bls_field(data)
    r_powers = compute_powers(r, len(commitments))

    # Verify: e(sum r^i proof_i, [s]) ==
    # e(sum r^i (commitment_i - [y_i]) + sum r^i z_i proof_i, [1])
    proof_lincomb = g1_lincomb(proofs, r_powers)
    proof_z_lincomb = g1_lincomb(
        proofs,
        [BLSFieldElement((int(z) * int(r_power)) % BLS_MODULUS) for z, r_power in zip(zs, r_powers)],
    )
    # commitment_i - [y_i] for each claim, re-serialized for g1_lincomb.
    C_minus_ys = [bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1, BLS_MODULUS - y))
                  for commitment, y in zip(commitments, ys)]
    C_minus_y_as_KZGCommitments = [KZGCommitment(bls.G1_to_bytes48(x)) for x in C_minus_ys]
    C_minus_y_lincomb = g1_lincomb(C_minus_y_as_KZGCommitments, r_powers)

    return bls.pairing_check([
        [bls.bytes48_to_G1(proof_lincomb), bls.neg(bls.bytes96_to_G2(KZG_SETUP_G2[1]))],
        [bls.add(bls.bytes48_to_G1(C_minus_y_lincomb), bls.bytes48_to_G1(proof_z_lincomb)), bls.G2]
    ])
|
||||
```
|
||||
|
||||
#### `compute_kzg_proof`
|
||||
|
||||
```python
|
||||
def compute_kzg_proof(blob: Blob, z: Bytes32) -> KZGProof:
    """
    Compute KZG proof at point `z` for the polynomial represented by `blob`.
    Do this by computing the quotient polynomial in evaluation form: q(x) = (p(x) - p(z)) / (x - z).
    Public method.
    """
    # Validate the untrusted evaluation point, then delegate to the typed helper.
    evaluation_point = bytes_to_bls_field(z)
    return compute_kzg_proof_impl(blob_to_polynomial(blob), evaluation_point)
|
||||
```
|
||||
|
||||
#### `compute_quotient_eval_within_domain`
|
||||
|
||||
```python
|
||||
def compute_quotient_eval_within_domain(z: BLSFieldElement,
                                        polynomial: Polynomial,
                                        y: BLSFieldElement
                                        ) -> BLSFieldElement:
    """
    Given `y == p(z)` for a polynomial `p(x)`, compute `q(z)`: the KZG quotient polynomial evaluated at `z` for the
    special case where `z` is in `ROOTS_OF_UNITY`.

    For more details, read https://dankradfeist.de/ethereum/2021/06/18/pcs-multiproofs.html section "Dividing
    when one of the points is zero". The code below computes q(x_m) for the roots of unity special case.
    """
    roots_of_unity_brp = bit_reversal_permutation(ROOTS_OF_UNITY)
    result = 0
    for i, omega_i in enumerate(roots_of_unity_brp):
        if omega_i == z:  # skip the evaluation point in the sum
            continue

        # f(x_i) - y, kept non-negative by adding the modulus before reducing.
        # NOTE: parenthesized so the reduction applies to the whole difference;
        # the previous form (`... - int(y) % BLS_MODULUS`) reduced only `int(y)`,
        # a no-op since y < BLS_MODULUS, so the final value is unchanged.
        f_i = (int(BLS_MODULUS) + int(polynomial[i]) - int(y)) % BLS_MODULUS
        numerator = f_i * int(omega_i) % BLS_MODULUS
        denominator = int(z) * (int(BLS_MODULUS) + int(z) - int(omega_i)) % BLS_MODULUS
        result += int(div(BLSFieldElement(numerator), BLSFieldElement(denominator)))

    return BLSFieldElement(result % BLS_MODULUS)
|
||||
```
|
||||
|
||||
#### `compute_kzg_proof_impl`
|
||||
|
||||
```python
|
||||
def compute_kzg_proof_impl(polynomial: Polynomial, z: BLSFieldElement) -> KZGProof:
    """
    Helper function for `compute_kzg_proof()` and `compute_blob_kzg_proof()`.

    Builds the quotient polynomial q(x) = (p(x) - p(z)) / (x - z) directly in
    evaluation form over the bit-reversed roots of unity, then commits to it.
    """
    roots_of_unity_brp = bit_reversal_permutation(ROOTS_OF_UNITY)

    # For all x_i, compute p(x_i) - p(z)
    y = evaluate_polynomial_in_evaluation_form(polynomial, z)
    polynomial_shifted = [BLSFieldElement((int(p) - int(y)) % BLS_MODULUS) for p in polynomial]

    # For all x_i, compute (x_i - z)
    denominator_poly = [BLSFieldElement((int(x) - int(z)) % BLS_MODULUS)
                        for x in roots_of_unity_brp]

    # Compute the quotient polynomial directly in evaluation form
    quotient_polynomial = [BLSFieldElement(0)] * FIELD_ELEMENTS_PER_BLOB
    for i, (a, b) in enumerate(zip(polynomial_shifted, denominator_poly)):
        if b == 0:
            # The denominator is zero hence `z` is a root of unity: we must handle it as a special case
            quotient_polynomial[i] = compute_quotient_eval_within_domain(roots_of_unity_brp[i], polynomial, y)
        else:
            # Compute: q(x_i) = (p(x_i) - p(z)) / (x_i - z).
            quotient_polynomial[i] = div(a, b)

    # Commit to the quotient polynomial against the bit-reversed Lagrange setup.
    return KZGProof(g1_lincomb(bit_reversal_permutation(KZG_SETUP_LAGRANGE), quotient_polynomial))
|
||||
```
|
||||
|
||||
#### `compute_blob_kzg_proof`
|
||||
|
||||
```python
|
||||
def compute_blob_kzg_proof(blob: Blob) -> KZGProof:
    """
    Given a blob, return the KZG proof that is used to verify it against the commitment.

    Public method.
    """
    polynomial = blob_to_polynomial(blob)
    commitment = blob_to_kzg_commitment(blob)
    # Challenge derived from the blob and its commitment (Fiat-Shamir)
    challenge = compute_challenge(blob, commitment)
    return compute_kzg_proof_impl(polynomial, challenge)
|
||||
```
|
||||
|
||||
#### `verify_blob_kzg_proof`
|
||||
|
||||
```python
|
||||
def verify_blob_kzg_proof(blob: Blob,
                          commitment_bytes: Bytes48,
                          proof_bytes: Bytes48) -> bool:
    """
    Given a blob and a KZG proof, verify that the blob data corresponds to the provided commitment.

    Public method.
    """
    commitment = bytes_to_kzg_commitment(commitment_bytes)
    proof = bytes_to_kzg_proof(proof_bytes)

    # Derive the evaluation challenge from the blob and commitment,
    # then evaluate the blob's polynomial at that challenge point.
    polynomial = blob_to_polynomial(blob)
    challenge = compute_challenge(blob, commitment)
    y = evaluate_polynomial_in_evaluation_form(polynomial, challenge)

    # Check the proof against the claimed evaluation
    return verify_kzg_proof_impl(commitment, challenge, y, proof)
|
||||
```
|
||||
|
||||
#### `verify_blob_kzg_proof_batch`
|
||||
|
||||
```python
|
||||
def verify_blob_kzg_proof_batch(blobs: Sequence[Blob],
                                commitments_bytes: Sequence[Bytes48],
                                proofs_bytes: Sequence[Bytes48]) -> bool:
    """
    Given a list of blobs and blob KZG proofs, verify that they correspond to the provided commitments.

    Public method.
    """
    # The three sequences are parallel: element i of each belongs together.
    assert len(blobs) == len(commitments_bytes) == len(proofs_bytes)

    commitments, evaluation_challenges, ys, proofs = [], [], [], []
    for blob, commitment_bytes, proof_bytes in zip(blobs, commitments_bytes, proofs_bytes):
        commitment = bytes_to_kzg_commitment(commitment_bytes)
        commitments.append(commitment)
        polynomial = blob_to_polynomial(blob)
        # Challenge is derived per-blob from the blob and its commitment
        evaluation_challenge = compute_challenge(blob, commitment)
        evaluation_challenges.append(evaluation_challenge)
        ys.append(evaluate_polynomial_in_evaluation_form(polynomial, evaluation_challenge))
        proofs.append(bytes_to_kzg_proof(proof_bytes))

    # Verify all (commitment, z, y, proof) tuples in one batched check
    return verify_kzg_proof_batch(commitments, evaluation_challenges, ys, proofs)
|
||||
```
|
||||
@@ -1,4 +1,4 @@
|
||||
# EIP-4844 -- Honest Validator
|
||||
# Deneb -- Honest Validator
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
@@ -16,8 +16,7 @@
|
||||
- [Block and sidecar proposal](#block-and-sidecar-proposal)
|
||||
- [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
|
||||
- [Blob KZG commitments](#blob-kzg-commitments)
|
||||
- [Constructing the `SignedBeaconBlockAndBlobsSidecar`](#constructing-the-signedbeaconblockandblobssidecar)
|
||||
- [Block](#block)
|
||||
- [Constructing the `SignedBlobSidecar`s](#constructing-the-signedblobsidecars)
|
||||
- [Sidecar](#sidecar)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
@@ -25,14 +24,14 @@
|
||||
|
||||
## Introduction
|
||||
|
||||
This document represents the changes to be made in the code of an "honest validator" to implement EIP-4844.
|
||||
This document represents the changes to be made in the code of an "honest validator" to implement Deneb.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
This document is an extension of the [Capella -- Honest Validator](../capella/validator.md) guide.
|
||||
All behaviors and definitions defined in this document, and documents it extends, carry over unless explicitly noted or overridden.
|
||||
|
||||
All terminology, constants, functions, and protocol mechanics defined in the updated [Beacon Chain doc of EIP-4844](./beacon-chain.md) are requisite for this document and used throughout.
|
||||
All terminology, constants, functions, and protocol mechanics defined in the updated [Beacon Chain doc of Deneb](./beacon-chain.md) are requisite for this document and used throughout.
|
||||
Please see related Beacon Chain doc before continuing and use them as a reference throughout.
|
||||
|
||||
## Helpers
|
||||
@@ -53,7 +52,6 @@ def get_blobs_and_kzg_commitments(payload_id: PayloadId) -> Tuple[Sequence[BLSFi
|
||||
## Beacon chain responsibilities
|
||||
|
||||
All validator responsibilities remain unchanged other than those noted below.
|
||||
Namely, the blob handling and the addition of `SignedBeaconBlockAndBlobsSidecar`.
|
||||
|
||||
### Block and sidecar proposal
|
||||
|
||||
@@ -79,29 +77,47 @@ def validate_blobs_and_kzg_commitments(execution_payload: ExecutionPayload,
|
||||
|
||||
3. If valid, set `block.body.blob_kzg_commitments = blob_kzg_commitments`.
|
||||
|
||||
#### Constructing the `SignedBeaconBlockAndBlobsSidecar`
|
||||
To construct a `SignedBeaconBlockAndBlobsSidecar`, a `signed_beacon_block_and_blobs_sidecar` is defined with the necessary context for block and sidecar proposal.
|
||||
#### Constructing the `SignedBlobSidecar`s
|
||||
|
||||
##### Block
|
||||
Set `signed_beacon_block_and_blobs_sidecar.beacon_block = block` where `block` is obtained above.
|
||||
To construct a `SignedBlobSidecar`, a `signed_blob_sidecar` is defined with the necessary context for block and sidecar proposal.
|
||||
|
||||
##### Sidecar
|
||||
Coupled with block, the corresponding blobs are packaged into a sidecar object for distribution to the network.
|
||||
|
||||
Set `signed_beacon_block_and_blobs_sidecar.blobs_sidecar = sidecar` where `sidecar` is obtained from:
|
||||
Blobs associated with a block are packaged into sidecar objects for distribution to the network.
|
||||
|
||||
Each `sidecar` is obtained from:
|
||||
```python
|
||||
def get_blobs_sidecar(block: BeaconBlock, blobs: Sequence[Blob]) -> BlobsSidecar:
    """
    Package ``blobs`` with identifying data from ``block`` into a sidecar
    object for distribution to the network.
    """
    aggregated_proof = compute_aggregate_kzg_proof(blobs)
    return BlobsSidecar(
        beacon_block_root=hash_tree_root(block),
        beacon_block_slot=block.slot,
        blobs=blobs,
        kzg_aggregated_proof=aggregated_proof,
    )
|
||||
def get_blob_sidecars(block: BeaconBlock, blobs: Sequence[Blob]) -> Sequence[BlobSidecar]:
    """
    Build one ``BlobSidecar`` per blob, pairing each blob with the matching
    KZG commitment from the block body and a freshly computed KZG proof.
    """
    sidecars = []
    for blob_index, blob in enumerate(blobs):
        sidecar = BlobSidecar(
            block_root=hash_tree_root(block),
            index=blob_index,
            slot=block.slot,
            block_parent_root=block.parent_root,
            blob=blob,
            kzg_commitment=block.body.blob_kzg_commitments[blob_index],
            kzg_proof=compute_blob_kzg_proof(blob),
        )
        sidecars.append(sidecar)
    return sidecars
|
||||
|
||||
```
|
||||
|
||||
This `signed_beacon_block_and_blobs_sidecar` is then published to the global `beacon_block_and_blobs_sidecar` topic.
|
||||
Then for each sidecar, `signed_sidecar = SignedBlobSidecar(message=sidecar, signature=signature)` is constructed and published to the `blob_sidecar_{index}` topics according to its index.
|
||||
|
||||
`signature` is obtained from:
|
||||
|
||||
```python
|
||||
def get_blob_sidecar_signature(state: BeaconState,
                               sidecar: BlobSidecar,
                               privkey: int) -> BLSSignature:
    """
    Sign ``sidecar`` with ``privkey`` under the ``DOMAIN_BLOB_SIDECAR``
    domain computed for the sidecar's slot.
    """
    epoch = compute_epoch_at_slot(sidecar.slot)
    domain = get_domain(state, DOMAIN_BLOB_SIDECAR, epoch)
    signing_root = compute_signing_root(sidecar, domain)
    return bls.Sign(privkey, signing_root)
|
||||
```
|
||||
|
||||
After publishing, peers on the network may request the sidecar through sync-requests, or a local user may be interested.
|
||||
|
||||
The validator MUST hold on to sidecars for `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` epochs and serve when capable,
|
||||
to ensure the data-availability of these blobs throughout the network.
|
||||
|
||||
@@ -1,274 +0,0 @@
|
||||
# EIP-4844 -- Networking
|
||||
|
||||
This document contains the consensus-layer networking specification for EIP-4844.
|
||||
|
||||
The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite.
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Configuration](#configuration)
|
||||
- [Containers](#containers)
|
||||
- [`BlobsSidecar`](#blobssidecar)
|
||||
- [`SignedBeaconBlockAndBlobsSidecar`](#signedbeaconblockandblobssidecar)
|
||||
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
|
||||
- [Topics and messages](#topics-and-messages)
|
||||
- [Global topics](#global-topics)
|
||||
- [`beacon_block`](#beacon_block)
|
||||
- [`beacon_block_and_blobs_sidecar`](#beacon_block_and_blobs_sidecar)
|
||||
- [Transitioning the gossip](#transitioning-the-gossip)
|
||||
- [The Req/Resp domain](#the-reqresp-domain)
|
||||
- [Messages](#messages)
|
||||
- [BeaconBlocksByRange v2](#beaconblocksbyrange-v2)
|
||||
- [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2)
|
||||
- [BeaconBlockAndBlobsSidecarByRoot v1](#beaconblockandblobssidecarbyroot-v1)
|
||||
- [BlobsSidecarsByRange v1](#blobssidecarsbyrange-v1)
|
||||
- [Design decision rationale](#design-decision-rationale)
|
||||
- [Why are blobs relayed as a sidecar, separate from beacon blocks?](#why-are-blobs-relayed-as-a-sidecar-separate-from-beacon-blocks)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Configuration
|
||||
|
||||
| Name | Value | Description |
|
||||
|------------------------------------------|-----------------------------------|---------------------------------------------------------------------|
|
||||
| `MAX_REQUEST_BLOBS_SIDECARS` | `2**7` (= 128) | Maximum number of blobs sidecars in a single request |
|
||||
| `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` | `2**12` (= 4096 epochs, ~18 days) | The minimum epoch range over which a node must serve blobs sidecars |
|
||||
|
||||
## Containers
|
||||
|
||||
### `BlobsSidecar`
|
||||
|
||||
```python
|
||||
class BlobsSidecar(Container):
    # Root of the beacon block this sidecar is coupled with
    beacon_block_root: Root
    # Slot of that beacon block
    beacon_block_slot: Slot
    # The blobs themselves
    blobs: List[Blob, MAX_BLOBS_PER_BLOCK]
    # Single aggregated KZG proof covering all blobs in the list
    kzg_aggregated_proof: KZGProof
|
||||
```
|
||||
|
||||
### `SignedBeaconBlockAndBlobsSidecar`
|
||||
|
||||
```python
|
||||
class SignedBeaconBlockAndBlobsSidecar(Container):
    # The signed beacon block being distributed
    beacon_block: SignedBeaconBlock
    # The blobs sidecar coupled with that block
    blobs_sidecar: BlobsSidecar
|
||||
```
|
||||
|
||||
## The gossip domain: gossipsub
|
||||
|
||||
Some gossip meshes are upgraded in the fork of EIP-4844 to support upgraded types.
|
||||
|
||||
### Topics and messages
|
||||
|
||||
Topics follow the same specification as in prior upgrades.
|
||||
The `beacon_block` topic is deprecated and replaced by the `beacon_block_and_blobs_sidecar` topic. All other topics remain stable.
|
||||
|
||||
The specification around the creation, validation, and dissemination of messages has not changed from the Capella document unless explicitly noted here.
|
||||
|
||||
The derivation of the `message-id` remains stable.
|
||||
|
||||
The new topics along with the type of the `data` field of a gossipsub message are given in this table:
|
||||
|
||||
| Name | Message Type |
|
||||
| - | - |
|
||||
| `beacon_block_and_blobs_sidecar` | `SignedBeaconBlockAndBlobsSidecar` (new) |
|
||||
|
||||
#### Global topics
|
||||
|
||||
EIP-4844 introduces a new global topic for beacon block and blobs-sidecars.
|
||||
|
||||
##### `beacon_block`
|
||||
|
||||
This topic is deprecated and clients **MUST NOT** expose in their topic set to any peer. Implementers do not need to do
|
||||
anything beyond simply skip implementation, and it is explicitly called out as it is a departure from previous versioning
|
||||
of this topic.
|
||||
|
||||
Refer to [the section below](#transitioning-the-gossip) for details on how to transition the gossip.
|
||||
|
||||
##### `beacon_block_and_blobs_sidecar`
|
||||
|
||||
This topic is used to propagate new signed and coupled beacon blocks and blobs sidecars to all nodes on the network.
|
||||
|
||||
In addition to the gossip validations for the `beacon_block` topic from prior specifications, the following validations MUST pass before forwarding the `signed_beacon_block_and_blobs_sidecar` on the network.
|
||||
Alias `signed_beacon_block = signed_beacon_block_and_blobs_sidecar.beacon_block`, `block = signed_beacon_block.message`, `execution_payload = block.body.execution_payload`.
|
||||
- _[REJECT]_ The KZG commitments of the blobs are all correctly encoded compressed BLS G1 points
|
||||
-- i.e. `all(bls.KeyValidate(commitment) for commitment in block.body.blob_kzg_commitments)`
|
||||
- _[REJECT]_ The KZG commitments correspond to the versioned hashes in the transactions list
|
||||
-- i.e. `verify_kzg_commitments_against_transactions(block.body.execution_payload.transactions, block.body.blob_kzg_commitments)`
|
||||
|
||||
Alias `sidecar = signed_beacon_block_and_blobs_sidecar.blobs_sidecar`.
|
||||
- _[IGNORE]_ the `sidecar.beacon_block_slot` is for the current slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance)
|
||||
-- i.e. `sidecar.beacon_block_slot == block.slot`.
|
||||
- _[REJECT]_ the `sidecar.blobs` are all well formatted, i.e. the `BLSFieldElement` in valid range (`x < BLS_MODULUS`).
|
||||
- _[REJECT]_ The KZG proof is a correctly encoded compressed BLS G1 point
|
||||
-- i.e. `bls.KeyValidate(blobs_sidecar.kzg_aggregated_proof)`
|
||||
- _[REJECT]_ The KZG commitments in the block are valid against the provided blobs sidecar
|
||||
-- i.e. `validate_blobs_sidecar(block.slot, hash_tree_root(block), block.body.blob_kzg_commitments, sidecar)`
|
||||
|
||||
### Transitioning the gossip
|
||||
|
||||
See gossip transition details found in the [Altair document](../altair/p2p-interface.md#transitioning-the-gossip) for
|
||||
details on how to handle transitioning gossip topics for this upgrade.
|
||||
|
||||
## The Req/Resp domain
|
||||
|
||||
### Messages
|
||||
|
||||
#### BeaconBlocksByRange v2
|
||||
|
||||
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/`
|
||||
|
||||
The EIP-4844 fork-digest is introduced to the `context` enum to specify EIP-4844 beacon block type.
|
||||
|
||||
Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||
|
||||
[0]: # (eth2spec: skip)
|
||||
|
||||
| `fork_version` | Chunk SSZ type |
|
||||
|--------------------------|-------------------------------|
|
||||
| `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` |
|
||||
| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
|
||||
| `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` |
|
||||
| `CAPELLA_FORK_VERSION` | `capella.SignedBeaconBlock` |
|
||||
| `EIP4844_FORK_VERSION` | `eip4844.SignedBeaconBlock` |
|
||||
|
||||
#### BeaconBlocksByRoot v2
|
||||
|
||||
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/`
|
||||
|
||||
After `EIP4844_FORK_EPOCH`, `BeaconBlocksByRootV2` is replaced by `BeaconBlockAndBlobsSidecarByRootV1`;
however, clients MUST still support requesting blocks by root for pre-fork-epoch blocks.
|
||||
|
||||
Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||
|
||||
[1]: # (eth2spec: skip)
|
||||
|
||||
| `fork_version` | Chunk SSZ type |
|
||||
|--------------------------|-------------------------------|
|
||||
| `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` |
|
||||
| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
|
||||
| `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` |
|
||||
| `CAPELLA_FORK_VERSION` | `capella.SignedBeaconBlock` |
|
||||
|
||||
#### BeaconBlockAndBlobsSidecarByRoot v1
|
||||
|
||||
**Protocol ID:** `/eth2/beacon_chain/req/beacon_block_and_blobs_sidecar_by_root/1/`
|
||||
|
||||
Request Content:
|
||||
|
||||
```
|
||||
(
|
||||
List[Root, MAX_REQUEST_BLOCKS]
|
||||
)
|
||||
```
|
||||
|
||||
Response Content:
|
||||
|
||||
```
|
||||
(
|
||||
List[SignedBeaconBlockAndBlobsSidecar, MAX_REQUEST_BLOCKS]
|
||||
)
|
||||
```
|
||||
|
||||
Requests blocks by block root (= `hash_tree_root(SignedBeaconBlockAndBlobsSidecar.beacon_block.message)`).
|
||||
The response is a list of `SignedBeaconBlockAndBlobsSidecar` whose length is less than or equal to the number of requests.
|
||||
It may be less in the case that the responding peer is missing blocks and sidecars.
|
||||
|
||||
No more than `MAX_REQUEST_BLOCKS` may be requested at a time.
|
||||
|
||||
`BeaconBlockAndBlobsSidecarByRoot` is primarily used to recover recent blocks and sidecars (e.g. when receiving a block or attestation whose parent is unknown).
|
||||
|
||||
The response MUST consist of zero or more `response_chunk`.
|
||||
Each _successful_ `response_chunk` MUST contain a single `SignedBeaconBlockAndBlobsSidecar` payload.
|
||||
|
||||
Clients MUST support requesting blocks and sidecars since `minimum_request_epoch`, where `minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS, EIP4844_FORK_EPOCH)`. If any root in the request content references a block earlier than `minimum_request_epoch`, peers SHOULD respond with error code `3: ResourceUnavailable`.
|
||||
|
||||
Clients MUST respond with at least one block and sidecar, if they have it.
|
||||
Clients MAY limit the number of blocks and sidecars in the response.
|
||||
|
||||
#### BlobsSidecarsByRange v1
|
||||
|
||||
**Protocol ID:** `/eth2/beacon_chain/req/blobs_sidecars_by_range/1/`
|
||||
|
||||
Request Content:
|
||||
```
|
||||
(
|
||||
start_slot: Slot
|
||||
count: uint64
|
||||
)
|
||||
```
|
||||
|
||||
Response Content:
|
||||
```
|
||||
(
|
||||
List[BlobsSidecar, MAX_REQUEST_BLOBS_SIDECARS]
|
||||
)
|
||||
```
|
||||
|
||||
Requests blobs sidecars in the slot range `[start_slot, start_slot + count)`,
|
||||
leading up to the current head block as selected by fork choice.
|
||||
|
||||
The response is unsigned, i.e. `BlobsSidecarsByRange`, as the signature of the beacon block proposer
|
||||
may not be available beyond the initial distribution via gossip.
|
||||
|
||||
Before consuming the next response chunk, the response reader SHOULD verify the blobs sidecar is well-formatted and
|
||||
correct w.r.t. the expected KZG commitments through `validate_blobs_sidecar`.
|
||||
|
||||
`BlobsSidecarsByRange` is primarily used to sync blobs that may have been missed on gossip and to sync within the `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` window.
|
||||
|
||||
The request MUST be encoded as an SSZ-container.
|
||||
|
||||
The response MUST consist of zero or more `response_chunk`.
|
||||
Each _successful_ `response_chunk` MUST contain a single `BlobsSidecar` payload.
|
||||
|
||||
Clients MUST keep a record of signed blobs sidecars seen on the epoch range
|
||||
`[max(current_epoch - MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS, EIP4844_FORK_EPOCH), current_epoch]`
|
||||
where `current_epoch` is defined by the current wall-clock time,
|
||||
and clients MUST support serving requests of blocks on this range.
|
||||
|
||||
Peers that are unable to reply to blobs sidecars requests within the `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS`
|
||||
epoch range SHOULD respond with error code `3: ResourceUnavailable`.
|
||||
Such peers that are unable to successfully reply to this range of requests MAY get descored
|
||||
or disconnected at any time.
|
||||
|
||||
*Note*: The above requirement implies that nodes that start from a recent weak subjectivity checkpoint
|
||||
MUST backfill the local blobs database to at least epoch `current_epoch - MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS`
|
||||
to be fully compliant with `BlobsSidecarsByRange` requests.
|
||||
|
||||
*Note*: Although clients that bootstrap from a weak subjectivity checkpoint can begin
|
||||
participating in the networking immediately, other peers MAY
|
||||
disconnect and/or temporarily ban such an un-synced or semi-synced client.
|
||||
|
||||
Clients MUST respond with at least the first blobs sidecar that exists in the range, if they have it,
|
||||
and no more than `MAX_REQUEST_BLOBS_SIDECARS` sidecars.
|
||||
|
||||
The following blobs sidecars, where they exist, MUST be sent in consecutive order.
|
||||
|
||||
Clients MAY limit the number of blobs sidecars in the response.
|
||||
|
||||
The response MUST contain no more than `count` blobs sidecars.
|
||||
|
||||
Clients MUST respond with blobs sidecars from their view of the current fork choice
|
||||
-- that is, blobs sidecars as included by blocks from the single chain defined by the current head.
|
||||
Of note, blocks from slots before the finalization MUST lead to the finalized block reported in the `Status` handshake.
|
||||
|
||||
Clients MUST respond with blobs sidecars that are consistent from a single chain within the context of the request.
|
||||
|
||||
After the initial blobs sidecar, clients MAY stop in the process of responding
|
||||
if their fork choice changes the view of the chain in the context of the request.
|
||||
|
||||
# Design decision rationale
|
||||
|
||||
## Why are blobs relayed as a sidecar, separate from beacon blocks?
|
||||
|
||||
This "sidecar" design provides forward compatibility for further data increases by black-boxing `is_data_available()`:
|
||||
with full sharding `is_data_available()` can be replaced by data-availability-sampling (DAS)
|
||||
thus avoiding all blobs being downloaded by all beacon nodes on the network.
|
||||
|
||||
Such sharding design may introduce an updated `BlobsSidecar` to identify the shard,
|
||||
but does not affect the `BeaconBlock` structure.
|
||||
@@ -1,454 +0,0 @@
|
||||
# EIP-4844 -- Polynomial Commitments
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Custom types](#custom-types)
|
||||
- [Constants](#constants)
|
||||
- [Preset](#preset)
|
||||
- [Blob](#blob)
|
||||
- [Crypto](#crypto)
|
||||
- [Trusted setup](#trusted-setup)
|
||||
- [Helper functions](#helper-functions)
|
||||
- [Bit-reversal permutation](#bit-reversal-permutation)
|
||||
- [`is_power_of_two`](#is_power_of_two)
|
||||
- [`reverse_bits`](#reverse_bits)
|
||||
- [`bit_reversal_permutation`](#bit_reversal_permutation)
|
||||
- [BLS12-381 helpers](#bls12-381-helpers)
|
||||
- [`hash_to_bls_field`](#hash_to_bls_field)
|
||||
- [`bytes_to_bls_field`](#bytes_to_bls_field)
|
||||
- [`blob_to_polynomial`](#blob_to_polynomial)
|
||||
- [`compute_challenges`](#compute_challenges)
|
||||
- [`bls_modular_inverse`](#bls_modular_inverse)
|
||||
- [`div`](#div)
|
||||
- [`g1_lincomb`](#g1_lincomb)
|
||||
- [`poly_lincomb`](#poly_lincomb)
|
||||
- [`compute_powers`](#compute_powers)
|
||||
- [Polynomials](#polynomials)
|
||||
- [`evaluate_polynomial_in_evaluation_form`](#evaluate_polynomial_in_evaluation_form)
|
||||
- [KZG](#kzg)
|
||||
- [`blob_to_kzg_commitment`](#blob_to_kzg_commitment)
|
||||
- [`verify_kzg_proof`](#verify_kzg_proof)
|
||||
- [`verify_kzg_proof_impl`](#verify_kzg_proof_impl)
|
||||
- [`compute_kzg_proof`](#compute_kzg_proof)
|
||||
- [`compute_aggregated_poly_and_commitment`](#compute_aggregated_poly_and_commitment)
|
||||
- [`compute_aggregate_kzg_proof`](#compute_aggregate_kzg_proof)
|
||||
- [`verify_aggregate_kzg_proof`](#verify_aggregate_kzg_proof)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This document specifies basic polynomial operations and KZG polynomial commitment operations as they are needed for the EIP-4844 specification. The implementations are not optimized for performance, but readability. All practical implementations should optimize the polynomial operations.
|
||||
|
||||
Functions flagged as "Public method" MUST be provided by the underlying KZG library as public functions. All other functions are private functions used internally by the KZG library.
|
||||
|
||||
## Custom types
|
||||
|
||||
| Name | SSZ equivalent | Description |
|
||||
| - | - | - |
|
||||
| `G1Point` | `Bytes48` | |
|
||||
| `G2Point` | `Bytes96` | |
|
||||
| `BLSFieldElement` | `uint256` | `x < BLS_MODULUS` |
|
||||
| `KZGCommitment` | `Bytes48` | Same as BLS standard "is valid pubkey" check but also allows `0x00..00` for point-at-infinity |
|
||||
| `KZGProof` | `Bytes48` | Same as for `KZGCommitment` |
|
||||
| `Polynomial` | `Vector[BLSFieldElement, FIELD_ELEMENTS_PER_BLOB]` | a polynomial in evaluation form |
|
||||
| `Blob` | `ByteVector[BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB]` | a basic blob data |
|
||||
|
||||
## Constants
|
||||
|
||||
| Name | Value | Notes |
|
||||
| - | - | - |
|
||||
| `BLS_MODULUS` | `52435875175126190479447740508185965837690552500527637822603658699938581184513` | Scalar field modulus of BLS12-381 |
|
||||
| `BYTES_PER_FIELD_ELEMENT` | `uint64(32)` | Bytes used to encode a BLS scalar field element |
|
||||
|
||||
## Preset
|
||||
|
||||
### Blob
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `FIELD_ELEMENTS_PER_BLOB` | `uint64(4096)` |
|
||||
| `FIAT_SHAMIR_PROTOCOL_DOMAIN` | `b'FSBLOBVERIFY_V1_'` |
|
||||
|
||||
### Crypto
|
||||
|
||||
| Name | Value | Notes |
|
||||
| - | - | - |
|
||||
| `ROOTS_OF_UNITY` | `Vector[BLSFieldElement, FIELD_ELEMENTS_PER_BLOB]` | Roots of unity of order FIELD_ELEMENTS_PER_BLOB over the BLS12-381 field |
|
||||
|
||||
### Trusted setup
|
||||
|
||||
The trusted setup is part of the preset: during testing a `minimal` insecure variant may be used,
|
||||
but reusing the `mainnet` settings in public networks is a critical security requirement.
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `KZG_SETUP_G2_LENGTH` | `65` |
|
||||
| `KZG_SETUP_G1` | `Vector[G1Point, FIELD_ELEMENTS_PER_BLOB]`, contents TBD |
|
||||
| `KZG_SETUP_G2` | `Vector[G2Point, KZG_SETUP_G2_LENGTH]`, contents TBD |
|
||||
| `KZG_SETUP_LAGRANGE` | `Vector[KZGCommitment, FIELD_ELEMENTS_PER_BLOB]`, contents TBD |
|
||||
|
||||
## Helper functions
|
||||
|
||||
### Bit-reversal permutation
|
||||
|
||||
All polynomials (which are always given in Lagrange form) should be interpreted as being in
|
||||
bit-reversal permutation. In practice, clients can implement this by storing the lists
|
||||
`KZG_SETUP_LAGRANGE` and `ROOTS_OF_UNITY` in bit-reversal permutation, so these functions only
|
||||
have to be called once at startup.
|
||||
|
||||
#### `is_power_of_two`
|
||||
|
||||
```python
|
||||
def is_power_of_two(value: int) -> bool:
    """
    Check if ``value`` is a power of two integer.

    Returns ``False`` for zero and negative inputs.
    """
    if value <= 0:
        return False
    # A power of two has exactly one bit set
    return value & (value - 1) == 0
|
||||
```
|
||||
|
||||
#### `reverse_bits`
|
||||
|
||||
```python
|
||||
def reverse_bits(n: int, order: int) -> int:
    """
    Reverse the bit order of an integer ``n``.

    ``order`` must be a power of two; ``n`` is rendered using
    ``log2(order)`` binary digits before its bits are reversed.
    """
    assert is_power_of_two(order)
    # Number of binary digits needed to index the range [0, order)
    bit_width = order.bit_length() - 1
    binary_digits = format(n, '0{}b'.format(bit_width))
    return int(binary_digits[::-1], 2)
|
||||
```
|
||||
|
||||
#### `bit_reversal_permutation`
|
||||
|
||||
```python
|
||||
def bit_reversal_permutation(sequence: Sequence[T]) -> Sequence[T]:
    """
    Return a copy with bit-reversed permutation. The permutation is an involution (inverts itself).

    The input and output are a sequence of generic type ``T`` objects.
    """
    size = len(sequence)
    permuted = []
    for position in range(size):
        permuted.append(sequence[reverse_bits(position, size)])
    return permuted
|
||||
```
|
||||
|
||||
### BLS12-381 helpers
|
||||
|
||||
#### `hash_to_bls_field`
|
||||
|
||||
```python
|
||||
def hash_to_bls_field(data: bytes) -> BLSFieldElement:
    """
    Hash ``data`` and reduce the digest modulo ``BLS_MODULUS`` to obtain a
    BLS scalar field element.

    The modular reduction means the output is not uniform over the BLS field.
    """
    digest = hash(data)
    digest_as_int = int.from_bytes(digest, ENDIANNESS)
    return BLSFieldElement(digest_as_int % BLS_MODULUS)
|
||||
```
|
||||
|
||||
#### `bytes_to_bls_field`
|
||||
|
||||
```python
|
||||
def bytes_to_bls_field(b: Bytes32) -> BLSFieldElement:
    """
    Convert a 32-byte value to a BLS scalar field element.

    Inputs whose integer value is greater than or equal to the BLS modulus
    are rejected (non-canonical encodings are not accepted).
    """
    encoded_value = int.from_bytes(b, ENDIANNESS)
    # Reject values outside the field
    assert encoded_value < BLS_MODULUS
    return BLSFieldElement(encoded_value)
|
||||
```
|
||||
|
||||
#### `blob_to_polynomial`
|
||||
|
||||
```python
|
||||
def blob_to_polynomial(blob: Blob) -> Polynomial:
    """
    Convert a blob to a list of BLS field scalars (a polynomial in
    evaluation form), one element per ``BYTES_PER_FIELD_ELEMENT`` chunk.
    """
    polynomial = Polynomial()
    for index in range(FIELD_ELEMENTS_PER_BLOB):
        start = index * BYTES_PER_FIELD_ELEMENT
        chunk = blob[start:start + BYTES_PER_FIELD_ELEMENT]
        polynomial[index] = bytes_to_bls_field(chunk)
    return polynomial
|
||||
```
|
||||
|
||||
#### `compute_challenges`
|
||||
|
||||
```python
|
||||
def compute_challenges(polynomials: Sequence[Polynomial],
                       commitments: Sequence[KZGCommitment]) -> Tuple[Sequence[BLSFieldElement], BLSFieldElement]:
    """
    Return the Fiat-Shamir challenges required by the rest of the protocol.
    The Fiat-Shamir logic works as per the following pseudocode:

       hashed_data = hash(DOMAIN_SEPARATOR, polynomials, commitments)
       r = hash(hashed_data, 0)
       r_powers = [1, r, r**2, r**3, ...]
       eval_challenge = hash(hashed_data, 1)

    Then return `r_powers` and `eval_challenge` after converting them to BLS field elements.
    The resulting field elements are not uniform over the BLS field.
    """
    # Append the number of polynomials and the degree of each polynomial as a domain separator
    num_polynomials = int.to_bytes(len(polynomials), 8, ENDIANNESS)
    degree_poly = int.to_bytes(FIELD_ELEMENTS_PER_BLOB, 8, ENDIANNESS)
    data = FIAT_SHAMIR_PROTOCOL_DOMAIN + degree_poly + num_polynomials

    # Append each polynomial which is composed by field elements
    for poly in polynomials:
        for field_element in poly:
            data += int.to_bytes(field_element, BYTES_PER_FIELD_ELEMENT, ENDIANNESS)

    # Append serialized G1 points
    for commitment in commitments:
        data += commitment

    # Transcript has been prepared: time to create the challenges
    hashed_data = hash(data)
    # The two challenges are domain-separated by a one-byte suffix (0x00 vs 0x01)
    r = hash_to_bls_field(hashed_data + b'\x00')
    r_powers = compute_powers(r, len(commitments))
    eval_challenge = hash_to_bls_field(hashed_data + b'\x01')

    return r_powers, eval_challenge
|
||||
```
|
||||
|
||||
#### `bls_modular_inverse`
|
||||
|
||||
```python
|
||||
def bls_modular_inverse(x: BLSFieldElement) -> BLSFieldElement:
    """
    Compute the modular inverse of ``x`` over the BLS scalar field,
    i.e. return ``y`` such that ``x * y % BLS_MODULUS == 1``.
    By convention, returns 0 for ``x == 0`` (which has no inverse).
    """
    if x == 0:
        return BLSFieldElement(0)
    # Python's three-argument pow supports modular inverses via exponent -1
    return BLSFieldElement(pow(x, -1, BLS_MODULUS))
|
||||
```
|
||||
|
||||
#### `div`
|
||||
|
||||
```python
|
||||
def div(x: BLSFieldElement, y: BLSFieldElement) -> BLSFieldElement:
    """
    Divide two field elements: return ``x`` / ``y`` modulo ``BLS_MODULUS``.
    Per the convention of ``bls_modular_inverse``, ``div(x, 0)`` yields 0.
    """
    inverse_of_y = bls_modular_inverse(y)
    return BLSFieldElement(int(x) * int(inverse_of_y) % BLS_MODULUS)
|
||||
```
|
||||
|
||||
#### `g1_lincomb`
|
||||
|
||||
```python
|
||||
def g1_lincomb(points: Sequence[KZGCommitment], scalars: Sequence[BLSFieldElement]) -> KZGCommitment:
    """
    BLS multiscalar multiplication. This function can be optimized using Pippenger's algorithm and variants.
    """
    assert len(points) == len(scalars)
    # Start from the identity (point at infinity) and accumulate each term
    accumulator = bls.Z1
    for point_bytes, scalar in zip(points, scalars):
        term = bls.multiply(bls.bytes48_to_G1(point_bytes), scalar)
        accumulator = bls.add(accumulator, term)
    return KZGCommitment(bls.G1_to_bytes48(accumulator))
|
||||
```
|
||||
|
||||
#### `poly_lincomb`
|
||||
|
||||
```python
|
||||
def poly_lincomb(polys: Sequence[Polynomial],
|
||||
scalars: Sequence[BLSFieldElement]) -> Polynomial:
|
||||
"""
|
||||
Given a list of ``polynomials``, interpret it as a 2D matrix and compute the linear combination
|
||||
of each column with `scalars`: return the resulting polynomials.
|
||||
"""
|
||||
assert len(polys) == len(scalars)
|
||||
result = [0] * FIELD_ELEMENTS_PER_BLOB
|
||||
for v, s in zip(polys, scalars):
|
||||
for i, x in enumerate(v):
|
||||
result[i] = (result[i] + int(s) * int(x)) % BLS_MODULUS
|
||||
return Polynomial([BLSFieldElement(x) for x in result])
|
||||
```
|
||||
|
||||
#### `compute_powers`
|
||||
|
||||
```python
|
||||
def compute_powers(x: BLSFieldElement, n: uint64) -> Sequence[BLSFieldElement]:
|
||||
"""
|
||||
Return ``x`` to power of [0, n-1], if n > 0. When n==0, an empty array is returned.
|
||||
"""
|
||||
current_power = 1
|
||||
powers = []
|
||||
for _ in range(n):
|
||||
powers.append(BLSFieldElement(current_power))
|
||||
current_power = current_power * int(x) % BLS_MODULUS
|
||||
return powers
|
||||
```
|
||||
|
||||
|
||||
### Polynomials
|
||||
|
||||
#### `evaluate_polynomial_in_evaluation_form`
|
||||
|
||||
```python
|
||||
def evaluate_polynomial_in_evaluation_form(polynomial: Polynomial,
|
||||
z: BLSFieldElement) -> BLSFieldElement:
|
||||
"""
|
||||
Evaluate a polynomial (in evaluation form) at an arbitrary point ``z`` that is not in the domain.
|
||||
Uses the barycentric formula:
|
||||
f(z) = (z**WIDTH - 1) / WIDTH * sum_(i=0)^WIDTH (f(DOMAIN[i]) * DOMAIN[i]) / (z - DOMAIN[i])
|
||||
"""
|
||||
width = len(polynomial)
|
||||
assert width == FIELD_ELEMENTS_PER_BLOB
|
||||
inverse_width = bls_modular_inverse(BLSFieldElement(width))
|
||||
|
||||
# Make sure we won't divide by zero during division
|
||||
assert z not in ROOTS_OF_UNITY
|
||||
|
||||
roots_of_unity_brp = bit_reversal_permutation(ROOTS_OF_UNITY)
|
||||
|
||||
result = 0
|
||||
for i in range(width):
|
||||
a = BLSFieldElement(int(polynomial[i]) * int(roots_of_unity_brp[i]) % BLS_MODULUS)
|
||||
b = BLSFieldElement((int(BLS_MODULUS) + int(z) - int(roots_of_unity_brp[i])) % BLS_MODULUS)
|
||||
result += int(div(a, b) % BLS_MODULUS)
|
||||
result = result * int(pow(z, width, BLS_MODULUS) - 1) * int(inverse_width)
|
||||
return BLSFieldElement(result % BLS_MODULUS)
|
||||
```
|
||||
|
||||
### KZG
|
||||
|
||||
KZG core functions. These are also defined in EIP-4844 execution specs.
|
||||
|
||||
#### `blob_to_kzg_commitment`
|
||||
|
||||
```python
|
||||
def blob_to_kzg_commitment(blob: Blob) -> KZGCommitment:
|
||||
"""
|
||||
Public method.
|
||||
"""
|
||||
return g1_lincomb(bit_reversal_permutation(KZG_SETUP_LAGRANGE), blob_to_polynomial(blob))
|
||||
```
|
||||
|
||||
#### `verify_kzg_proof`
|
||||
|
||||
```python
|
||||
def verify_kzg_proof(polynomial_kzg: KZGCommitment,
|
||||
z: Bytes32,
|
||||
y: Bytes32,
|
||||
kzg_proof: KZGProof) -> bool:
|
||||
"""
|
||||
Verify KZG proof that ``p(z) == y`` where ``p(z)`` is the polynomial represented by ``polynomial_kzg``.
|
||||
Receives inputs as bytes.
|
||||
Public method.
|
||||
"""
|
||||
return verify_kzg_proof_impl(polynomial_kzg, bytes_to_bls_field(z), bytes_to_bls_field(y), kzg_proof)
|
||||
```
|
||||
|
||||
|
||||
#### `verify_kzg_proof_impl`
|
||||
|
||||
```python
|
||||
def verify_kzg_proof_impl(polynomial_kzg: KZGCommitment,
|
||||
z: BLSFieldElement,
|
||||
y: BLSFieldElement,
|
||||
kzg_proof: KZGProof) -> bool:
|
||||
"""
|
||||
Verify KZG proof that ``p(z) == y`` where ``p(z)`` is the polynomial represented by ``polynomial_kzg``.
|
||||
"""
|
||||
# Verify: P - y = Q * (X - z)
|
||||
X_minus_z = bls.add(bls.bytes96_to_G2(KZG_SETUP_G2[1]), bls.multiply(bls.G2, BLS_MODULUS - z))
|
||||
P_minus_y = bls.add(bls.bytes48_to_G1(polynomial_kzg), bls.multiply(bls.G1, BLS_MODULUS - y))
|
||||
return bls.pairing_check([
|
||||
[P_minus_y, bls.neg(bls.G2)],
|
||||
[bls.bytes48_to_G1(kzg_proof), X_minus_z]
|
||||
])
|
||||
```
|
||||
|
||||
#### `compute_kzg_proof`
|
||||
|
||||
```python
|
||||
def compute_kzg_proof(polynomial: Polynomial, z: BLSFieldElement) -> KZGProof:
|
||||
"""
|
||||
Compute KZG proof at point `z` with `polynomial` being in evaluation form
|
||||
Do this by computing the quotient polynomial in evaluation form: q(x) = (p(x) - p(z)) / (x - z)
|
||||
"""
|
||||
y = evaluate_polynomial_in_evaluation_form(polynomial, z)
|
||||
polynomial_shifted = [BLSFieldElement((int(p) - int(y)) % BLS_MODULUS) for p in polynomial]
|
||||
|
||||
# Make sure we won't divide by zero during division
|
||||
assert z not in ROOTS_OF_UNITY
|
||||
denominator_poly = [BLSFieldElement((int(x) - int(z)) % BLS_MODULUS)
|
||||
for x in bit_reversal_permutation(ROOTS_OF_UNITY)]
|
||||
|
||||
# Calculate quotient polynomial by doing point-by-point division
|
||||
quotient_polynomial = [div(a, b) for a, b in zip(polynomial_shifted, denominator_poly)]
|
||||
return KZGProof(g1_lincomb(bit_reversal_permutation(KZG_SETUP_LAGRANGE), quotient_polynomial))
|
||||
```
|
||||
|
||||
#### `compute_aggregated_poly_and_commitment`
|
||||
|
||||
```python
|
||||
def compute_aggregated_poly_and_commitment(
|
||||
blobs: Sequence[Blob],
|
||||
kzg_commitments: Sequence[KZGCommitment]) -> Tuple[Polynomial, KZGCommitment, BLSFieldElement]:
|
||||
"""
|
||||
Return (1) the aggregated polynomial, (2) the aggregated KZG commitment,
|
||||
and (3) the polynomial evaluation random challenge.
|
||||
This function should also work with blobs == [] and kzg_commitments == []
|
||||
"""
|
||||
assert len(blobs) == len(kzg_commitments)
|
||||
|
||||
# Convert blobs to polynomials
|
||||
polynomials = [blob_to_polynomial(blob) for blob in blobs]
|
||||
|
||||
# Generate random linear combination and evaluation challenges
|
||||
r_powers, evaluation_challenge = compute_challenges(polynomials, kzg_commitments)
|
||||
|
||||
# Create aggregated polynomial in evaluation form
|
||||
aggregated_poly = poly_lincomb(polynomials, r_powers)
|
||||
|
||||
# Compute commitment to aggregated polynomial
|
||||
aggregated_poly_commitment = KZGCommitment(g1_lincomb(kzg_commitments, r_powers))
|
||||
|
||||
return aggregated_poly, aggregated_poly_commitment, evaluation_challenge
|
||||
```
|
||||
|
||||
#### `compute_aggregate_kzg_proof`
|
||||
|
||||
```python
|
||||
def compute_aggregate_kzg_proof(blobs: Sequence[Blob]) -> KZGProof:
|
||||
"""
|
||||
Given a list of blobs, return the aggregated KZG proof that is used to verify them against their commitments.
|
||||
Public method.
|
||||
"""
|
||||
commitments = [blob_to_kzg_commitment(blob) for blob in blobs]
|
||||
aggregated_poly, aggregated_poly_commitment, evaluation_challenge = compute_aggregated_poly_and_commitment(
|
||||
blobs,
|
||||
commitments
|
||||
)
|
||||
return compute_kzg_proof(aggregated_poly, evaluation_challenge)
|
||||
```
|
||||
|
||||
#### `verify_aggregate_kzg_proof`
|
||||
|
||||
```python
|
||||
def verify_aggregate_kzg_proof(blobs: Sequence[Blob],
|
||||
expected_kzg_commitments: Sequence[KZGCommitment],
|
||||
kzg_aggregated_proof: KZGProof) -> bool:
|
||||
"""
|
||||
Given a list of blobs and an aggregated KZG proof, verify that they correspond to the provided commitments.
|
||||
|
||||
Public method.
|
||||
"""
|
||||
aggregated_poly, aggregated_poly_commitment, evaluation_challenge = compute_aggregated_poly_and_commitment(
|
||||
blobs,
|
||||
expected_kzg_commitments,
|
||||
)
|
||||
|
||||
# Evaluate aggregated polynomial at `evaluation_challenge` (evaluation function checks for div-by-zero)
|
||||
y = evaluate_polynomial_in_evaluation_form(aggregated_poly, evaluation_challenge)
|
||||
|
||||
# Verify aggregated proof
|
||||
return verify_kzg_proof_impl(aggregated_poly_commitment, evaluation_challenge, y, kzg_aggregated_proof)
|
||||
```
|
||||
@@ -43,11 +43,12 @@ This document is the beacon chain fork choice spec, part of Phase 0. It assumes
|
||||
|
||||
## Fork choice
|
||||
|
||||
The head block root associated with a `store` is defined as `get_head(store)`. At genesis, let `store = get_forkchoice_store(genesis_state)` and update `store` by running:
|
||||
The head block root associated with a `store` is defined as `get_head(store)`. At genesis, let `store = get_forkchoice_store(genesis_state, genesis_block)` and update `store` by running:
|
||||
|
||||
- `on_tick(store, time)` whenever `time > store.time` where `time` is the current Unix time
|
||||
- `on_block(store, block)` whenever a block `block: SignedBeaconBlock` is received
|
||||
- `on_attestation(store, attestation)` whenever an attestation `attestation` is received
|
||||
- `on_attester_slashing(store, attester_slashing)` whenever an attester slashing `attester_slashing` is received
|
||||
|
||||
Any of the above handlers that trigger an unhandled exception (e.g. a failed assert or an out-of-range list access) are considered invalid. Invalid calls to handlers must not modify `store`.
|
||||
|
||||
@@ -193,10 +194,7 @@ def get_latest_attesting_balance(store: Store, root: Root) -> Gwei:
|
||||
proposer_score = Gwei(0)
|
||||
# Boost is applied if ``root`` is an ancestor of ``proposer_boost_root``
|
||||
if get_ancestor(store, store.proposer_boost_root, store.blocks[root].slot) == root:
|
||||
num_validators = len(get_active_validator_indices(state, get_current_epoch(state)))
|
||||
avg_balance = get_total_active_balance(state) // num_validators
|
||||
committee_size = num_validators // SLOTS_PER_EPOCH
|
||||
committee_weight = committee_size * avg_balance
|
||||
committee_weight = get_total_active_balance(state) // SLOTS_PER_EPOCH
|
||||
proposer_score = (committee_weight * PROPOSER_SCORE_BOOST) // 100
|
||||
return attestation_score + proposer_score
|
||||
|
||||
@@ -485,4 +483,4 @@ def on_attester_slashing(store: Store, attester_slashing: AttesterSlashing) -> N
|
||||
indices = set(attestation_1.attesting_indices).intersection(attestation_2.attesting_indices)
|
||||
for index in indices:
|
||||
store.equivocating_indices.add(index)
|
||||
```
|
||||
```
|
||||
|
||||
@@ -1 +1 @@
|
||||
1.3.0-alpha.2
|
||||
1.3.0-rc.3
|
||||
|
||||
@@ -10,8 +10,8 @@ from eth2spec.test.context import (
|
||||
@spec_state_test
|
||||
def test_current_sync_committee_merkle_proof(spec, state):
|
||||
yield "object", state
|
||||
current_sync_committee_branch = \
|
||||
spec.compute_merkle_proof_for_state(state, spec.CURRENT_SYNC_COMMITTEE_INDEX)
|
||||
current_sync_committee_branch = spec.compute_merkle_proof_for_state(
|
||||
state, spec.CURRENT_SYNC_COMMITTEE_INDEX)
|
||||
yield "proof", {
|
||||
"leaf": "0x" + state.current_sync_committee.hash_tree_root().hex(),
|
||||
"leaf_index": spec.CURRENT_SYNC_COMMITTEE_INDEX,
|
||||
@@ -31,8 +31,8 @@ def test_current_sync_committee_merkle_proof(spec, state):
|
||||
@spec_state_test
|
||||
def test_next_sync_committee_merkle_proof(spec, state):
|
||||
yield "object", state
|
||||
next_sync_committee_branch = \
|
||||
spec.compute_merkle_proof_for_state(state, spec.NEXT_SYNC_COMMITTEE_INDEX)
|
||||
next_sync_committee_branch = spec.compute_merkle_proof_for_state(
|
||||
state, spec.NEXT_SYNC_COMMITTEE_INDEX)
|
||||
yield "proof", {
|
||||
"leaf": "0x" + state.next_sync_committee.hash_tree_root().hex(),
|
||||
"leaf_index": spec.NEXT_SYNC_COMMITTEE_INDEX,
|
||||
@@ -52,8 +52,8 @@ def test_next_sync_committee_merkle_proof(spec, state):
|
||||
@spec_state_test
|
||||
def test_finality_root_merkle_proof(spec, state):
|
||||
yield "object", state
|
||||
finality_branch = \
|
||||
spec.compute_merkle_proof_for_state(state, spec.FINALIZED_ROOT_INDEX)
|
||||
finality_branch = spec.compute_merkle_proof_for_state(
|
||||
state, spec.FINALIZED_ROOT_INDEX)
|
||||
yield "proof", {
|
||||
"leaf": "0x" + state.finalized_checkpoint.root.hex(),
|
||||
"leaf_index": spec.FINALIZED_ROOT_INDEX,
|
||||
|
||||
@@ -3,14 +3,30 @@ from typing import (Any, Dict, List)
|
||||
from eth_utils import encode_hex
|
||||
from eth2spec.test.context import (
|
||||
spec_state_test_with_matching_config,
|
||||
spec_test,
|
||||
with_config_overrides,
|
||||
with_matching_spec_config,
|
||||
with_phases,
|
||||
with_presets,
|
||||
with_state,
|
||||
with_altair_and_later,
|
||||
)
|
||||
from eth2spec.test.helpers.attestations import (
|
||||
next_slots_with_attestations,
|
||||
state_transition_with_full_block,
|
||||
)
|
||||
from eth2spec.test.helpers.constants import MINIMAL
|
||||
from eth2spec.test.helpers.constants import (
|
||||
PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB,
|
||||
MINIMAL,
|
||||
ALL_PHASES,
|
||||
)
|
||||
from eth2spec.test.helpers.fork_transition import (
|
||||
do_fork,
|
||||
)
|
||||
from eth2spec.test.helpers.forks import (
|
||||
is_post_capella, is_post_deneb,
|
||||
is_post_fork,
|
||||
)
|
||||
from eth2spec.test.helpers.light_client import (
|
||||
get_sync_aggregate,
|
||||
)
|
||||
@@ -20,25 +36,150 @@ from eth2spec.test.helpers.state import (
|
||||
)
|
||||
|
||||
|
||||
def setup_test(spec, state):
|
||||
class LightClientSyncTest(object):
|
||||
steps: List[Dict[str, Any]]
|
||||
genesis_validators_root: spec.Root
|
||||
store: spec.LightClientStore
|
||||
def get_spec_for_fork_version(spec, fork_version, phases):
|
||||
if phases is None:
|
||||
return spec
|
||||
for fork in [fork for fork in ALL_PHASES if is_post_fork(spec.fork, fork)]:
|
||||
if fork == PHASE0:
|
||||
fork_version_field = 'GENESIS_FORK_VERSION'
|
||||
else:
|
||||
fork_version_field = fork.upper() + '_FORK_VERSION'
|
||||
if fork_version == getattr(spec.config, fork_version_field):
|
||||
return phases[fork]
|
||||
raise ValueError("Unknown fork version %s" % fork_version)
|
||||
|
||||
|
||||
def needs_upgrade_to_capella(d_spec, s_spec):
|
||||
return is_post_capella(s_spec) and not is_post_capella(d_spec)
|
||||
|
||||
|
||||
def needs_upgrade_to_deneb(d_spec, s_spec):
|
||||
return is_post_deneb(s_spec) and not is_post_deneb(d_spec)
|
||||
|
||||
|
||||
def check_lc_header_equal(d_spec, s_spec, data, upgraded):
|
||||
assert upgraded.beacon.slot == data.beacon.slot
|
||||
assert upgraded.beacon.hash_tree_root() == data.beacon.hash_tree_root()
|
||||
if is_post_capella(s_spec):
|
||||
if is_post_capella(d_spec):
|
||||
assert s_spec.get_lc_execution_root(upgraded) == d_spec.get_lc_execution_root(data)
|
||||
else:
|
||||
assert s_spec.get_lc_execution_root(upgraded) == s_spec.Root()
|
||||
|
||||
|
||||
def check_lc_bootstrap_equal(d_spec, s_spec, data, upgraded):
|
||||
check_lc_header_equal(d_spec, s_spec, data.header, upgraded.header)
|
||||
assert upgraded.current_sync_committee == data.current_sync_committee
|
||||
assert upgraded.current_sync_committee_branch == data.current_sync_committee_branch
|
||||
|
||||
|
||||
def upgrade_lc_bootstrap_to_store(d_spec, s_spec, data):
|
||||
upgraded = data
|
||||
|
||||
if needs_upgrade_to_capella(d_spec, s_spec):
|
||||
upgraded = s_spec.upgrade_lc_bootstrap_to_capella(upgraded)
|
||||
check_lc_bootstrap_equal(d_spec, s_spec, data, upgraded)
|
||||
|
||||
if needs_upgrade_to_deneb(d_spec, s_spec):
|
||||
upgraded = s_spec.upgrade_lc_bootstrap_to_deneb(upgraded)
|
||||
check_lc_bootstrap_equal(d_spec, s_spec, data, upgraded)
|
||||
|
||||
return upgraded
|
||||
|
||||
|
||||
def check_lc_update_equal(d_spec, s_spec, data, upgraded):
|
||||
check_lc_header_equal(d_spec, s_spec, data.attested_header, upgraded.attested_header)
|
||||
assert upgraded.next_sync_committee == data.next_sync_committee
|
||||
assert upgraded.next_sync_committee_branch == data.next_sync_committee_branch
|
||||
check_lc_header_equal(d_spec, s_spec, data.finalized_header, upgraded.finalized_header)
|
||||
assert upgraded.sync_aggregate == data.sync_aggregate
|
||||
assert upgraded.signature_slot == data.signature_slot
|
||||
|
||||
|
||||
def upgrade_lc_update_to_store(d_spec, s_spec, data):
|
||||
upgraded = data
|
||||
|
||||
if needs_upgrade_to_capella(d_spec, s_spec):
|
||||
upgraded = s_spec.upgrade_lc_update_to_capella(upgraded)
|
||||
check_lc_update_equal(d_spec, s_spec, data, upgraded)
|
||||
|
||||
if needs_upgrade_to_deneb(d_spec, s_spec):
|
||||
upgraded = s_spec.upgrade_lc_update_to_deneb(upgraded)
|
||||
check_lc_update_equal(d_spec, s_spec, data, upgraded)
|
||||
|
||||
return upgraded
|
||||
|
||||
|
||||
def check_lc_store_equal(d_spec, s_spec, data, upgraded):
|
||||
check_lc_header_equal(d_spec, s_spec, data.finalized_header, upgraded.finalized_header)
|
||||
assert upgraded.current_sync_committee == data.current_sync_committee
|
||||
assert upgraded.next_sync_committee == data.next_sync_committee
|
||||
if upgraded.best_valid_update is None:
|
||||
assert data.best_valid_update is None
|
||||
else:
|
||||
check_lc_update_equal(d_spec, s_spec, data.best_valid_update, upgraded.best_valid_update)
|
||||
check_lc_header_equal(d_spec, s_spec, data.optimistic_header, upgraded.optimistic_header)
|
||||
assert upgraded.previous_max_active_participants == data.previous_max_active_participants
|
||||
assert upgraded.current_max_active_participants == data.current_max_active_participants
|
||||
|
||||
|
||||
def upgrade_lc_store_to_new_spec(d_spec, s_spec, data):
|
||||
upgraded = data
|
||||
|
||||
if needs_upgrade_to_capella(d_spec, s_spec):
|
||||
upgraded = s_spec.upgrade_lc_store_to_capella(upgraded)
|
||||
check_lc_store_equal(d_spec, s_spec, data, upgraded)
|
||||
|
||||
if needs_upgrade_to_deneb(d_spec, s_spec):
|
||||
upgraded = s_spec.upgrade_lc_store_to_deneb(upgraded)
|
||||
check_lc_store_equal(d_spec, s_spec, data, upgraded)
|
||||
|
||||
return upgraded
|
||||
|
||||
|
||||
class LightClientSyncTest(object):
|
||||
steps: List[Dict[str, Any]]
|
||||
genesis_validators_root: Any
|
||||
s_spec: Any
|
||||
store: Any
|
||||
|
||||
|
||||
def get_store_fork_version(s_spec):
|
||||
if is_post_deneb(s_spec):
|
||||
return s_spec.config.DENEB_FORK_VERSION
|
||||
if is_post_capella(s_spec):
|
||||
return s_spec.config.CAPELLA_FORK_VERSION
|
||||
return s_spec.config.ALTAIR_FORK_VERSION
|
||||
|
||||
|
||||
def setup_test(spec, state, s_spec=None, phases=None):
|
||||
test = LightClientSyncTest()
|
||||
test.steps = []
|
||||
|
||||
if s_spec is None:
|
||||
s_spec = spec
|
||||
test.s_spec = s_spec
|
||||
|
||||
yield "genesis_validators_root", "meta", "0x" + state.genesis_validators_root.hex()
|
||||
test.genesis_validators_root = state.genesis_validators_root
|
||||
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH * 2 - 1)
|
||||
trusted_block = state_transition_with_full_block(spec, state, True, True)
|
||||
trusted_block_root = trusted_block.message.hash_tree_root()
|
||||
bootstrap = spec.create_light_client_bootstrap(state, trusted_block)
|
||||
yield "trusted_block_root", "meta", "0x" + trusted_block_root.hex()
|
||||
yield "bootstrap", bootstrap
|
||||
test.store = spec.initialize_light_client_store(trusted_block_root, bootstrap)
|
||||
|
||||
data_fork_version = spec.compute_fork_version(spec.compute_epoch_at_slot(trusted_block.message.slot))
|
||||
data_fork_digest = spec.compute_fork_digest(data_fork_version, test.genesis_validators_root)
|
||||
d_spec = get_spec_for_fork_version(spec, data_fork_version, phases)
|
||||
data = d_spec.create_light_client_bootstrap(state, trusted_block)
|
||||
yield "bootstrap_fork_digest", "meta", encode_hex(data_fork_digest)
|
||||
yield "bootstrap", data
|
||||
|
||||
upgraded = upgrade_lc_bootstrap_to_store(d_spec, test.s_spec, data)
|
||||
test.store = test.s_spec.initialize_light_client_store(trusted_block_root, upgraded)
|
||||
store_fork_version = get_store_fork_version(test.s_spec)
|
||||
store_fork_digest = test.s_spec.compute_fork_digest(store_fork_version, test.genesis_validators_root)
|
||||
yield "store_fork_digest", "meta", encode_hex(store_fork_digest)
|
||||
|
||||
return test
|
||||
|
||||
@@ -47,62 +188,97 @@ def finish_test(test):
|
||||
yield "steps", test.steps
|
||||
|
||||
|
||||
def get_update_file_name(spec, update):
|
||||
if spec.is_sync_committee_update(update):
|
||||
def get_update_file_name(d_spec, update):
|
||||
if d_spec.is_sync_committee_update(update):
|
||||
suffix1 = "s"
|
||||
else:
|
||||
suffix1 = "x"
|
||||
if spec.is_finality_update(update):
|
||||
if d_spec.is_finality_update(update):
|
||||
suffix2 = "f"
|
||||
else:
|
||||
suffix2 = "x"
|
||||
return f"update_{encode_hex(update.attested_header.hash_tree_root())}_{suffix1}{suffix2}"
|
||||
return f"update_{encode_hex(update.attested_header.beacon.hash_tree_root())}_{suffix1}{suffix2}"
|
||||
|
||||
|
||||
def get_checks(store):
|
||||
def get_checks(s_spec, store):
|
||||
if is_post_capella(s_spec):
|
||||
return {
|
||||
"finalized_header": {
|
||||
'slot': int(store.finalized_header.beacon.slot),
|
||||
'beacon_root': encode_hex(store.finalized_header.beacon.hash_tree_root()),
|
||||
'execution_root': encode_hex(s_spec.get_lc_execution_root(store.finalized_header)),
|
||||
},
|
||||
"optimistic_header": {
|
||||
'slot': int(store.optimistic_header.beacon.slot),
|
||||
'beacon_root': encode_hex(store.optimistic_header.beacon.hash_tree_root()),
|
||||
'execution_root': encode_hex(s_spec.get_lc_execution_root(store.optimistic_header)),
|
||||
},
|
||||
}
|
||||
|
||||
return {
|
||||
"finalized_header": {
|
||||
'slot': int(store.finalized_header.slot),
|
||||
'beacon_root': encode_hex(store.finalized_header.hash_tree_root()),
|
||||
'slot': int(store.finalized_header.beacon.slot),
|
||||
'beacon_root': encode_hex(store.finalized_header.beacon.hash_tree_root()),
|
||||
},
|
||||
"optimistic_header": {
|
||||
'slot': int(store.optimistic_header.slot),
|
||||
'beacon_root': encode_hex(store.optimistic_header.hash_tree_root()),
|
||||
'slot': int(store.optimistic_header.beacon.slot),
|
||||
'beacon_root': encode_hex(store.optimistic_header.beacon.hash_tree_root()),
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def emit_force_update(test, spec, state):
|
||||
current_slot = state.slot
|
||||
spec.process_light_client_store_force_update(test.store, current_slot)
|
||||
test.s_spec.process_light_client_store_force_update(test.store, current_slot)
|
||||
|
||||
yield from [] # Consistently enable `yield from` syntax in calling tests
|
||||
test.steps.append({
|
||||
"force_update": {
|
||||
"current_slot": int(current_slot),
|
||||
"checks": get_checks(test.store),
|
||||
"checks": get_checks(test.s_spec, test.store),
|
||||
}
|
||||
})
|
||||
|
||||
|
||||
def emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, with_next=True):
|
||||
update = spec.create_light_client_update(state, block, attested_state, attested_block, finalized_block)
|
||||
def emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, with_next=True, phases=None):
|
||||
data_fork_version = spec.compute_fork_version(spec.compute_epoch_at_slot(attested_block.message.slot))
|
||||
data_fork_digest = spec.compute_fork_digest(data_fork_version, test.genesis_validators_root)
|
||||
d_spec = get_spec_for_fork_version(spec, data_fork_version, phases)
|
||||
data = d_spec.create_light_client_update(state, block, attested_state, attested_block, finalized_block)
|
||||
if not with_next:
|
||||
update.next_sync_committee = spec.SyncCommittee()
|
||||
update.next_sync_committee_branch = \
|
||||
data.next_sync_committee = spec.SyncCommittee()
|
||||
data.next_sync_committee_branch = \
|
||||
[spec.Bytes32() for _ in range(spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX))]
|
||||
current_slot = state.slot
|
||||
spec.process_light_client_update(test.store, update, current_slot, test.genesis_validators_root)
|
||||
|
||||
yield get_update_file_name(spec, update), update
|
||||
upgraded = upgrade_lc_update_to_store(d_spec, test.s_spec, data)
|
||||
test.s_spec.process_light_client_update(test.store, upgraded, current_slot, test.genesis_validators_root)
|
||||
|
||||
yield get_update_file_name(d_spec, data), data
|
||||
test.steps.append({
|
||||
"process_update": {
|
||||
"update": get_update_file_name(spec, update),
|
||||
"update_fork_digest": encode_hex(data_fork_digest),
|
||||
"update": get_update_file_name(d_spec, data),
|
||||
"current_slot": int(current_slot),
|
||||
"checks": get_checks(test.store),
|
||||
"checks": get_checks(test.s_spec, test.store),
|
||||
}
|
||||
})
|
||||
return upgraded
|
||||
|
||||
|
||||
def emit_upgrade_store(test, new_s_spec, phases=None):
|
||||
test.store = upgrade_lc_store_to_new_spec(test.s_spec, new_s_spec, test.store)
|
||||
test.s_spec = new_s_spec
|
||||
store_fork_version = get_store_fork_version(test.s_spec)
|
||||
store_fork_digest = test.s_spec.compute_fork_digest(store_fork_version, test.genesis_validators_root)
|
||||
|
||||
yield from [] # Consistently enable `yield from` syntax in calling tests
|
||||
test.steps.append({
|
||||
"upgrade_store": {
|
||||
"store_fork_digest": encode_hex(store_fork_digest),
|
||||
"checks": get_checks(test.s_spec, test.store),
|
||||
}
|
||||
})
|
||||
return update
|
||||
|
||||
|
||||
def compute_start_slot_at_sync_committee_period(spec, sync_committee_period):
|
||||
@@ -141,10 +317,10 @@ def test_light_client_sync(spec, state):
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Advance to next sync committee period
|
||||
# ```
|
||||
@@ -167,10 +343,10 @@ def test_light_client_sync(spec, state):
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Edge case: Signature in next period
|
||||
# ```
|
||||
@@ -193,10 +369,10 @@ def test_light_client_sync(spec, state):
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Edge case: Finalized header not included
|
||||
# ```
|
||||
@@ -214,10 +390,10 @@ def test_light_client_sync(spec, state):
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
update = yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block=None)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update == update
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Non-finalized case: Attested `next_sync_committee` is not finalized
|
||||
# ```
|
||||
@@ -236,10 +412,10 @@ def test_light_client_sync(spec, state):
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
update = yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update == update
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Force-update using timeout
|
||||
# ```
|
||||
@@ -256,10 +432,10 @@ def test_light_client_sync(spec, state):
|
||||
attested_state = state.copy()
|
||||
next_slots(spec, state, spec.UPDATE_TIMEOUT - 1)
|
||||
yield from emit_force_update(test, spec, state)
|
||||
assert test.store.finalized_header.slot == store_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == store_state.slot
|
||||
assert test.store.next_sync_committee == store_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
assert test.store.optimistic_header.slot == store_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == store_state.slot
|
||||
|
||||
# Edge case: Finalized header not included, after force-update
|
||||
# ```
|
||||
@@ -275,10 +451,10 @@ def test_light_client_sync(spec, state):
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
update = yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block=None)
|
||||
assert test.store.finalized_header.slot == store_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == store_state.slot
|
||||
assert test.store.next_sync_committee == store_state.next_sync_committee
|
||||
assert test.store.best_valid_update == update
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Edge case: Finalized header older than store
|
||||
# ```
|
||||
@@ -296,15 +472,15 @@ def test_light_client_sync(spec, state):
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
update = yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
|
||||
assert test.store.finalized_header.slot == store_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == store_state.slot
|
||||
assert test.store.next_sync_committee == store_state.next_sync_committee
|
||||
assert test.store.best_valid_update == update
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
yield from emit_force_update(test, spec, state)
|
||||
assert test.store.finalized_header.slot == attested_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == attested_state.slot
|
||||
assert test.store.next_sync_committee == attested_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Advance to next sync committee period
|
||||
# ```
|
||||
@@ -327,10 +503,10 @@ def test_light_client_sync(spec, state):
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Finish test
|
||||
yield from finish_test(test)
|
||||
@@ -357,10 +533,10 @@ def test_supply_sync_committee_from_past_update(spec, state):
|
||||
|
||||
# Apply `LightClientUpdate` from the past, populating `store.next_sync_committee`
|
||||
yield from emit_update(test, spec, past_state, block, attested_state, attested_block, finalized_block)
|
||||
assert test.store.finalized_header.slot == state.slot
|
||||
assert test.store.finalized_header.beacon.slot == state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
assert test.store.optimistic_header.slot == state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == state.slot
|
||||
|
||||
# Finish test
|
||||
yield from finish_test(test)
|
||||
@@ -383,10 +559,10 @@ def test_advance_finality_without_sync_committee(spec, state):
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Advance finality into next sync committee period, but omit `next_sync_committee`
|
||||
transition_to(spec, state, compute_start_slot_at_next_sync_committee_period(spec, state))
|
||||
@@ -402,10 +578,10 @@ def test_advance_finality_without_sync_committee(spec, state):
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, with_next=False)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert not spec.is_next_sync_committee_known(test.store)
|
||||
assert test.store.best_valid_update is None
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Advance finality once more, with `next_sync_committee` still unknown
|
||||
past_state = finalized_state
|
||||
@@ -419,24 +595,238 @@ def test_advance_finality_without_sync_committee(spec, state):
|
||||
|
||||
# Apply `LightClientUpdate` without `finalized_header` nor `next_sync_committee`
|
||||
update = yield from emit_update(test, spec, state, block, attested_state, attested_block, None, with_next=False)
|
||||
assert test.store.finalized_header.slot == past_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == past_state.slot
|
||||
assert not spec.is_next_sync_committee_known(test.store)
|
||||
assert test.store.best_valid_update == update
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Apply `LightClientUpdate` with `finalized_header` but no `next_sync_committee`
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, with_next=False)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert not spec.is_next_sync_committee_known(test.store)
|
||||
assert test.store.best_valid_update is None
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Apply full `LightClientUpdate`, supplying `next_sync_committee`
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Finish test
|
||||
yield from finish_test(test)
|
||||
|
||||
|
||||
def run_test_single_fork(spec, phases, state, fork):
|
||||
# Start test
|
||||
test = yield from setup_test(spec, state, phases=phases)
|
||||
|
||||
# Initial `LightClientUpdate`
|
||||
finalized_block = spec.SignedBeaconBlock()
|
||||
finalized_block.message.state_root = state.hash_tree_root()
|
||||
finalized_state = state.copy()
|
||||
attested_block = state_transition_with_full_block(spec, state, True, True)
|
||||
attested_state = state.copy()
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases)
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Jump to two slots before fork
|
||||
fork_epoch = getattr(phases[fork].config, fork.upper() + '_FORK_EPOCH')
|
||||
transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_epoch) - 4)
|
||||
attested_block = state_transition_with_full_block(spec, state, True, True)
|
||||
attested_state = state.copy()
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
update = yield from emit_update(
|
||||
test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases)
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update == update
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Perform `LightClientStore` upgrade
|
||||
yield from emit_upgrade_store(test, phases[fork], phases=phases)
|
||||
update = test.store.best_valid_update
|
||||
|
||||
# Final slot before fork, check that importing the pre-fork format still works
|
||||
attested_block = block.copy()
|
||||
attested_state = state.copy()
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases)
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update == update
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Upgrade to post-fork spec, attested block is still before the fork
|
||||
attested_block = block.copy()
|
||||
attested_state = state.copy()
|
||||
state, _ = do_fork(state, spec, phases[fork], fork_epoch, with_block=False)
|
||||
spec = phases[fork]
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases)
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update == update
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Another block after the fork, this time attested block is after the fork
|
||||
attested_block = block.copy()
|
||||
attested_state = state.copy()
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases)
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update == update
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Jump to next epoch
|
||||
transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_epoch + 1) - 2)
|
||||
attested_block = state_transition_with_full_block(spec, state, True, True)
|
||||
attested_state = state.copy()
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases)
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update == update
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Finalize the fork
|
||||
finalized_block = block.copy()
|
||||
finalized_state = state.copy()
|
||||
_, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH - 1, True, True)
|
||||
attested_block = state_transition_with_full_block(spec, state, True, True)
|
||||
attested_state = state.copy()
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases)
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Finish test
|
||||
yield from finish_test(test)
|
||||
|
||||
|
||||
@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA])
|
||||
@spec_test
|
||||
@with_config_overrides({
|
||||
'CAPELLA_FORK_EPOCH': 3, # `setup_test` advances to epoch 2
|
||||
}, emit=False)
|
||||
@with_state
|
||||
@with_matching_spec_config(emitted_fork=CAPELLA)
|
||||
@with_presets([MINIMAL], reason="too slow")
|
||||
def test_capella_fork(spec, phases, state):
|
||||
yield from run_test_single_fork(spec, phases, state, CAPELLA)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[DENEB])
|
||||
@spec_test
|
||||
@with_config_overrides({
|
||||
'DENEB_FORK_EPOCH': 3, # `setup_test` advances to epoch 2
|
||||
}, emit=False)
|
||||
@with_state
|
||||
@with_matching_spec_config(emitted_fork=DENEB)
|
||||
@with_presets([MINIMAL], reason="too slow")
|
||||
def test_deneb_fork(spec, phases, state):
|
||||
yield from run_test_single_fork(spec, phases, state, DENEB)
|
||||
|
||||
|
||||
def run_test_multi_fork(spec, phases, state, fork_1, fork_2):
|
||||
# Start test
|
||||
test = yield from setup_test(spec, state, phases[fork_2], phases)
|
||||
|
||||
# Set up so that finalized is from `spec`, ...
|
||||
finalized_block = spec.SignedBeaconBlock()
|
||||
finalized_block.message.state_root = state.hash_tree_root()
|
||||
finalized_state = state.copy()
|
||||
|
||||
# ..., attested is from `fork_1`, ...
|
||||
fork_1_epoch = getattr(phases[fork_1].config, fork_1.upper() + '_FORK_EPOCH')
|
||||
transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_1_epoch) - 1)
|
||||
state, _ = do_fork(state, spec, phases[fork_1], fork_1_epoch, with_block=False)
|
||||
spec = phases[fork_1]
|
||||
attested_block = state_transition_with_full_block(spec, state, True, True)
|
||||
attested_state = state.copy()
|
||||
|
||||
# ..., and signature is from `fork_2`
|
||||
fork_2_epoch = getattr(phases[fork_2].config, fork_2.upper() + '_FORK_EPOCH')
|
||||
transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_2_epoch) - 1)
|
||||
state, _ = do_fork(state, spec, phases[fork_2], fork_2_epoch, with_block=False)
|
||||
spec = phases[fork_2]
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
|
||||
# Check that update applies
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases)
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Finish test
|
||||
yield from finish_test(test)
|
||||
|
||||
|
||||
@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB])
|
||||
@spec_test
|
||||
@with_config_overrides({
|
||||
'CAPELLA_FORK_EPOCH': 3, # `setup_test` advances to epoch 2
|
||||
'DENEB_FORK_EPOCH': 4,
|
||||
}, emit=False)
|
||||
@with_state
|
||||
@with_matching_spec_config(emitted_fork=DENEB)
|
||||
@with_presets([MINIMAL], reason="too slow")
|
||||
def test_capella_deneb_fork(spec, phases, state):
|
||||
yield from run_test_multi_fork(spec, phases, state, CAPELLA, DENEB)
|
||||
|
||||
|
||||
def run_test_upgraded_store_with_legacy_data(spec, phases, state, fork):
|
||||
# Start test (Legacy bootstrap with an upgraded store)
|
||||
test = yield from setup_test(spec, state, phases[fork], phases)
|
||||
|
||||
# Initial `LightClientUpdate` (check that the upgraded store can process it)
|
||||
finalized_block = spec.SignedBeaconBlock()
|
||||
finalized_block.message.state_root = state.hash_tree_root()
|
||||
finalized_state = state.copy()
|
||||
attested_block = state_transition_with_full_block(spec, state, True, True)
|
||||
attested_state = state.copy()
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases)
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Finish test
|
||||
yield from finish_test(test)
|
||||
|
||||
|
||||
@with_phases(phases=[ALTAIR, BELLATRIX], other_phases=[CAPELLA])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_matching_spec_config(emitted_fork=CAPELLA)
|
||||
@with_presets([MINIMAL], reason="too slow")
|
||||
def test_capella_store_with_legacy_data(spec, phases, state):
|
||||
yield from run_test_upgraded_store_with_legacy_data(spec, phases, state, CAPELLA)
|
||||
|
||||
|
||||
@with_phases(phases=[ALTAIR, BELLATRIX, CAPELLA], other_phases=[DENEB])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_matching_spec_config(emitted_fork=DENEB)
|
||||
@with_presets([MINIMAL], reason="too slow")
|
||||
def test_deneb_store_with_legacy_data(spec, phases, state):
|
||||
yield from run_test_upgraded_store_with_legacy_data(spec, phases, state, DENEB)
|
||||
|
||||
@@ -9,45 +9,23 @@ from eth2spec.test.helpers.attestations import (
|
||||
)
|
||||
from eth2spec.test.helpers.constants import MINIMAL
|
||||
from eth2spec.test.helpers.light_client import (
|
||||
get_sync_aggregate,
|
||||
signed_block_to_header,
|
||||
create_update,
|
||||
)
|
||||
from eth2spec.test.helpers.state import (
|
||||
next_slots,
|
||||
)
|
||||
from math import floor
|
||||
|
||||
|
||||
def create_update(spec, test, with_next, with_finality, participation_rate):
|
||||
def create_test_update(spec, test, with_next, with_finality, participation_rate):
|
||||
attested_state, attested_block, finalized_block = test
|
||||
num_participants = floor(spec.SYNC_COMMITTEE_SIZE * participation_rate)
|
||||
|
||||
attested_header = signed_block_to_header(spec, attested_block)
|
||||
|
||||
if with_next:
|
||||
next_sync_committee = attested_state.next_sync_committee
|
||||
next_sync_committee_branch = spec.compute_merkle_proof_for_state(attested_state, spec.NEXT_SYNC_COMMITTEE_INDEX)
|
||||
else:
|
||||
next_sync_committee = spec.SyncCommittee()
|
||||
next_sync_committee_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX))]
|
||||
|
||||
if with_finality:
|
||||
finalized_header = signed_block_to_header(spec, finalized_block)
|
||||
finality_branch = spec.compute_merkle_proof_for_state(attested_state, spec.FINALIZED_ROOT_INDEX)
|
||||
else:
|
||||
finalized_header = spec.BeaconBlockHeader()
|
||||
finality_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.FINALIZED_ROOT_INDEX))]
|
||||
|
||||
sync_aggregate, signature_slot = get_sync_aggregate(spec, attested_state, num_participants)
|
||||
|
||||
return spec.LightClientUpdate(
|
||||
attested_header=attested_header,
|
||||
next_sync_committee=next_sync_committee,
|
||||
next_sync_committee_branch=next_sync_committee_branch,
|
||||
finalized_header=finalized_header,
|
||||
finality_branch=finality_branch,
|
||||
sync_aggregate=sync_aggregate,
|
||||
signature_slot=signature_slot,
|
||||
return create_update(
|
||||
spec,
|
||||
attested_state,
|
||||
attested_block,
|
||||
finalized_block,
|
||||
with_next,
|
||||
with_finality,
|
||||
participation_rate,
|
||||
)
|
||||
|
||||
|
||||
@@ -59,7 +37,7 @@ def test_update_ranking(spec, state):
|
||||
# - `sig_finalized` / `sig_attested` --> Only signature in next sync committee period
|
||||
# - `att_finalized` / `att_attested` --> Attested header also in next sync committee period
|
||||
# - `fin_finalized` / `fin_attested` --> Finalized header also in next sync committee period
|
||||
# - `lat_finalized` / `lat_attested` --> Like `fin`, but at a later `attested_header.slot`
|
||||
# - `lat_finalized` / `lat_attested` --> Like `fin`, but at a later `attested_header.beacon.slot`
|
||||
next_slots(spec, state, spec.compute_start_slot_at_epoch(spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD - 3) - 1)
|
||||
sig_finalized_block = state_transition_with_full_block(spec, state, True, True)
|
||||
_, _, state = next_slots_with_attestations(spec, state, spec.SLOTS_PER_EPOCH - 1, True, True)
|
||||
@@ -84,76 +62,76 @@ def test_update_ranking(spec, state):
|
||||
# Create updates (in descending order of quality)
|
||||
updates = [
|
||||
# Updates with sync committee finality
|
||||
create_update(spec, fin, with_next=1, with_finality=1, participation_rate=1.0),
|
||||
create_update(spec, lat, with_next=1, with_finality=1, participation_rate=1.0),
|
||||
create_update(spec, fin, with_next=1, with_finality=1, participation_rate=0.8),
|
||||
create_update(spec, lat, with_next=1, with_finality=1, participation_rate=0.8),
|
||||
create_test_update(spec, fin, with_next=1, with_finality=1, participation_rate=1.0),
|
||||
create_test_update(spec, lat, with_next=1, with_finality=1, participation_rate=1.0),
|
||||
create_test_update(spec, fin, with_next=1, with_finality=1, participation_rate=0.8),
|
||||
create_test_update(spec, lat, with_next=1, with_finality=1, participation_rate=0.8),
|
||||
|
||||
# Updates without sync committee finality
|
||||
create_update(spec, att, with_next=1, with_finality=1, participation_rate=1.0),
|
||||
create_update(spec, att, with_next=1, with_finality=1, participation_rate=0.8),
|
||||
create_test_update(spec, att, with_next=1, with_finality=1, participation_rate=1.0),
|
||||
create_test_update(spec, att, with_next=1, with_finality=1, participation_rate=0.8),
|
||||
|
||||
# Updates without indication of any finality
|
||||
create_update(spec, att, with_next=1, with_finality=0, participation_rate=1.0),
|
||||
create_update(spec, fin, with_next=1, with_finality=0, participation_rate=1.0),
|
||||
create_update(spec, lat, with_next=1, with_finality=0, participation_rate=1.0),
|
||||
create_update(spec, att, with_next=1, with_finality=0, participation_rate=0.8),
|
||||
create_update(spec, fin, with_next=1, with_finality=0, participation_rate=0.8),
|
||||
create_update(spec, lat, with_next=1, with_finality=0, participation_rate=0.8),
|
||||
create_test_update(spec, att, with_next=1, with_finality=0, participation_rate=1.0),
|
||||
create_test_update(spec, fin, with_next=1, with_finality=0, participation_rate=1.0),
|
||||
create_test_update(spec, lat, with_next=1, with_finality=0, participation_rate=1.0),
|
||||
create_test_update(spec, att, with_next=1, with_finality=0, participation_rate=0.8),
|
||||
create_test_update(spec, fin, with_next=1, with_finality=0, participation_rate=0.8),
|
||||
create_test_update(spec, lat, with_next=1, with_finality=0, participation_rate=0.8),
|
||||
|
||||
# Updates with sync committee finality but no `next_sync_committee`
|
||||
create_update(spec, sig, with_next=0, with_finality=1, participation_rate=1.0),
|
||||
create_update(spec, fin, with_next=0, with_finality=1, participation_rate=1.0),
|
||||
create_update(spec, lat, with_next=0, with_finality=1, participation_rate=1.0),
|
||||
create_update(spec, sig, with_next=0, with_finality=1, participation_rate=0.8),
|
||||
create_update(spec, fin, with_next=0, with_finality=1, participation_rate=0.8),
|
||||
create_update(spec, lat, with_next=0, with_finality=1, participation_rate=0.8),
|
||||
create_test_update(spec, sig, with_next=0, with_finality=1, participation_rate=1.0),
|
||||
create_test_update(spec, fin, with_next=0, with_finality=1, participation_rate=1.0),
|
||||
create_test_update(spec, lat, with_next=0, with_finality=1, participation_rate=1.0),
|
||||
create_test_update(spec, sig, with_next=0, with_finality=1, participation_rate=0.8),
|
||||
create_test_update(spec, fin, with_next=0, with_finality=1, participation_rate=0.8),
|
||||
create_test_update(spec, lat, with_next=0, with_finality=1, participation_rate=0.8),
|
||||
|
||||
# Updates without sync committee finality and also no `next_sync_committee`
|
||||
create_update(spec, att, with_next=0, with_finality=1, participation_rate=1.0),
|
||||
create_update(spec, att, with_next=0, with_finality=1, participation_rate=0.8),
|
||||
create_test_update(spec, att, with_next=0, with_finality=1, participation_rate=1.0),
|
||||
create_test_update(spec, att, with_next=0, with_finality=1, participation_rate=0.8),
|
||||
|
||||
# Updates without indication of any finality nor `next_sync_committee`
|
||||
create_update(spec, sig, with_next=0, with_finality=0, participation_rate=1.0),
|
||||
create_update(spec, att, with_next=0, with_finality=0, participation_rate=1.0),
|
||||
create_update(spec, fin, with_next=0, with_finality=0, participation_rate=1.0),
|
||||
create_update(spec, lat, with_next=0, with_finality=0, participation_rate=1.0),
|
||||
create_update(spec, sig, with_next=0, with_finality=0, participation_rate=0.8),
|
||||
create_update(spec, att, with_next=0, with_finality=0, participation_rate=0.8),
|
||||
create_update(spec, fin, with_next=0, with_finality=0, participation_rate=0.8),
|
||||
create_update(spec, lat, with_next=0, with_finality=0, participation_rate=0.8),
|
||||
create_test_update(spec, sig, with_next=0, with_finality=0, participation_rate=1.0),
|
||||
create_test_update(spec, att, with_next=0, with_finality=0, participation_rate=1.0),
|
||||
create_test_update(spec, fin, with_next=0, with_finality=0, participation_rate=1.0),
|
||||
create_test_update(spec, lat, with_next=0, with_finality=0, participation_rate=1.0),
|
||||
create_test_update(spec, sig, with_next=0, with_finality=0, participation_rate=0.8),
|
||||
create_test_update(spec, att, with_next=0, with_finality=0, participation_rate=0.8),
|
||||
create_test_update(spec, fin, with_next=0, with_finality=0, participation_rate=0.8),
|
||||
create_test_update(spec, lat, with_next=0, with_finality=0, participation_rate=0.8),
|
||||
|
||||
# Updates with low sync committee participation
|
||||
create_update(spec, fin, with_next=1, with_finality=1, participation_rate=0.4),
|
||||
create_update(spec, lat, with_next=1, with_finality=1, participation_rate=0.4),
|
||||
create_update(spec, att, with_next=1, with_finality=1, participation_rate=0.4),
|
||||
create_update(spec, att, with_next=1, with_finality=0, participation_rate=0.4),
|
||||
create_update(spec, fin, with_next=1, with_finality=0, participation_rate=0.4),
|
||||
create_update(spec, lat, with_next=1, with_finality=0, participation_rate=0.4),
|
||||
create_update(spec, sig, with_next=0, with_finality=1, participation_rate=0.4),
|
||||
create_update(spec, fin, with_next=0, with_finality=1, participation_rate=0.4),
|
||||
create_update(spec, lat, with_next=0, with_finality=1, participation_rate=0.4),
|
||||
create_update(spec, att, with_next=0, with_finality=1, participation_rate=0.4),
|
||||
create_update(spec, sig, with_next=0, with_finality=0, participation_rate=0.4),
|
||||
create_update(spec, att, with_next=0, with_finality=0, participation_rate=0.4),
|
||||
create_update(spec, fin, with_next=0, with_finality=0, participation_rate=0.4),
|
||||
create_update(spec, lat, with_next=0, with_finality=0, participation_rate=0.4),
|
||||
create_test_update(spec, fin, with_next=1, with_finality=1, participation_rate=0.4),
|
||||
create_test_update(spec, lat, with_next=1, with_finality=1, participation_rate=0.4),
|
||||
create_test_update(spec, att, with_next=1, with_finality=1, participation_rate=0.4),
|
||||
create_test_update(spec, att, with_next=1, with_finality=0, participation_rate=0.4),
|
||||
create_test_update(spec, fin, with_next=1, with_finality=0, participation_rate=0.4),
|
||||
create_test_update(spec, lat, with_next=1, with_finality=0, participation_rate=0.4),
|
||||
create_test_update(spec, sig, with_next=0, with_finality=1, participation_rate=0.4),
|
||||
create_test_update(spec, fin, with_next=0, with_finality=1, participation_rate=0.4),
|
||||
create_test_update(spec, lat, with_next=0, with_finality=1, participation_rate=0.4),
|
||||
create_test_update(spec, att, with_next=0, with_finality=1, participation_rate=0.4),
|
||||
create_test_update(spec, sig, with_next=0, with_finality=0, participation_rate=0.4),
|
||||
create_test_update(spec, att, with_next=0, with_finality=0, participation_rate=0.4),
|
||||
create_test_update(spec, fin, with_next=0, with_finality=0, participation_rate=0.4),
|
||||
create_test_update(spec, lat, with_next=0, with_finality=0, participation_rate=0.4),
|
||||
|
||||
# Updates with very low sync committee participation
|
||||
create_update(spec, fin, with_next=1, with_finality=1, participation_rate=0.2),
|
||||
create_update(spec, lat, with_next=1, with_finality=1, participation_rate=0.2),
|
||||
create_update(spec, att, with_next=1, with_finality=1, participation_rate=0.2),
|
||||
create_update(spec, att, with_next=1, with_finality=0, participation_rate=0.2),
|
||||
create_update(spec, fin, with_next=1, with_finality=0, participation_rate=0.2),
|
||||
create_update(spec, lat, with_next=1, with_finality=0, participation_rate=0.2),
|
||||
create_update(spec, sig, with_next=0, with_finality=1, participation_rate=0.2),
|
||||
create_update(spec, fin, with_next=0, with_finality=1, participation_rate=0.2),
|
||||
create_update(spec, lat, with_next=0, with_finality=1, participation_rate=0.2),
|
||||
create_update(spec, att, with_next=0, with_finality=1, participation_rate=0.2),
|
||||
create_update(spec, sig, with_next=0, with_finality=0, participation_rate=0.2),
|
||||
create_update(spec, att, with_next=0, with_finality=0, participation_rate=0.2),
|
||||
create_update(spec, fin, with_next=0, with_finality=0, participation_rate=0.2),
|
||||
create_update(spec, lat, with_next=0, with_finality=0, participation_rate=0.2),
|
||||
create_test_update(spec, fin, with_next=1, with_finality=1, participation_rate=0.2),
|
||||
create_test_update(spec, lat, with_next=1, with_finality=1, participation_rate=0.2),
|
||||
create_test_update(spec, att, with_next=1, with_finality=1, participation_rate=0.2),
|
||||
create_test_update(spec, att, with_next=1, with_finality=0, participation_rate=0.2),
|
||||
create_test_update(spec, fin, with_next=1, with_finality=0, participation_rate=0.2),
|
||||
create_test_update(spec, lat, with_next=1, with_finality=0, participation_rate=0.2),
|
||||
create_test_update(spec, sig, with_next=0, with_finality=1, participation_rate=0.2),
|
||||
create_test_update(spec, fin, with_next=0, with_finality=1, participation_rate=0.2),
|
||||
create_test_update(spec, lat, with_next=0, with_finality=1, participation_rate=0.2),
|
||||
create_test_update(spec, att, with_next=0, with_finality=1, participation_rate=0.2),
|
||||
create_test_update(spec, sig, with_next=0, with_finality=0, participation_rate=0.2),
|
||||
create_test_update(spec, att, with_next=0, with_finality=0, participation_rate=0.2),
|
||||
create_test_update(spec, fin, with_next=0, with_finality=0, participation_rate=0.2),
|
||||
create_test_update(spec, lat, with_next=0, with_finality=0, participation_rate=0.2),
|
||||
]
|
||||
yield "updates", updates
|
||||
|
||||
|
||||
@@ -11,43 +11,44 @@ from eth2spec.test.helpers.attestations import (
|
||||
)
|
||||
from eth2spec.test.helpers.constants import MINIMAL
|
||||
from eth2spec.test.helpers.light_client import (
|
||||
get_sync_aggregate,
|
||||
initialize_light_client_store,
|
||||
signed_block_to_header,
|
||||
create_update,
|
||||
)
|
||||
from eth2spec.test.helpers.state import (
|
||||
next_slots,
|
||||
)
|
||||
|
||||
|
||||
def setup_test(spec, state):
|
||||
trusted_block = spec.SignedBeaconBlock()
|
||||
trusted_block.message.state_root = state.hash_tree_root()
|
||||
trusted_block_root = trusted_block.message.hash_tree_root()
|
||||
bootstrap = spec.create_light_client_bootstrap(state, trusted_block)
|
||||
store = spec.initialize_light_client_store(trusted_block_root, bootstrap)
|
||||
store.next_sync_committee = state.next_sync_committee
|
||||
|
||||
return (trusted_block, store)
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test_with_matching_config
|
||||
def test_process_light_client_update_not_timeout(spec, state):
|
||||
store = initialize_light_client_store(spec, state)
|
||||
genesis_block, store = setup_test(spec, state)
|
||||
|
||||
# Block at slot 1 doesn't increase sync committee period, so it won't force update store.finalized_header
|
||||
attested_block = state_transition_with_full_block(spec, state, False, False)
|
||||
attested_header = signed_block_to_header(spec, attested_block)
|
||||
|
||||
# Sync committee signing the attested_header
|
||||
sync_aggregate, signature_slot = get_sync_aggregate(spec, state)
|
||||
next_sync_committee = spec.SyncCommittee()
|
||||
next_sync_committee_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX))]
|
||||
signature_slot = state.slot + 1
|
||||
|
||||
# Ensure that finality checkpoint is genesis
|
||||
assert state.finalized_checkpoint.epoch == 0
|
||||
# Finality is unchanged
|
||||
finalized_header = spec.BeaconBlockHeader()
|
||||
finality_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.FINALIZED_ROOT_INDEX))]
|
||||
|
||||
update = spec.LightClientUpdate(
|
||||
attested_header=attested_header,
|
||||
next_sync_committee=next_sync_committee,
|
||||
next_sync_committee_branch=next_sync_committee_branch,
|
||||
finalized_header=finalized_header,
|
||||
finality_branch=finality_branch,
|
||||
sync_aggregate=sync_aggregate,
|
||||
signature_slot=signature_slot,
|
||||
update = create_update(
|
||||
spec,
|
||||
attested_state=state,
|
||||
attested_block=attested_block,
|
||||
finalized_block=genesis_block,
|
||||
with_next=False,
|
||||
with_finality=False,
|
||||
participation_rate=1.0,
|
||||
)
|
||||
|
||||
pre_store = deepcopy(store)
|
||||
@@ -64,34 +65,25 @@ def test_process_light_client_update_not_timeout(spec, state):
|
||||
@spec_state_test_with_matching_config
|
||||
@with_presets([MINIMAL], reason="too slow")
|
||||
def test_process_light_client_update_at_period_boundary(spec, state):
|
||||
store = initialize_light_client_store(spec, state)
|
||||
genesis_block, store = setup_test(spec, state)
|
||||
|
||||
# Forward to slot before next sync committee period so that next block is final one in period
|
||||
next_slots(spec, state, spec.UPDATE_TIMEOUT - 2)
|
||||
store_period = spec.compute_sync_committee_period_at_slot(store.optimistic_header.slot)
|
||||
store_period = spec.compute_sync_committee_period_at_slot(store.optimistic_header.beacon.slot)
|
||||
update_period = spec.compute_sync_committee_period_at_slot(state.slot)
|
||||
assert store_period == update_period
|
||||
|
||||
attested_block = state_transition_with_full_block(spec, state, False, False)
|
||||
attested_header = signed_block_to_header(spec, attested_block)
|
||||
signature_slot = state.slot + 1
|
||||
|
||||
# Sync committee signing the attested_header
|
||||
sync_aggregate, signature_slot = get_sync_aggregate(spec, state)
|
||||
next_sync_committee = spec.SyncCommittee()
|
||||
next_sync_committee_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX))]
|
||||
|
||||
# Finality is unchanged
|
||||
finalized_header = spec.BeaconBlockHeader()
|
||||
finality_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.FINALIZED_ROOT_INDEX))]
|
||||
|
||||
update = spec.LightClientUpdate(
|
||||
attested_header=attested_header,
|
||||
next_sync_committee=next_sync_committee,
|
||||
next_sync_committee_branch=next_sync_committee_branch,
|
||||
finalized_header=finalized_header,
|
||||
finality_branch=finality_branch,
|
||||
sync_aggregate=sync_aggregate,
|
||||
signature_slot=signature_slot,
|
||||
update = create_update(
|
||||
spec,
|
||||
attested_state=state,
|
||||
attested_block=attested_block,
|
||||
finalized_block=genesis_block,
|
||||
with_next=False,
|
||||
with_finality=False,
|
||||
participation_rate=1.0,
|
||||
)
|
||||
|
||||
pre_store = deepcopy(store)
|
||||
@@ -108,35 +100,25 @@ def test_process_light_client_update_at_period_boundary(spec, state):
|
||||
@spec_state_test_with_matching_config
|
||||
@with_presets([MINIMAL], reason="too slow")
|
||||
def test_process_light_client_update_timeout(spec, state):
|
||||
store = initialize_light_client_store(spec, state)
|
||||
genesis_block, store = setup_test(spec, state)
|
||||
|
||||
# Forward to next sync committee period
|
||||
next_slots(spec, state, spec.UPDATE_TIMEOUT)
|
||||
store_period = spec.compute_sync_committee_period_at_slot(store.optimistic_header.slot)
|
||||
store_period = spec.compute_sync_committee_period_at_slot(store.optimistic_header.beacon.slot)
|
||||
update_period = spec.compute_sync_committee_period_at_slot(state.slot)
|
||||
assert store_period + 1 == update_period
|
||||
|
||||
attested_block = state_transition_with_full_block(spec, state, False, False)
|
||||
attested_header = signed_block_to_header(spec, attested_block)
|
||||
signature_slot = state.slot + 1
|
||||
|
||||
# Sync committee signing the attested_header
|
||||
sync_aggregate, signature_slot = get_sync_aggregate(spec, state)
|
||||
|
||||
# Sync committee is updated
|
||||
next_sync_committee = state.next_sync_committee
|
||||
next_sync_committee_branch = spec.compute_merkle_proof_for_state(state, spec.NEXT_SYNC_COMMITTEE_INDEX)
|
||||
# Finality is unchanged
|
||||
finalized_header = spec.BeaconBlockHeader()
|
||||
finality_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.FINALIZED_ROOT_INDEX))]
|
||||
|
||||
update = spec.LightClientUpdate(
|
||||
attested_header=attested_header,
|
||||
next_sync_committee=next_sync_committee,
|
||||
next_sync_committee_branch=next_sync_committee_branch,
|
||||
finalized_header=finalized_header,
|
||||
finality_branch=finality_branch,
|
||||
sync_aggregate=sync_aggregate,
|
||||
signature_slot=signature_slot,
|
||||
update = create_update(
|
||||
spec,
|
||||
attested_state=state,
|
||||
attested_block=attested_block,
|
||||
finalized_block=genesis_block,
|
||||
with_next=True,
|
||||
with_finality=False,
|
||||
participation_rate=1.0,
|
||||
)
|
||||
|
||||
pre_store = deepcopy(store)
|
||||
@@ -153,7 +135,7 @@ def test_process_light_client_update_timeout(spec, state):
|
||||
@spec_state_test_with_matching_config
|
||||
@with_presets([MINIMAL], reason="too slow")
|
||||
def test_process_light_client_update_finality_updated(spec, state):
|
||||
store = initialize_light_client_store(spec, state)
|
||||
_, store = setup_test(spec, state)
|
||||
|
||||
# Change finality
|
||||
blocks = []
|
||||
@@ -164,33 +146,26 @@ def test_process_light_client_update_finality_updated(spec, state):
|
||||
# Ensure that finality checkpoint has changed
|
||||
assert state.finalized_checkpoint.epoch == 3
|
||||
# Ensure that it's same period
|
||||
store_period = spec.compute_sync_committee_period_at_slot(store.optimistic_header.slot)
|
||||
store_period = spec.compute_sync_committee_period_at_slot(store.optimistic_header.beacon.slot)
|
||||
update_period = spec.compute_sync_committee_period_at_slot(state.slot)
|
||||
assert store_period == update_period
|
||||
|
||||
attested_block = blocks[-1]
|
||||
attested_header = signed_block_to_header(spec, attested_block)
|
||||
signature_slot = state.slot + 1
|
||||
|
||||
# Sync committee signing the attested_header
|
||||
sync_aggregate, signature_slot = get_sync_aggregate(spec, state)
|
||||
|
||||
# Updated sync_committee and finality
|
||||
next_sync_committee = spec.SyncCommittee()
|
||||
next_sync_committee_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX))]
|
||||
# Updated finality
|
||||
finalized_block = blocks[spec.SLOTS_PER_EPOCH - 1]
|
||||
finalized_header = signed_block_to_header(spec, finalized_block)
|
||||
assert finalized_header.slot == spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)
|
||||
assert finalized_header.hash_tree_root() == state.finalized_checkpoint.root
|
||||
finality_branch = spec.compute_merkle_proof_for_state(state, spec.FINALIZED_ROOT_INDEX)
|
||||
assert finalized_block.message.slot == spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)
|
||||
assert finalized_block.message.hash_tree_root() == state.finalized_checkpoint.root
|
||||
|
||||
update = spec.LightClientUpdate(
|
||||
attested_header=attested_header,
|
||||
next_sync_committee=next_sync_committee,
|
||||
next_sync_committee_branch=next_sync_committee_branch,
|
||||
finalized_header=finalized_header,
|
||||
finality_branch=finality_branch,
|
||||
sync_aggregate=sync_aggregate,
|
||||
signature_slot=signature_slot,
|
||||
update = create_update(
|
||||
spec,
|
||||
attested_state=state,
|
||||
attested_block=attested_block,
|
||||
finalized_block=finalized_block,
|
||||
with_next=False,
|
||||
with_finality=True,
|
||||
participation_rate=1.0,
|
||||
)
|
||||
|
||||
spec.process_light_client_update(store, update, signature_slot, state.genesis_validators_root)
|
||||
|
||||
@@ -1,11 +1,15 @@
|
||||
from eth2spec.test.context import (
|
||||
spec_configured_state_test,
|
||||
spec_state_test_with_matching_config,
|
||||
spec_test,
|
||||
with_all_phases,
|
||||
with_config_overrides,
|
||||
with_matching_spec_config,
|
||||
with_phases,
|
||||
with_state,
|
||||
)
|
||||
from eth2spec.test.helpers.constants import (
|
||||
PHASE0, ALTAIR,
|
||||
PHASE0, ALTAIR, BELLATRIX,
|
||||
ALL_PHASES,
|
||||
)
|
||||
from eth2spec.test.helpers.forks import is_post_fork
|
||||
@@ -30,7 +34,7 @@ def test_config_override(spec, state):
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test_with_matching_config
|
||||
def test_override_config_fork_epoch(spec, state):
|
||||
def test_config_override_matching_fork_epochs(spec, state):
|
||||
# Fork schedule must be consistent with state fork
|
||||
epoch = spec.get_current_epoch(state)
|
||||
if is_post_fork(spec.fork, ALTAIR):
|
||||
@@ -56,3 +60,27 @@ def test_override_config_fork_epoch(spec, state):
|
||||
continue
|
||||
fork_epoch_field = fork.upper() + '_FORK_EPOCH'
|
||||
assert getattr(spec.config, fork_epoch_field) <= epoch
|
||||
|
||||
|
||||
@with_phases(phases=[ALTAIR], other_phases=[BELLATRIX])
|
||||
@spec_test
|
||||
@with_config_overrides({
|
||||
'ALTAIR_FORK_VERSION': '0x11111111',
|
||||
'BELLATRIX_FORK_EPOCH': 4,
|
||||
}, emit=False)
|
||||
@with_state
|
||||
@with_matching_spec_config(emitted_fork=BELLATRIX)
|
||||
def test_config_override_across_phases(spec, phases, state):
|
||||
assert state.fork.current_version == spec.config.ALTAIR_FORK_VERSION
|
||||
|
||||
assert spec.config.ALTAIR_FORK_VERSION == spec.Version('0x11111111')
|
||||
assert spec.config.ALTAIR_FORK_EPOCH == 0
|
||||
assert not hasattr(spec.config, 'BELLATRIX_FORK_EPOCH')
|
||||
|
||||
assert phases[ALTAIR].config.ALTAIR_FORK_VERSION == spec.Version('0x11111111')
|
||||
assert phases[ALTAIR].config.ALTAIR_FORK_EPOCH == 0
|
||||
assert not hasattr(phases[ALTAIR].config, 'BELLATRIX_FORK_EPOCH')
|
||||
|
||||
assert phases[ALTAIR].config.ALTAIR_FORK_VERSION == spec.Version('0x11111111')
|
||||
assert phases[BELLATRIX].config.ALTAIR_FORK_EPOCH == 0
|
||||
assert phases[BELLATRIX].config.BELLATRIX_FORK_EPOCH == 4
|
||||
|
||||
@@ -64,6 +64,7 @@ def test_from_syncing_to_invalid(spec, state):
|
||||
block.body.execution_payload.parent_hash = (
|
||||
block_hashes[f'chain_a_{i - 1}'] if i != 0 else block_hashes['block_0']
|
||||
)
|
||||
block.body.execution_payload.extra_data = spec.hash(bytes(f'chain_a_{i}', 'UTF-8'))
|
||||
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
|
||||
block_hashes[f'chain_a_{i}'] = block.body.execution_payload.block_hash
|
||||
|
||||
@@ -80,6 +81,7 @@ def test_from_syncing_to_invalid(spec, state):
|
||||
block.body.execution_payload.parent_hash = (
|
||||
block_hashes[f'chain_b_{i - 1}'] if i != 0 else block_hashes['block_0']
|
||||
)
|
||||
block.body.execution_payload.extra_data = spec.hash(bytes(f'chain_b_{i}', 'UTF-8'))
|
||||
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
|
||||
block_hashes[f'chain_b_{i}'] = block.body.execution_payload.block_hash
|
||||
|
||||
@@ -92,9 +94,13 @@ def test_from_syncing_to_invalid(spec, state):
|
||||
# Now add block 4 to chain `b` with INVALID
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
block.body.execution_payload.parent_hash = signed_blocks_b[-1].message.body.execution_payload.block_hash
|
||||
block.body.execution_payload.extra_data = spec.hash(bytes(f'chain_b_{i}', 'UTF-8'))
|
||||
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
|
||||
block_hashes['chain_b_3'] = block.body.execution_payload.block_hash
|
||||
|
||||
# Ensure that no duplicate block hashes
|
||||
assert len(block_hashes) == len(set(block_hashes.values()))
|
||||
|
||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||
payload_status = PayloadStatusV1(
|
||||
status=PayloadStatusV1Status.INVALID,
|
||||
|
||||
@@ -1,8 +1,14 @@
|
||||
from eth2spec.test.helpers.constants import CAPELLA
|
||||
from eth2spec.test.helpers.keys import pubkeys
|
||||
from eth2spec.test.helpers.bls_to_execution_changes import get_signed_address_change
|
||||
|
||||
from eth2spec.test.context import spec_state_test, expect_assertion_error, with_phases, always_bls
|
||||
from eth2spec.test.helpers.constants import CAPELLA, MAINNET
|
||||
from eth2spec.test.context import (
|
||||
always_bls,
|
||||
expect_assertion_error,
|
||||
spec_state_test,
|
||||
with_capella_and_later,
|
||||
with_presets,
|
||||
with_phases,
|
||||
)
|
||||
|
||||
|
||||
def run_bls_to_execution_change_processing(spec, state, signed_address_change, valid=True):
|
||||
@@ -38,14 +44,14 @@ def run_bls_to_execution_change_processing(spec, state, signed_address_change, v
|
||||
yield 'post', state
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_success(spec, state):
|
||||
signed_address_change = get_signed_address_change(spec, state)
|
||||
yield from run_bls_to_execution_change_processing(spec, state, signed_address_change)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_success_not_activated(spec, state):
|
||||
validator_index = 3
|
||||
@@ -63,7 +69,7 @@ def test_success_not_activated(spec, state):
|
||||
assert not spec.is_fully_withdrawable_validator(validator, balance, spec.get_current_epoch(state))
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_success_in_activation_queue(spec, state):
|
||||
validator_index = 3
|
||||
@@ -81,7 +87,7 @@ def test_success_in_activation_queue(spec, state):
|
||||
assert not spec.is_fully_withdrawable_validator(validator, balance, spec.get_current_epoch(state))
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_success_in_exit_queue(spec, state):
|
||||
validator_index = 3
|
||||
@@ -94,7 +100,7 @@ def test_success_in_exit_queue(spec, state):
|
||||
yield from run_bls_to_execution_change_processing(spec, state, signed_address_change)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_success_exited(spec, state):
|
||||
validator_index = 4
|
||||
@@ -111,7 +117,7 @@ def test_success_exited(spec, state):
|
||||
assert not spec.is_fully_withdrawable_validator(validator, balance, spec.get_current_epoch(state))
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_success_withdrawable(spec, state):
|
||||
validator_index = 4
|
||||
@@ -129,7 +135,7 @@ def test_success_withdrawable(spec, state):
|
||||
assert spec.is_fully_withdrawable_validator(validator, balance, spec.get_current_epoch(state))
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_val_index_out_of_range(spec, state):
|
||||
# Create for one validator beyond the validator list length
|
||||
@@ -138,7 +144,7 @@ def test_invalid_val_index_out_of_range(spec, state):
|
||||
yield from run_bls_to_execution_change_processing(spec, state, signed_address_change, valid=False)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_already_0x01(spec, state):
|
||||
# Create for one validator beyond the validator list length
|
||||
@@ -150,7 +156,7 @@ def test_invalid_already_0x01(spec, state):
|
||||
yield from run_bls_to_execution_change_processing(spec, state, signed_address_change, valid=False)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_incorrect_from_bls_pubkey(spec, state):
|
||||
# Create for one validator beyond the validator list length
|
||||
@@ -164,7 +170,7 @@ def test_invalid_incorrect_from_bls_pubkey(spec, state):
|
||||
yield from run_bls_to_execution_change_processing(spec, state, signed_address_change, valid=False)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_invalid_bad_signature(spec, state):
|
||||
@@ -173,3 +179,67 @@ def test_invalid_bad_signature(spec, state):
|
||||
signed_address_change.signature = spec.BLSSignature(b'\x42' * 96)
|
||||
|
||||
yield from run_bls_to_execution_change_processing(spec, state, signed_address_change, valid=False)
|
||||
|
||||
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_genesis_fork_version(spec, state):
|
||||
signed_address_change = get_signed_address_change(spec, state, fork_version=spec.config.GENESIS_FORK_VERSION)
|
||||
|
||||
yield from run_bls_to_execution_change_processing(spec, state, signed_address_change)
|
||||
|
||||
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_invalid_current_fork_version(spec, state):
|
||||
signed_address_change = get_signed_address_change(spec, state, fork_version=state.fork.current_version)
|
||||
|
||||
yield from run_bls_to_execution_change_processing(spec, state, signed_address_change, valid=False)
|
||||
|
||||
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_invalid_previous_fork_version(spec, state):
|
||||
signed_address_change = get_signed_address_change(spec, state, fork_version=state.fork.previous_version)
|
||||
|
||||
yield from run_bls_to_execution_change_processing(spec, state, signed_address_change, valid=False)
|
||||
|
||||
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_invalid_genesis_validators_root(spec, state):
|
||||
signed_address_change = get_signed_address_change(spec, state, genesis_validators_root=b'\x99' * 32)
|
||||
|
||||
yield from run_bls_to_execution_change_processing(spec, state, signed_address_change, valid=False)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_presets([MAINNET], reason="use mainnet fork version")
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_valid_signature_from_staking_deposit_cli(spec, state):
|
||||
validator_index = 1
|
||||
from_bls_pubkey = bytes.fromhex('86248e64705987236ec3c41f6a81d96f98e7b85e842a1d71405b216fa75a9917512f3c94c85779a9729c927ea2aa9ed1') # noqa: E501
|
||||
to_execution_address = bytes.fromhex('3434343434343434343434343434343434343434')
|
||||
signature = bytes.fromhex('8cf4219884b326a04f6664b680cd9a99ad70b5280745af1147477aa9f8b4a2b2b38b8688c6a74a06f275ad4e14c5c0c70e2ed37a15ece5bf7c0724a376ad4c03c79e14dd9f633a3d54abc1ce4e73bec3524a789ab9a69d4d06686a8a67c9e4dc') # noqa: E501
|
||||
|
||||
# Use mainnet `genesis_validators_root`
|
||||
state.genesis_validators_root = bytes.fromhex('4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95')
|
||||
validator = state.validators[validator_index]
|
||||
validator.withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(from_bls_pubkey)[1:]
|
||||
|
||||
address_change = spec.BLSToExecutionChange(
|
||||
validator_index=validator_index,
|
||||
from_bls_pubkey=from_bls_pubkey,
|
||||
to_execution_address=to_execution_address,
|
||||
)
|
||||
signed_address_change = spec.SignedBLSToExecutionChange(
|
||||
message=address_change,
|
||||
signature=signature,
|
||||
)
|
||||
|
||||
yield from run_bls_to_execution_change_processing(spec, state, signed_address_change)
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
from eth2spec.test.context import (
|
||||
spec_state_test,
|
||||
with_phases,
|
||||
with_capella_and_later,
|
||||
)
|
||||
from eth2spec.test.helpers.constants import CAPELLA
|
||||
from eth2spec.test.helpers.state import next_epoch_via_block
|
||||
from eth2spec.test.helpers.deposits import (
|
||||
prepare_state_and_deposit,
|
||||
@@ -11,7 +10,7 @@ from eth2spec.test.helpers.deposits import (
|
||||
from eth2spec.test.helpers.withdrawals import set_validator_fully_withdrawable
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_success_top_up_to_withdrawn_validator(spec, state):
|
||||
validator_index = 0
|
||||
|
||||
@@ -4,9 +4,9 @@ from eth2spec.test.context import (
|
||||
spec_state_test,
|
||||
expect_assertion_error,
|
||||
with_presets,
|
||||
with_phases,
|
||||
with_capella_and_later,
|
||||
)
|
||||
from eth2spec.test.helpers.constants import MAINNET, MINIMAL, CAPELLA
|
||||
from eth2spec.test.helpers.constants import MAINNET, MINIMAL
|
||||
from eth2spec.test.helpers.execution_payload import (
|
||||
build_empty_execution_payload,
|
||||
compute_el_block_hash,
|
||||
@@ -97,7 +97,7 @@ def run_withdrawals_processing(spec, state, execution_payload, num_expected_with
|
||||
return expected_withdrawals
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_success_zero_expected_withdrawals(spec, state):
|
||||
assert len(spec.get_expected_withdrawals(state)) == 0
|
||||
@@ -108,7 +108,7 @@ def test_success_zero_expected_withdrawals(spec, state):
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_success_one_full_withdrawal(spec, state):
|
||||
fully_withdrawable_indices, partial_withdrawals_indices = prepare_expected_withdrawals(
|
||||
@@ -125,7 +125,7 @@ def test_success_one_full_withdrawal(spec, state):
|
||||
partial_withdrawals_indices=partial_withdrawals_indices)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_success_one_partial_withdrawal(spec, state):
|
||||
fully_withdrawable_indices, partial_withdrawals_indices = prepare_expected_withdrawals(
|
||||
@@ -145,14 +145,16 @@ def test_success_one_partial_withdrawal(spec, state):
|
||||
)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_success_max_per_slot(spec, state):
|
||||
def test_success_mixed_fully_and_partial_withdrawable(spec, state):
|
||||
num_full_withdrawals = spec.MAX_WITHDRAWALS_PER_PAYLOAD // 2
|
||||
num_partial_withdrawals = spec.MAX_WITHDRAWALS_PER_PAYLOAD - num_full_withdrawals
|
||||
fully_withdrawable_indices, partial_withdrawals_indices = prepare_expected_withdrawals(
|
||||
spec, state,
|
||||
num_full_withdrawals=num_full_withdrawals, num_partial_withdrawals=num_partial_withdrawals)
|
||||
num_full_withdrawals=num_full_withdrawals,
|
||||
num_partial_withdrawals=num_partial_withdrawals,
|
||||
)
|
||||
|
||||
next_slot(spec, state)
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
@@ -163,7 +165,7 @@ def test_success_max_per_slot(spec, state):
|
||||
partial_withdrawals_indices=partial_withdrawals_indices)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@with_presets([MAINNET], reason="too few validators with minimal config")
|
||||
@spec_state_test
|
||||
def test_success_all_fully_withdrawable_in_one_sweep(spec, state):
|
||||
@@ -182,7 +184,7 @@ def test_success_all_fully_withdrawable_in_one_sweep(spec, state):
|
||||
partial_withdrawals_indices=partial_withdrawals_indices)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@with_presets([MINIMAL], reason="too many validators with mainnet config")
|
||||
@spec_state_test
|
||||
def test_success_all_fully_withdrawable(spec, state):
|
||||
@@ -201,7 +203,7 @@ def test_success_all_fully_withdrawable(spec, state):
|
||||
partial_withdrawals_indices=partial_withdrawals_indices)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@with_presets([MAINNET], reason="too few validators with minimal config")
|
||||
@spec_state_test
|
||||
def test_success_all_partially_withdrawable_in_one_sweep(spec, state):
|
||||
@@ -220,7 +222,7 @@ def test_success_all_partially_withdrawable_in_one_sweep(spec, state):
|
||||
partial_withdrawals_indices=partial_withdrawals_indices)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@with_presets([MINIMAL], reason="too many validators with mainnet config")
|
||||
@spec_state_test
|
||||
def test_success_all_partially_withdrawable(spec, state):
|
||||
@@ -243,7 +245,7 @@ def test_success_all_partially_withdrawable(spec, state):
|
||||
# Failure cases in which the number of withdrawals in the execution_payload is incorrect
|
||||
#
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_non_withdrawable_non_empty_withdrawals(spec, state):
|
||||
next_slot(spec, state)
|
||||
@@ -260,7 +262,7 @@ def test_invalid_non_withdrawable_non_empty_withdrawals(spec, state):
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_one_expected_full_withdrawal_and_none_in_withdrawals(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_full_withdrawals=1)
|
||||
@@ -273,7 +275,7 @@ def test_invalid_one_expected_full_withdrawal_and_none_in_withdrawals(spec, stat
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_one_expected_partial_withdrawal_and_none_in_withdrawals(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_partial_withdrawals=1)
|
||||
@@ -286,7 +288,7 @@ def test_invalid_one_expected_partial_withdrawal_and_none_in_withdrawals(spec, s
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_one_expected_full_withdrawal_and_duplicate_in_withdrawals(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_full_withdrawals=2)
|
||||
@@ -299,7 +301,7 @@ def test_invalid_one_expected_full_withdrawal_and_duplicate_in_withdrawals(spec,
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_two_expected_partial_withdrawal_and_duplicate_in_withdrawals(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_partial_withdrawals=2)
|
||||
@@ -312,7 +314,7 @@ def test_invalid_two_expected_partial_withdrawal_and_duplicate_in_withdrawals(sp
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_max_per_slot_full_withdrawals_and_one_less_in_withdrawals(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_full_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD)
|
||||
@@ -325,7 +327,7 @@ def test_invalid_max_per_slot_full_withdrawals_and_one_less_in_withdrawals(spec,
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_max_per_slot_partial_withdrawals_and_one_less_in_withdrawals(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_partial_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD)
|
||||
@@ -338,7 +340,7 @@ def test_invalid_max_per_slot_partial_withdrawals_and_one_less_in_withdrawals(sp
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_a_lot_fully_withdrawable_too_few_in_withdrawals(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_full_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
|
||||
@@ -351,7 +353,7 @@ def test_invalid_a_lot_fully_withdrawable_too_few_in_withdrawals(spec, state):
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_a_lot_partially_withdrawable_too_few_in_withdrawals(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_partial_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
|
||||
@@ -364,7 +366,7 @@ def test_invalid_a_lot_partially_withdrawable_too_few_in_withdrawals(spec, state
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_a_lot_mixed_withdrawable_in_queue_too_few_in_withdrawals(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_full_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD,
|
||||
@@ -382,7 +384,7 @@ def test_invalid_a_lot_mixed_withdrawable_in_queue_too_few_in_withdrawals(spec,
|
||||
# Failure cases in which the withdrawals in the execution_payload are incorrect
|
||||
#
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_incorrect_withdrawal_index(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_full_withdrawals=1)
|
||||
@@ -395,7 +397,7 @@ def test_invalid_incorrect_withdrawal_index(spec, state):
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_incorrect_address_full(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_full_withdrawals=1)
|
||||
@@ -408,7 +410,7 @@ def test_invalid_incorrect_address_full(spec, state):
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_incorrect_address_partial(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_partial_withdrawals=1)
|
||||
@@ -421,7 +423,7 @@ def test_invalid_incorrect_address_partial(spec, state):
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_incorrect_amount_full(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_full_withdrawals=1)
|
||||
@@ -434,7 +436,7 @@ def test_invalid_incorrect_amount_full(spec, state):
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_incorrect_amount_partial(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_full_withdrawals=1)
|
||||
@@ -447,7 +449,7 @@ def test_invalid_incorrect_amount_partial(spec, state):
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_one_of_many_incorrectly_full(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_full_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
|
||||
@@ -466,7 +468,7 @@ def test_invalid_one_of_many_incorrectly_full(spec, state):
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_one_of_many_incorrectly_partial(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_partial_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
|
||||
@@ -485,7 +487,7 @@ def test_invalid_one_of_many_incorrectly_partial(spec, state):
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_many_incorrectly_full(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_full_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
|
||||
@@ -504,7 +506,7 @@ def test_invalid_many_incorrectly_full(spec, state):
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_many_incorrectly_partial(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_partial_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
|
||||
@@ -527,7 +529,7 @@ def test_invalid_many_incorrectly_partial(spec, state):
|
||||
# More full withdrawal cases
|
||||
#
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_withdrawable_epoch_but_0_balance(spec, state):
|
||||
current_epoch = spec.get_current_epoch(state)
|
||||
@@ -541,7 +543,7 @@ def test_withdrawable_epoch_but_0_balance(spec, state):
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=0)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_withdrawable_epoch_but_0_effective_balance_0_balance(spec, state):
|
||||
current_epoch = spec.get_current_epoch(state)
|
||||
@@ -555,7 +557,7 @@ def test_withdrawable_epoch_but_0_effective_balance_0_balance(spec, state):
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=0)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_withdrawable_epoch_but_0_effective_balance_nonzero_balance(spec, state):
|
||||
current_epoch = spec.get_current_epoch(state)
|
||||
@@ -569,7 +571,7 @@ def test_withdrawable_epoch_but_0_effective_balance_nonzero_balance(spec, state)
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=1)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_no_withdrawals_but_some_next_epoch(spec, state):
|
||||
current_epoch = spec.get_current_epoch(state)
|
||||
@@ -583,7 +585,7 @@ def test_no_withdrawals_but_some_next_epoch(spec, state):
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=0)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_all_withdrawal(spec, state):
|
||||
# Make all validators withdrawable
|
||||
@@ -619,25 +621,25 @@ def run_random_full_withdrawals_test(spec, state, rng):
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_random_full_withdrawals_0(spec, state):
|
||||
yield from run_random_full_withdrawals_test(spec, state, random.Random(444))
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_random_full_withdrawals_1(spec, state):
|
||||
yield from run_random_full_withdrawals_test(spec, state, random.Random(420))
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_random_full_withdrawals_2(spec, state):
|
||||
yield from run_random_full_withdrawals_test(spec, state, random.Random(200))
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_random_full_withdrawals_3(spec, state):
|
||||
yield from run_random_full_withdrawals_test(spec, state, random.Random(2000000))
|
||||
@@ -647,7 +649,7 @@ def test_random_full_withdrawals_3(spec, state):
|
||||
# More partial withdrawal cases
|
||||
#
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_success_no_max_effective_balance(spec, state):
|
||||
validator_index = len(state.validators) // 2
|
||||
@@ -663,7 +665,7 @@ def test_success_no_max_effective_balance(spec, state):
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=0)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_success_no_excess_balance(spec, state):
|
||||
validator_index = len(state.validators) // 2
|
||||
@@ -679,7 +681,7 @@ def test_success_no_excess_balance(spec, state):
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=0)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_success_excess_balance_but_no_max_effective_balance(spec, state):
|
||||
validator_index = len(state.validators) // 2
|
||||
@@ -696,7 +698,7 @@ def test_success_excess_balance_but_no_max_effective_balance(spec, state):
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=0)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_success_one_partial_withdrawable_not_yet_active(spec, state):
|
||||
validator_index = min(len(state.validators) // 2, spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP - 1)
|
||||
@@ -710,7 +712,7 @@ def test_success_one_partial_withdrawable_not_yet_active(spec, state):
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=1)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_success_one_partial_withdrawable_in_exit_queue(spec, state):
|
||||
validator_index = min(len(state.validators) // 2, spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP - 1)
|
||||
@@ -725,7 +727,7 @@ def test_success_one_partial_withdrawable_in_exit_queue(spec, state):
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=1)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_success_one_partial_withdrawable_exited(spec, state):
|
||||
validator_index = min(len(state.validators) // 2, spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP - 1)
|
||||
@@ -739,7 +741,7 @@ def test_success_one_partial_withdrawable_exited(spec, state):
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=1)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_success_one_partial_withdrawable_active_and_slashed(spec, state):
|
||||
validator_index = min(len(state.validators) // 2, spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP - 1)
|
||||
@@ -753,7 +755,7 @@ def test_success_one_partial_withdrawable_active_and_slashed(spec, state):
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=1)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_success_one_partial_withdrawable_exited_and_slashed(spec, state):
|
||||
validator_index = min(len(state.validators) // 2, spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP - 1)
|
||||
@@ -768,7 +770,7 @@ def test_success_one_partial_withdrawable_exited_and_slashed(spec, state):
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=1)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_success_two_partial_withdrawable(spec, state):
|
||||
set_validator_partially_withdrawable(spec, state, 0)
|
||||
@@ -779,7 +781,7 @@ def test_success_two_partial_withdrawable(spec, state):
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=2)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_success_max_partial_withdrawable(spec, state):
|
||||
# Sanity check that this test works for this state
|
||||
@@ -794,7 +796,7 @@ def test_success_max_partial_withdrawable(spec, state):
|
||||
spec, state, execution_payload, num_expected_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@with_presets([MINIMAL], reason="not enough validators with mainnet config")
|
||||
@spec_state_test
|
||||
def test_success_max_plus_one_withdrawable(spec, state):
|
||||
@@ -833,37 +835,37 @@ def run_random_partial_withdrawals_test(spec, state, rng):
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_random_0(spec, state):
|
||||
yield from run_random_partial_withdrawals_test(spec, state, random.Random(0))
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_random_partial_withdrawals_1(spec, state):
|
||||
yield from run_random_partial_withdrawals_test(spec, state, random.Random(1))
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_random_partial_withdrawals_2(spec, state):
|
||||
yield from run_random_partial_withdrawals_test(spec, state, random.Random(2))
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_random_partial_withdrawals_3(spec, state):
|
||||
yield from run_random_partial_withdrawals_test(spec, state, random.Random(3))
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_random_partial_withdrawals_4(spec, state):
|
||||
yield from run_random_partial_withdrawals_test(spec, state, random.Random(4))
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_random_partial_withdrawals_5(spec, state):
|
||||
yield from run_random_partial_withdrawals_test(spec, state, random.Random(5))
|
||||
|
||||
@@ -0,0 +1,26 @@
|
||||
from eth2spec.test.context import (
|
||||
spec_state_test,
|
||||
with_capella_and_later,
|
||||
)
|
||||
from eth2spec.test.helpers.epoch_processing import (
|
||||
run_epoch_processing_with
|
||||
)
|
||||
|
||||
|
||||
def run_process_historical_summaries_update(spec, state):
|
||||
yield from run_epoch_processing_with(spec, state, 'process_historical_summaries_update')
|
||||
|
||||
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_historical_summaries_accumulator(spec, state):
|
||||
# skip ahead to near the end of the historical batch period (excl block before epoch processing)
|
||||
state.slot = spec.SLOTS_PER_HISTORICAL_ROOT - 1
|
||||
pre_historical_summaries = state.historical_summaries.copy()
|
||||
|
||||
yield from run_process_historical_summaries_update(spec, state)
|
||||
|
||||
assert len(state.historical_summaries) == len(pre_historical_summaries) + 1
|
||||
summary = state.historical_summaries[len(state.historical_summaries) - 1]
|
||||
assert summary.block_summary_root == state.block_roots.hash_tree_root()
|
||||
assert summary.state_summary_root == state.state_roots.hash_tree_root()
|
||||
@@ -0,0 +1,31 @@
|
||||
from eth2spec.test.context import (
|
||||
spec_state_test,
|
||||
with_capella_and_later,
|
||||
with_test_suite_name,
|
||||
)
|
||||
from eth2spec.test.helpers.attestations import (
|
||||
state_transition_with_full_block,
|
||||
)
|
||||
|
||||
|
||||
@with_test_suite_name("BeaconBlockBody")
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_execution_merkle_proof(spec, state):
|
||||
block = state_transition_with_full_block(spec, state, True, False)
|
||||
|
||||
yield "object", block.message.body
|
||||
execution_branch = spec.compute_merkle_proof_for_block_body(
|
||||
block.message.body, spec.EXECUTION_PAYLOAD_INDEX)
|
||||
yield "proof", {
|
||||
"leaf": "0x" + block.message.body.execution_payload.hash_tree_root().hex(),
|
||||
"leaf_index": spec.EXECUTION_PAYLOAD_INDEX,
|
||||
"branch": ['0x' + root.hex() for root in execution_branch]
|
||||
}
|
||||
assert spec.is_valid_merkle_branch(
|
||||
leaf=block.message.body.execution_payload.hash_tree_root(),
|
||||
branch=execution_branch,
|
||||
depth=spec.floorlog2(spec.EXECUTION_PAYLOAD_INDEX),
|
||||
index=spec.get_subtree_index(spec.EXECUTION_PAYLOAD_INDEX),
|
||||
root=block.message.body.hash_tree_root(),
|
||||
)
|
||||
@@ -1,23 +1,33 @@
|
||||
from eth2spec.test.helpers.constants import MINIMAL
|
||||
from eth2spec.test.context import (
|
||||
with_phases, spec_state_test
|
||||
with_capella_and_later,
|
||||
spec_state_test,
|
||||
with_presets,
|
||||
)
|
||||
from eth2spec.test.helpers.constants import CAPELLA
|
||||
from eth2spec.test.helpers.keys import pubkeys
|
||||
from eth2spec.test.helpers.state import (
|
||||
next_epoch_via_block,
|
||||
state_transition_and_sign_block,
|
||||
transition_to,
|
||||
next_slot,
|
||||
)
|
||||
from eth2spec.test.helpers.block import (
|
||||
build_empty_block_for_next_slot,
|
||||
build_empty_block,
|
||||
)
|
||||
from eth2spec.test.helpers.bls_to_execution_changes import get_signed_address_change
|
||||
from eth2spec.test.helpers.state import (
|
||||
next_slot,
|
||||
from eth2spec.test.helpers.attestations import (
|
||||
next_epoch_with_attestations,
|
||||
)
|
||||
from eth2spec.test.helpers.withdrawals import (
|
||||
set_eth1_withdrawal_credential_with_balance,
|
||||
set_validator_fully_withdrawable,
|
||||
set_validator_partially_withdrawable,
|
||||
prepare_expected_withdrawals,
|
||||
)
|
||||
from eth2spec.test.helpers.deposits import (
|
||||
prepare_state_and_deposit,
|
||||
)
|
||||
from eth2spec.test.helpers.voluntary_exits import prepare_signed_exits
|
||||
|
||||
|
||||
@@ -25,9 +35,9 @@ from eth2spec.test.helpers.voluntary_exits import prepare_signed_exits
|
||||
# BLSToExecutionChange
|
||||
#
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_success_bls_change(spec, state):
|
||||
def test_bls_change(spec, state):
|
||||
index = 0
|
||||
signed_address_change = get_signed_address_change(spec, state, validator_index=index)
|
||||
pre_credentials = state.validators[index].withdrawal_credentials
|
||||
@@ -48,9 +58,48 @@ def test_success_bls_change(spec, state):
|
||||
assert post_credentials[12:] == signed_address_change.message.to_execution_address
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_success_exit_and_bls_change(spec, state):
|
||||
def test_deposit_and_bls_change(spec, state):
|
||||
initial_registry_len = len(state.validators)
|
||||
initial_balances_len = len(state.balances)
|
||||
|
||||
validator_index = len(state.validators)
|
||||
amount = spec.MAX_EFFECTIVE_BALANCE
|
||||
deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True)
|
||||
|
||||
signed_address_change = get_signed_address_change(
|
||||
spec, state,
|
||||
validator_index=validator_index,
|
||||
withdrawal_pubkey=deposit.data.pubkey, # Deposit helper defaults to use pubkey as withdrawal credential
|
||||
)
|
||||
|
||||
deposit_credentials = deposit.data.withdrawal_credentials
|
||||
assert deposit_credentials[:1] == spec.BLS_WITHDRAWAL_PREFIX
|
||||
|
||||
yield 'pre', state
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
block.body.deposits.append(deposit)
|
||||
block.body.bls_to_execution_changes.append(signed_address_change)
|
||||
|
||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||
|
||||
yield 'blocks', [signed_block]
|
||||
yield 'post', state
|
||||
|
||||
assert len(state.validators) == initial_registry_len + 1
|
||||
assert len(state.balances) == initial_balances_len + 1
|
||||
validator_credentials = state.validators[validator_index].withdrawal_credentials
|
||||
assert deposit_credentials != validator_credentials
|
||||
assert validator_credentials[:1] == spec.ETH1_ADDRESS_WITHDRAWAL_PREFIX
|
||||
assert validator_credentials[1:12] == b'\x00' * 11
|
||||
assert validator_credentials[12:] == signed_address_change.message.to_execution_address
|
||||
|
||||
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_exit_and_bls_change(spec, state):
|
||||
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
|
||||
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
||||
|
||||
@@ -77,7 +126,7 @@ def test_success_exit_and_bls_change(spec, state):
|
||||
assert spec.is_fully_withdrawable_validator(validator, balance, validator.withdrawable_epoch)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_duplicate_bls_changes_same_block(spec, state):
|
||||
index = 0
|
||||
@@ -96,7 +145,7 @@ def test_invalid_duplicate_bls_changes_same_block(spec, state):
|
||||
yield 'post', None
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_two_bls_changes_of_different_addresses_same_validator_same_block(spec, state):
|
||||
index = 0
|
||||
@@ -124,7 +173,7 @@ def test_invalid_two_bls_changes_of_different_addresses_same_validator_same_bloc
|
||||
# Withdrawals
|
||||
#
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_full_withdrawal_in_epoch_transition(spec, state):
|
||||
index = 0
|
||||
@@ -145,7 +194,7 @@ def test_full_withdrawal_in_epoch_transition(spec, state):
|
||||
assert len(spec.get_expected_withdrawals(state)) == 0
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_partial_withdrawal_in_epoch_transition(spec, state):
|
||||
index = state.next_withdrawal_index
|
||||
@@ -169,7 +218,7 @@ def test_partial_withdrawal_in_epoch_transition(spec, state):
|
||||
assert len(spec.get_expected_withdrawals(state)) == 0
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_many_partial_withdrawals_in_epoch_transition(spec, state):
|
||||
assert len(state.validators) > spec.MAX_WITHDRAWALS_PER_PAYLOAD
|
||||
@@ -221,7 +270,7 @@ def _perform_valid_withdrawal(spec, state):
|
||||
return pre_state, signed_block_1, pre_next_withdrawal_index
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_withdrawal_success_two_blocks(spec, state):
|
||||
pre_state, signed_block_1, pre_next_withdrawal_index = _perform_valid_withdrawal(spec, state)
|
||||
@@ -238,7 +287,7 @@ def test_withdrawal_success_two_blocks(spec, state):
|
||||
yield 'post', state
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_invalid_withdrawal_fail_second_block_payload_isnt_compatible(spec, state):
|
||||
_perform_valid_withdrawal(spec, state)
|
||||
@@ -256,3 +305,156 @@ def test_invalid_withdrawal_fail_second_block_payload_isnt_compatible(spec, stat
|
||||
|
||||
yield 'blocks', [signed_block_2]
|
||||
yield 'post', None
|
||||
|
||||
|
||||
#
|
||||
# Mix top-ups and withdrawals
|
||||
#
|
||||
|
||||
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_top_up_and_partial_withdrawable_validator(spec, state):
|
||||
next_withdrawal_validator_index = 0
|
||||
validator_index = next_withdrawal_validator_index + 1
|
||||
|
||||
set_eth1_withdrawal_credential_with_balance(spec, state, validator_index, spec.MAX_EFFECTIVE_BALANCE)
|
||||
validator = state.validators[validator_index]
|
||||
balance = state.balances[validator_index]
|
||||
assert not spec.is_partially_withdrawable_validator(validator, balance)
|
||||
|
||||
# Make a top-up balance to validator
|
||||
amount = spec.MAX_EFFECTIVE_BALANCE // 4
|
||||
deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True)
|
||||
|
||||
yield 'pre', state
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
block.body.deposits.append(deposit)
|
||||
|
||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||
|
||||
yield 'blocks', [signed_block]
|
||||
yield 'post', state
|
||||
|
||||
# Since withdrawals happen before deposits, it becomes partially withdrawable after state transition.
|
||||
validator = state.validators[validator_index]
|
||||
balance = state.balances[validator_index]
|
||||
assert spec.is_partially_withdrawable_validator(validator, balance)
|
||||
|
||||
|
||||
@with_capella_and_later
|
||||
@spec_state_test
|
||||
def test_top_up_to_fully_withdrawn_validator(spec, state):
|
||||
"""
|
||||
Similar to `teste_process_deposit::test_success_top_up_to_withdrawn_validator` test.
|
||||
"""
|
||||
next_withdrawal_validator_index = 0
|
||||
validator_index = next_withdrawal_validator_index + 1
|
||||
|
||||
# Fully withdraw validator
|
||||
set_validator_fully_withdrawable(spec, state, validator_index)
|
||||
assert state.balances[validator_index] > 0
|
||||
next_epoch_via_block(spec, state)
|
||||
assert state.balances[validator_index] == 0
|
||||
assert state.validators[validator_index].effective_balance > 0
|
||||
next_epoch_via_block(spec, state)
|
||||
assert state.validators[validator_index].effective_balance == 0
|
||||
|
||||
# Make a top-up deposit to validator
|
||||
amount = spec.MAX_EFFECTIVE_BALANCE // 4
|
||||
deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True)
|
||||
|
||||
yield 'pre', state
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
block.body.deposits.append(deposit)
|
||||
|
||||
signed_block_1 = state_transition_and_sign_block(spec, state, block)
|
||||
|
||||
assert spec.is_fully_withdrawable_validator(
|
||||
state.validators[validator_index],
|
||||
state.balances[validator_index],
|
||||
spec.get_current_epoch(state)
|
||||
)
|
||||
|
||||
# Apply an empty block
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
signed_block_2 = state_transition_and_sign_block(spec, state, block)
|
||||
|
||||
# With mainnet preset, it holds
|
||||
if len(state.validators) <= spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP:
|
||||
assert not spec.is_fully_withdrawable_validator(
|
||||
state.validators[validator_index],
|
||||
state.balances[validator_index],
|
||||
spec.get_current_epoch(state)
|
||||
)
|
||||
|
||||
yield 'blocks', [signed_block_1, signed_block_2]
|
||||
yield 'post', state
|
||||
|
||||
|
||||
def _insert_validator(spec, state, balance):
|
||||
effective_balance = balance if balance < spec.MAX_EFFECTIVE_BALANCE else spec.MAX_EFFECTIVE_BALANCE
|
||||
validator_index = len(state.validators)
|
||||
validator = spec.Validator(
|
||||
pubkey=pubkeys[validator_index],
|
||||
withdrawal_credentials=spec.ETH1_ADDRESS_WITHDRAWAL_PREFIX + b'\x00' * 11 + b'\x56' * 20,
|
||||
activation_eligibility_epoch=1,
|
||||
activation_epoch=2,
|
||||
exit_epoch=spec.FAR_FUTURE_EPOCH,
|
||||
withdrawable_epoch=spec.FAR_FUTURE_EPOCH,
|
||||
effective_balance=effective_balance,
|
||||
)
|
||||
state.validators.append(validator)
|
||||
state.balances.append(balance)
|
||||
state.previous_epoch_participation.append(spec.ParticipationFlags(0b0000_0000))
|
||||
state.current_epoch_participation.append(spec.ParticipationFlags(0b0000_0000))
|
||||
state.inactivity_scores.append(0)
|
||||
|
||||
return validator_index
|
||||
|
||||
|
||||
def _run_activate_and_partial_withdrawal(spec, state, initial_balance):
|
||||
validator_index = _insert_validator(spec, state, balance=initial_balance)
|
||||
|
||||
# To make it eligibile activation
|
||||
transition_to(spec, state, spec.compute_start_slot_at_epoch(2) - 1)
|
||||
assert not spec.is_active_validator(state.validators[validator_index], spec.get_current_epoch(state))
|
||||
|
||||
yield 'pre', state
|
||||
|
||||
blocks = []
|
||||
# To activate
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||
blocks.append(signed_block)
|
||||
|
||||
assert spec.is_active_validator(state.validators[validator_index], spec.get_current_epoch(state))
|
||||
|
||||
if initial_balance > spec.MAX_EFFECTIVE_BALANCE:
|
||||
assert spec.is_partially_withdrawable_validator(
|
||||
state.validators[validator_index], state.balances[validator_index])
|
||||
else:
|
||||
assert not spec.is_partially_withdrawable_validator(
|
||||
state.validators[validator_index], state.balances[validator_index])
|
||||
|
||||
_, new_blocks, state = next_epoch_with_attestations(spec, state, True, True)
|
||||
blocks += new_blocks
|
||||
|
||||
yield 'blocks', blocks
|
||||
yield 'post', state
|
||||
|
||||
|
||||
@with_capella_and_later
|
||||
@with_presets([MINIMAL], reason="too many validators with mainnet config")
|
||||
@spec_state_test
|
||||
def test_activate_and_partial_withdrawal_max_effective_balance(spec, state):
|
||||
yield from _run_activate_and_partial_withdrawal(spec, state, initial_balance=spec.MAX_EFFECTIVE_BALANCE)
|
||||
|
||||
|
||||
@with_capella_and_later
|
||||
@with_presets([MINIMAL], reason="too many validators with mainnet config")
|
||||
@spec_state_test
|
||||
def test_activate_and_partial_withdrawal_overdeposit(spec, state):
|
||||
yield from _run_activate_and_partial_withdrawal(spec, state, initial_balance=spec.MAX_EFFECTIVE_BALANCE + 10000000)
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import pytest
|
||||
from copy import deepcopy
|
||||
from dataclasses import dataclass
|
||||
import importlib
|
||||
|
||||
@@ -6,12 +7,12 @@ from eth2spec.phase0 import mainnet as spec_phase0_mainnet, minimal as spec_phas
|
||||
from eth2spec.altair import mainnet as spec_altair_mainnet, minimal as spec_altair_minimal
|
||||
from eth2spec.bellatrix import mainnet as spec_bellatrix_mainnet, minimal as spec_bellatrix_minimal
|
||||
from eth2spec.capella import mainnet as spec_capella_mainnet, minimal as spec_capella_minimal
|
||||
from eth2spec.eip4844 import mainnet as spec_eip4844_mainnet, minimal as spec_eip4844_minimal
|
||||
from eth2spec.deneb import mainnet as spec_deneb_mainnet, minimal as spec_deneb_minimal
|
||||
from eth2spec.utils import bls
|
||||
|
||||
from .exceptions import SkippedTest
|
||||
from .helpers.constants import (
|
||||
PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844,
|
||||
PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB,
|
||||
MINIMAL, MAINNET,
|
||||
ALL_PHASES,
|
||||
ALL_FORK_UPGRADES,
|
||||
@@ -77,14 +78,14 @@ spec_targets: Dict[PresetBaseName, Dict[SpecForkName, Spec]] = {
|
||||
ALTAIR: spec_altair_minimal,
|
||||
BELLATRIX: spec_bellatrix_minimal,
|
||||
CAPELLA: spec_capella_minimal,
|
||||
EIP4844: spec_eip4844_minimal,
|
||||
DENEB: spec_deneb_minimal,
|
||||
},
|
||||
MAINNET: {
|
||||
PHASE0: spec_phase0_mainnet,
|
||||
ALTAIR: spec_altair_mainnet,
|
||||
BELLATRIX: spec_bellatrix_mainnet,
|
||||
CAPELLA: spec_capella_mainnet,
|
||||
EIP4844: spec_eip4844_mainnet
|
||||
DENEB: spec_deneb_mainnet
|
||||
},
|
||||
}
|
||||
|
||||
@@ -309,14 +310,18 @@ def config_fork_epoch_overrides(spec, state):
|
||||
return overrides
|
||||
|
||||
|
||||
def spec_state_test_with_matching_config(fn):
|
||||
def with_matching_spec_config(emitted_fork=None):
|
||||
def decorator(fn):
|
||||
def wrapper(*args, spec: Spec, **kw):
|
||||
conf = config_fork_epoch_overrides(spec, kw['state'])
|
||||
overrides = with_config_overrides(conf)
|
||||
return overrides(fn)(*args, spec=spec, **kw)
|
||||
overrides = config_fork_epoch_overrides(spec, kw['state'])
|
||||
deco = with_config_overrides(overrides, emitted_fork)
|
||||
return deco(fn)(*args, spec=spec, **kw)
|
||||
return wrapper
|
||||
return spec_test(with_state(decorator(single_phase(fn))))
|
||||
return decorator
|
||||
|
||||
|
||||
def spec_state_test_with_matching_config(fn):
|
||||
return spec_test(with_state(with_matching_spec_config()(single_phase(fn))))
|
||||
|
||||
|
||||
def expect_assertion_error(fn):
|
||||
@@ -422,7 +427,7 @@ def with_all_phases_except(exclusion_phases):
|
||||
with_altair_and_later = with_all_phases_from(ALTAIR)
|
||||
with_bellatrix_and_later = with_all_phases_from(BELLATRIX)
|
||||
with_capella_and_later = with_all_phases_from(CAPELLA)
|
||||
with_eip4844_and_later = with_all_phases_from(EIP4844)
|
||||
with_deneb_and_later = with_all_phases_from(DENEB)
|
||||
|
||||
|
||||
def _get_preset_targets(kw):
|
||||
@@ -557,10 +562,30 @@ def _get_copy_of_spec(spec):
|
||||
module_spec = importlib.util.find_spec(module_path)
|
||||
module = importlib.util.module_from_spec(module_spec)
|
||||
module_spec.loader.exec_module(module)
|
||||
|
||||
# Preserve existing config overrides
|
||||
module.config = deepcopy(spec.config)
|
||||
|
||||
return module
|
||||
|
||||
|
||||
def with_config_overrides(config_overrides):
|
||||
def spec_with_config_overrides(spec, config_overrides):
|
||||
# apply our overrides to a copy of it, and apply it to the spec
|
||||
config = spec.config._asdict()
|
||||
config.update((k, config_overrides[k]) for k in config.keys() & config_overrides.keys())
|
||||
config_types = spec.Configuration.__annotations__
|
||||
modified_config = {k: config_types[k](v) for k, v in config.items()}
|
||||
|
||||
spec.config = spec.Configuration(**modified_config)
|
||||
|
||||
# To output the changed config in a format compatible with yaml test vectors,
|
||||
# the dict SSZ objects have to be converted into Python built-in types.
|
||||
output_config = _get_basic_dict(modified_config)
|
||||
|
||||
return spec, output_config
|
||||
|
||||
|
||||
def with_config_overrides(config_overrides, emitted_fork=None, emit=True):
|
||||
"""
|
||||
WARNING: the spec_test decorator must wrap this, to ensure the decorated test actually runs.
|
||||
This decorator forces the test to yield, and pytest doesn't run generator tests, and instead silently passes it.
|
||||
@@ -570,23 +595,26 @@ def with_config_overrides(config_overrides):
|
||||
"""
|
||||
def decorator(fn):
|
||||
def wrapper(*args, spec: Spec, **kw):
|
||||
spec = _get_copy_of_spec(spec)
|
||||
# Apply config overrides to spec
|
||||
spec, output_config = spec_with_config_overrides(_get_copy_of_spec(spec), config_overrides)
|
||||
|
||||
# apply our overrides to a copy of it, and apply it to the spec
|
||||
config = spec.config._asdict()
|
||||
config.update(config_overrides)
|
||||
config_types = spec.Configuration.__annotations__
|
||||
modified_config = {k: config_types[k](v) for k, v in config.items()}
|
||||
# Apply config overrides to additional phases, if present
|
||||
if 'phases' in kw:
|
||||
phases = {}
|
||||
for fork in kw['phases']:
|
||||
phases[fork], output = spec_with_config_overrides(
|
||||
_get_copy_of_spec(kw['phases'][fork]), config_overrides)
|
||||
if emitted_fork == fork:
|
||||
output_config = output
|
||||
kw['phases'] = phases
|
||||
|
||||
# To output the changed config to could be serialized with yaml test vectors,
|
||||
# the dict SSZ objects have to be converted into Python built-in types.
|
||||
output_config = _get_basic_dict(modified_config)
|
||||
yield 'config', 'cfg', output_config
|
||||
|
||||
spec.config = spec.Configuration(**modified_config)
|
||||
# Emit requested spec (with overrides)
|
||||
if emit:
|
||||
yield 'config', 'cfg', output_config
|
||||
|
||||
# Run the function
|
||||
out = fn(*args, spec=spec, **kw)
|
||||
|
||||
# If it's not returning None like a normal test function,
|
||||
# it's generating things, and we need to complete it before setting back the config.
|
||||
if out is not None:
|
||||
|
||||
@@ -7,76 +7,76 @@ from eth2spec.test.context import (
|
||||
)
|
||||
from eth2spec.test.utils import with_meta_tags
|
||||
from eth2spec.test.helpers.constants import (
|
||||
CAPELLA, EIP4844,
|
||||
CAPELLA, DENEB,
|
||||
MINIMAL,
|
||||
)
|
||||
from eth2spec.test.helpers.state import (
|
||||
next_epoch,
|
||||
next_epoch_via_block,
|
||||
)
|
||||
from eth2spec.test.helpers.eip4844.fork import (
|
||||
EIP4844_FORK_TEST_META_TAGS,
|
||||
from eth2spec.test.helpers.deneb.fork import (
|
||||
DENEB_FORK_TEST_META_TAGS,
|
||||
run_fork_test,
|
||||
)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@with_phases(phases=[CAPELLA], other_phases=[DENEB])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
@with_meta_tags(DENEB_FORK_TEST_META_TAGS)
|
||||
def test_fork_base_state(spec, phases, state):
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
||||
yield from run_fork_test(phases[DENEB], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@with_phases(phases=[CAPELLA], other_phases=[DENEB])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
@with_meta_tags(DENEB_FORK_TEST_META_TAGS)
|
||||
def test_fork_next_epoch(spec, phases, state):
|
||||
next_epoch(spec, state)
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
||||
yield from run_fork_test(phases[DENEB], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@with_phases(phases=[CAPELLA], other_phases=[DENEB])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
@with_meta_tags(DENEB_FORK_TEST_META_TAGS)
|
||||
def test_fork_next_epoch_with_block(spec, phases, state):
|
||||
next_epoch_via_block(spec, state)
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
||||
yield from run_fork_test(phases[DENEB], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@with_phases(phases=[CAPELLA], other_phases=[DENEB])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
@with_meta_tags(DENEB_FORK_TEST_META_TAGS)
|
||||
def test_fork_many_next_epoch(spec, phases, state):
|
||||
for _ in range(3):
|
||||
next_epoch(spec, state)
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
||||
yield from run_fork_test(phases[DENEB], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@with_phases(phases=[CAPELLA], other_phases=[DENEB])
|
||||
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
|
||||
@spec_test
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
@with_meta_tags(DENEB_FORK_TEST_META_TAGS)
|
||||
def test_fork_random_low_balances(spec, phases, state):
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
||||
yield from run_fork_test(phases[DENEB], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@with_phases(phases=[CAPELLA], other_phases=[DENEB])
|
||||
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
|
||||
@spec_test
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
@with_meta_tags(DENEB_FORK_TEST_META_TAGS)
|
||||
def test_fork_random_misc_balances(spec, phases, state):
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
||||
yield from run_fork_test(phases[DENEB], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@with_phases(phases=[CAPELLA], other_phases=[DENEB])
|
||||
@with_presets([MINIMAL],
|
||||
reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated")
|
||||
@with_custom_state(balances_fn=large_validator_set, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
|
||||
@spec_test
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
@with_meta_tags(DENEB_FORK_TEST_META_TAGS)
|
||||
def test_fork_random_large_validator_set(spec, phases, state):
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
||||
yield from run_fork_test(phases[DENEB], state)
|
||||
@@ -0,0 +1,84 @@
|
||||
from random import Random
|
||||
|
||||
from eth2spec.test.context import (
|
||||
with_phases,
|
||||
with_custom_state,
|
||||
with_presets,
|
||||
spec_test, with_state,
|
||||
low_balances, misc_balances, large_validator_set,
|
||||
)
|
||||
from eth2spec.test.utils import with_meta_tags
|
||||
from eth2spec.test.helpers.constants import (
|
||||
CAPELLA, DENEB,
|
||||
MINIMAL,
|
||||
)
|
||||
from eth2spec.test.helpers.deneb.fork import (
|
||||
DENEB_FORK_TEST_META_TAGS,
|
||||
run_fork_test,
|
||||
)
|
||||
from eth2spec.test.helpers.random import randomize_state
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[DENEB])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(DENEB_FORK_TEST_META_TAGS)
|
||||
def test_deneb_fork_random_0(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(1010))
|
||||
yield from run_fork_test(phases[DENEB], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[DENEB])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(DENEB_FORK_TEST_META_TAGS)
|
||||
def test_deneb_fork_random_1(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(2020))
|
||||
yield from run_fork_test(phases[DENEB], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[DENEB])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(DENEB_FORK_TEST_META_TAGS)
|
||||
def test_deneb_fork_random_2(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(3030))
|
||||
yield from run_fork_test(phases[DENEB], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[DENEB])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(DENEB_FORK_TEST_META_TAGS)
|
||||
def test_deneb_fork_random_3(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(4040))
|
||||
yield from run_fork_test(phases[DENEB], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[DENEB])
|
||||
@spec_test
|
||||
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
|
||||
@with_meta_tags(DENEB_FORK_TEST_META_TAGS)
|
||||
def test_deneb_fork_random_low_balances(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(5050))
|
||||
yield from run_fork_test(phases[DENEB], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[DENEB])
|
||||
@spec_test
|
||||
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
|
||||
@with_meta_tags(DENEB_FORK_TEST_META_TAGS)
|
||||
def test_deneb_fork_random_misc_balances(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(6060))
|
||||
yield from run_fork_test(phases[DENEB], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[DENEB])
|
||||
@with_presets([MINIMAL],
|
||||
reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated")
|
||||
@spec_test
|
||||
@with_custom_state(balances_fn=large_validator_set, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
|
||||
@with_meta_tags(DENEB_FORK_TEST_META_TAGS)
|
||||
def test_deneb_fork_random_large_validator_set(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(7070))
|
||||
yield from run_fork_test(phases[DENEB], state)
|
||||
@@ -4,7 +4,7 @@ Please do not edit this file manually.
|
||||
See the README for that generator for more information.
|
||||
"""
|
||||
|
||||
from eth2spec.test.helpers.constants import EIP4844
|
||||
from eth2spec.test.helpers.constants import DENEB
|
||||
from eth2spec.test.context import (
|
||||
misc_balances_in_default_range_with_many_validators,
|
||||
with_phases,
|
||||
@@ -23,7 +23,7 @@ from eth2spec.test.utils.randomized_block_tests import (
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_phases([DENEB])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
@@ -36,11 +36,11 @@ def test_randomized_0(spec, state):
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:random_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
@@ -49,7 +49,7 @@ def test_randomized_0(spec, state):
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_phases([DENEB])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
@@ -62,11 +62,11 @@ def test_randomized_1(spec, state):
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:random_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
@@ -75,7 +75,7 @@ def test_randomized_1(spec, state):
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_phases([DENEB])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
@@ -88,11 +88,11 @@ def test_randomized_2(spec, state):
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:last_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
@@ -101,7 +101,7 @@ def test_randomized_2(spec, state):
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_phases([DENEB])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
@@ -114,11 +114,11 @@ def test_randomized_3(spec, state):
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:last_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:last_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
@@ -127,7 +127,7 @@ def test_randomized_3(spec, state):
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_phases([DENEB])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
@@ -140,11 +140,11 @@ def test_randomized_4(spec, state):
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:last_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
@@ -153,7 +153,7 @@ def test_randomized_4(spec, state):
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_phases([DENEB])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
@@ -166,11 +166,11 @@ def test_randomized_5(spec, state):
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:random_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:random_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
@@ -179,7 +179,7 @@ def test_randomized_5(spec, state):
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_phases([DENEB])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
@@ -192,11 +192,11 @@ def test_randomized_6(spec, state):
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
@@ -205,7 +205,7 @@ def test_randomized_6(spec, state):
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_phases([DENEB])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
@@ -218,11 +218,11 @@ def test_randomized_7(spec, state):
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
@@ -231,7 +231,7 @@ def test_randomized_7(spec, state):
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_phases([DENEB])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
@@ -244,11 +244,11 @@ def test_randomized_8(spec, state):
|
||||
# epochs:epochs_until_leak,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:random_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
@@ -257,7 +257,7 @@ def test_randomized_8(spec, state):
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_phases([DENEB])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
@@ -270,11 +270,11 @@ def test_randomized_9(spec, state):
|
||||
# epochs:epochs_until_leak,slots:0,with-block:no_block
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:random_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
@@ -283,7 +283,7 @@ def test_randomized_9(spec, state):
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_phases([DENEB])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
@@ -296,11 +296,11 @@ def test_randomized_10(spec, state):
|
||||
# epochs:epochs_until_leak,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:last_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
@@ -309,7 +309,7 @@ def test_randomized_10(spec, state):
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_phases([DENEB])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
@@ -322,11 +322,11 @@ def test_randomized_11(spec, state):
|
||||
# epochs:epochs_until_leak,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:last_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:last_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
@@ -335,7 +335,7 @@ def test_randomized_11(spec, state):
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_phases([DENEB])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
@@ -348,11 +348,11 @@ def test_randomized_12(spec, state):
|
||||
# epochs:epochs_until_leak,slots:0,with-block:no_block
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:last_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
@@ -361,7 +361,7 @@ def test_randomized_12(spec, state):
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_phases([DENEB])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
@@ -374,11 +374,11 @@ def test_randomized_13(spec, state):
|
||||
# epochs:epochs_until_leak,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:random_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:random_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
@@ -387,7 +387,7 @@ def test_randomized_13(spec, state):
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_phases([DENEB])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
@@ -400,11 +400,11 @@ def test_randomized_14(spec, state):
|
||||
# epochs:epochs_until_leak,slots:0,with-block:no_block
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
@@ -413,7 +413,7 @@ def test_randomized_14(spec, state):
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_phases([DENEB])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
@@ -426,11 +426,11 @@ def test_randomized_15(spec, state):
|
||||
# epochs:epochs_until_leak,slots:0,with-block:no_block
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
# epochs:0,slots:0,with-block:random_block_deneb
|
||||
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
@@ -6,7 +6,7 @@ from eth2spec.test.helpers.block import (
|
||||
)
|
||||
from eth2spec.test.context import (
|
||||
spec_state_test,
|
||||
with_eip4844_and_later,
|
||||
with_deneb_and_later,
|
||||
)
|
||||
from eth2spec.test.helpers.execution_payload import (
|
||||
compute_el_block_hash,
|
||||
@@ -16,7 +16,7 @@ from eth2spec.test.helpers.sharding import (
|
||||
)
|
||||
|
||||
|
||||
@with_eip4844_and_later
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_one_blob(spec, state):
|
||||
yield 'pre', state
|
||||
@@ -32,7 +32,7 @@ def test_one_blob(spec, state):
|
||||
yield 'post', state
|
||||
|
||||
|
||||
@with_eip4844_and_later
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_max_blobs(spec, state):
|
||||
yield 'pre', state
|
||||
@@ -0,0 +1,54 @@
|
||||
from eth2spec.test.context import (
|
||||
ForkMeta,
|
||||
always_bls,
|
||||
with_fork_metas,
|
||||
)
|
||||
from eth2spec.test.helpers.constants import (
|
||||
AFTER_DENEB_PRE_POST_FORKS,
|
||||
)
|
||||
from eth2spec.test.helpers.fork_transition import (
|
||||
OperationType,
|
||||
run_transition_with_operation,
|
||||
)
|
||||
|
||||
|
||||
#
|
||||
# BLSToExecutionChange
|
||||
#
|
||||
|
||||
@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=2)
|
||||
for pre, post in AFTER_DENEB_PRE_POST_FORKS])
|
||||
@always_bls
|
||||
def test_transition_with_btec_right_after_fork(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
|
||||
"""
|
||||
Create a BLS_TO_EXECUTION_CHANGE right *after* the transition
|
||||
"""
|
||||
yield from run_transition_with_operation(
|
||||
state,
|
||||
fork_epoch,
|
||||
spec,
|
||||
post_spec,
|
||||
pre_tag,
|
||||
post_tag,
|
||||
operation_type=OperationType.BLS_TO_EXECUTION_CHANGE,
|
||||
operation_at_slot=fork_epoch * spec.SLOTS_PER_EPOCH,
|
||||
)
|
||||
|
||||
|
||||
@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=2)
|
||||
for pre, post in AFTER_DENEB_PRE_POST_FORKS])
|
||||
@always_bls
|
||||
def test_transition_with_btec_right_before_fork(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
|
||||
"""
|
||||
Create a BLS_TO_EXECUTION_CHANGE right *before* the transition
|
||||
"""
|
||||
yield from run_transition_with_operation(
|
||||
state,
|
||||
fork_epoch,
|
||||
spec,
|
||||
post_spec,
|
||||
pre_tag,
|
||||
post_tag,
|
||||
operation_type=OperationType.BLS_TO_EXECUTION_CHANGE,
|
||||
operation_at_slot=fork_epoch * spec.SLOTS_PER_EPOCH - 1,
|
||||
)
|
||||
@@ -0,0 +1,55 @@
|
||||
from eth2spec.test.helpers.state import (
|
||||
state_transition_and_sign_block,
|
||||
)
|
||||
from eth2spec.test.helpers.block import (
|
||||
build_empty_block_for_next_slot
|
||||
)
|
||||
from eth2spec.test.context import (
|
||||
spec_state_test,
|
||||
with_deneb_and_later,
|
||||
)
|
||||
from eth2spec.test.helpers.execution_payload import (
|
||||
compute_el_block_hash,
|
||||
)
|
||||
from eth2spec.test.helpers.sharding import (
|
||||
get_sample_opaque_tx,
|
||||
)
|
||||
|
||||
|
||||
def _run_validate_blobs(spec, state, blob_count):
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
opaque_tx, blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count)
|
||||
block.body.blob_kzg_commitments = blob_kzg_commitments
|
||||
block.body.execution_payload.transactions = [opaque_tx]
|
||||
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
|
||||
state_transition_and_sign_block(spec, state, block)
|
||||
|
||||
# Also test the proof generation in `get_blob_sidecars`
|
||||
blob_sidecars = spec.get_blob_sidecars(block, blobs)
|
||||
blobs = [sidecar.blob for sidecar in blob_sidecars]
|
||||
kzg_proofs = [sidecar.kzg_proof for sidecar in blob_sidecars]
|
||||
spec.validate_blobs(blob_kzg_commitments, blobs, kzg_proofs)
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_validate_blobs_zero_blobs(spec, state):
|
||||
_run_validate_blobs(spec, state, blob_count=0)
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_validate_blobs_one_blob(spec, state):
|
||||
_run_validate_blobs(spec, state, blob_count=1)
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_validate_blobs_two_blobs(spec, state):
|
||||
_run_validate_blobs(spec, state, blob_count=2)
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_validate_blobs_max_blobs(spec, state):
|
||||
_run_validate_blobs(spec, state, blob_count=spec.MAX_BLOBS_PER_BLOCK)
|
||||
@@ -0,0 +1,109 @@
|
||||
import random
|
||||
|
||||
from eth2spec.test.context import (
|
||||
spec_state_test,
|
||||
with_deneb_and_later,
|
||||
)
|
||||
from eth2spec.test.helpers.sharding import (
|
||||
get_sample_blob,
|
||||
get_poly_in_both_forms,
|
||||
eval_poly_in_coeff_form,
|
||||
)
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_verify_kzg_proof(spec, state):
|
||||
x = 3
|
||||
blob = get_sample_blob(spec)
|
||||
commitment = spec.blob_to_kzg_commitment(blob)
|
||||
polynomial = spec.blob_to_polynomial(blob)
|
||||
proof = spec.compute_kzg_proof_impl(polynomial, x)
|
||||
|
||||
y = spec.evaluate_polynomial_in_evaluation_form(polynomial, x)
|
||||
assert spec.verify_kzg_proof_impl(commitment, x, y, proof)
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_barycentric_outside_domain(spec, state):
|
||||
"""
|
||||
Test barycentric formula correctness by using it to evaluate a polynomial at a bunch of points outside its domain
|
||||
(the roots of unity).
|
||||
|
||||
Then make sure that we would get the same result if we evaluated it from coefficient form without using the
|
||||
barycentric formula
|
||||
"""
|
||||
rng = random.Random(5566)
|
||||
poly_coeff, poly_eval = get_poly_in_both_forms(spec)
|
||||
roots_of_unity_brp = spec.bit_reversal_permutation(spec.ROOTS_OF_UNITY)
|
||||
|
||||
assert len(poly_coeff) == len(poly_eval) == len(roots_of_unity_brp)
|
||||
n_samples = 12
|
||||
|
||||
for _ in range(n_samples):
|
||||
# Get a random evaluation point and make sure it's not a root of unity
|
||||
z = rng.randint(0, spec.BLS_MODULUS - 1)
|
||||
while z in roots_of_unity_brp:
|
||||
z = rng.randint(0, spec.BLS_MODULUS - 1)
|
||||
|
||||
# Get p(z) by evaluating poly in coefficient form
|
||||
p_z_coeff = eval_poly_in_coeff_form(spec, poly_coeff, z)
|
||||
|
||||
# Get p(z) by evaluating poly in evaluation form
|
||||
p_z_eval = spec.evaluate_polynomial_in_evaluation_form(poly_eval, z)
|
||||
|
||||
# Both evaluations should agree
|
||||
assert p_z_coeff == p_z_eval
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_barycentric_within_domain(spec, state):
|
||||
"""
|
||||
Test barycentric formula correctness by using it to evaluate a polynomial at all the points of its domain
|
||||
(the roots of unity).
|
||||
|
||||
Then make sure that we would get the same result if we evaluated it from coefficient form without using the
|
||||
barycentric formula
|
||||
"""
|
||||
poly_coeff, poly_eval = get_poly_in_both_forms(spec)
|
||||
roots_of_unity_brp = spec.bit_reversal_permutation(spec.ROOTS_OF_UNITY)
|
||||
|
||||
assert len(poly_coeff) == len(poly_eval) == len(roots_of_unity_brp)
|
||||
n = len(poly_coeff)
|
||||
|
||||
# Iterate over the entire domain
|
||||
for i in range(n):
|
||||
# Grab a root of unity and use it as the evaluation point
|
||||
z = int(roots_of_unity_brp[i])
|
||||
|
||||
# Get p(z) by evaluating poly in coefficient form
|
||||
p_z_coeff = eval_poly_in_coeff_form(spec, poly_coeff, z)
|
||||
|
||||
# Get p(z) by evaluating poly in evaluation form
|
||||
p_z_eval = spec.evaluate_polynomial_in_evaluation_form(poly_eval, z)
|
||||
|
||||
# The two evaluations should be agree and p(z) should also be the i-th "coefficient" of the polynomial in
|
||||
# evaluation form
|
||||
assert p_z_coeff == p_z_eval == poly_eval[i]
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_compute_kzg_proof_within_domain(spec, state):
|
||||
"""
|
||||
Create and verify KZG proof that p(z) == y
|
||||
where z is in the domain of our KZG scheme (i.e. a relevant root of unity).
|
||||
"""
|
||||
blob = get_sample_blob(spec)
|
||||
commitment = spec.blob_to_kzg_commitment(blob)
|
||||
polynomial = spec.blob_to_polynomial(blob)
|
||||
|
||||
roots_of_unity_brp = spec.bit_reversal_permutation(spec.ROOTS_OF_UNITY)
|
||||
|
||||
for i, z in enumerate(roots_of_unity_brp):
|
||||
proof = spec.compute_kzg_proof_impl(polynomial, z)
|
||||
|
||||
y = spec.evaluate_polynomial_in_evaluation_form(polynomial, z)
|
||||
assert spec.verify_kzg_proof_impl(commitment, z, y, proof)
|
||||
@@ -1,6 +1,6 @@
|
||||
|
||||
from eth2spec.test.helpers.constants import (
|
||||
EIP4844,
|
||||
DENEB,
|
||||
MINIMAL,
|
||||
)
|
||||
from eth2spec.test.helpers.sharding import (
|
||||
@@ -13,7 +13,7 @@ from eth2spec.test.context import (
|
||||
)
|
||||
|
||||
|
||||
@with_phases([EIP4844])
|
||||
@with_phases([DENEB])
|
||||
@spec_state_test
|
||||
@with_presets([MINIMAL])
|
||||
def test_blob_to_kzg_commitment(spec, state):
|
||||
@@ -1,6 +1,6 @@
|
||||
|
||||
from eth2spec.test.helpers.constants import (
|
||||
EIP4844,
|
||||
DENEB,
|
||||
MINIMAL,
|
||||
)
|
||||
from eth2spec.test.helpers.sharding import (
|
||||
@@ -13,7 +13,7 @@ from eth2spec.test.context import (
|
||||
)
|
||||
|
||||
|
||||
@with_phases([EIP4844])
|
||||
@with_phases([DENEB])
|
||||
@spec_state_test
|
||||
@with_presets([MINIMAL])
|
||||
def test_tx_peek_blob_versioned_hashes(spec, state):
|
||||
@@ -1,40 +0,0 @@
|
||||
from eth2spec.test.helpers.bls_to_execution_changes import get_signed_address_change
|
||||
from eth2spec.test.context import spec_state_test, expect_assertion_error, with_eip4844_and_later
|
||||
|
||||
|
||||
def run_bls_to_execution_change_processing_no_op(spec, state, signed_address_change, valid=True):
|
||||
"""
|
||||
Run ``process_bls_to_execution_change``, yielding:
|
||||
- pre-state ('pre')
|
||||
- address-change ('address_change')
|
||||
- post-state ('post').
|
||||
If ``valid == False``, run expecting ``AssertionError``
|
||||
"""
|
||||
pre_state = state.copy()
|
||||
|
||||
# yield pre-state
|
||||
yield 'pre', state
|
||||
|
||||
yield 'address_change', signed_address_change
|
||||
|
||||
# If the address_change is invalid, processing is aborted, and there is no post-state.
|
||||
if not valid:
|
||||
expect_assertion_error(lambda: spec.process_bls_to_execution_change(state, signed_address_change))
|
||||
yield 'post', None
|
||||
return
|
||||
|
||||
# process address change
|
||||
spec.process_bls_to_execution_change(state, signed_address_change)
|
||||
|
||||
# yield post-state
|
||||
yield 'post', state
|
||||
|
||||
# Make sure state has NOT been changed
|
||||
assert state == pre_state
|
||||
|
||||
|
||||
@with_eip4844_and_later
|
||||
@spec_state_test
|
||||
def test_no_op(spec, state):
|
||||
signed_address_change = get_signed_address_change(spec, state)
|
||||
yield from run_bls_to_execution_change_processing_no_op(spec, state, signed_address_change)
|
||||
@@ -1,41 +0,0 @@
|
||||
|
||||
from eth2spec.test.context import spec_state_test, expect_assertion_error, with_eip4844_and_later
|
||||
from eth2spec.test.helpers.execution_payload import (
|
||||
build_empty_execution_payload,
|
||||
)
|
||||
from eth2spec.test.helpers.state import next_slot
|
||||
|
||||
|
||||
def run_withdrawals_processing(spec, state, execution_payload, valid=True):
|
||||
"""
|
||||
Run ``process_execution_payload``, yielding:
|
||||
- pre-state ('pre')
|
||||
- execution payload ('execution_payload')
|
||||
- post-state ('post').
|
||||
If ``valid == False``, run expecting ``AssertionError``
|
||||
"""
|
||||
pre_state = state.copy()
|
||||
|
||||
yield 'pre', state
|
||||
yield 'execution_payload', execution_payload
|
||||
|
||||
if not valid:
|
||||
expect_assertion_error(lambda: spec.process_withdrawals(state, execution_payload))
|
||||
yield 'post', None
|
||||
return
|
||||
|
||||
spec.process_withdrawals(state, execution_payload)
|
||||
|
||||
yield 'post', state
|
||||
|
||||
# Make sure state has NOT been changed
|
||||
assert state == pre_state
|
||||
|
||||
|
||||
@with_eip4844_and_later
|
||||
@spec_state_test
|
||||
def test_no_op(spec, state):
|
||||
next_slot(spec, state)
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload)
|
||||
@@ -1,84 +0,0 @@
|
||||
from random import Random
|
||||
|
||||
from eth2spec.test.context import (
|
||||
with_phases,
|
||||
with_custom_state,
|
||||
with_presets,
|
||||
spec_test, with_state,
|
||||
low_balances, misc_balances, large_validator_set,
|
||||
)
|
||||
from eth2spec.test.utils import with_meta_tags
|
||||
from eth2spec.test.helpers.constants import (
|
||||
CAPELLA, EIP4844,
|
||||
MINIMAL,
|
||||
)
|
||||
from eth2spec.test.helpers.eip4844.fork import (
|
||||
EIP4844_FORK_TEST_META_TAGS,
|
||||
run_fork_test,
|
||||
)
|
||||
from eth2spec.test.helpers.random import randomize_state
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
def test_eip4844_fork_random_0(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(1010))
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
def test_eip4844_fork_random_1(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(2020))
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
def test_eip4844_fork_random_2(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(3030))
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
def test_eip4844_fork_random_3(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(4040))
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@spec_test
|
||||
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
def test_eip4844_fork_random_low_balances(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(5050))
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@spec_test
|
||||
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
def test_eip4844_fork_random_misc_balances(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(6060))
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@with_presets([MINIMAL],
|
||||
reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated")
|
||||
@spec_test
|
||||
@with_custom_state(balances_fn=large_validator_set, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
def test_eip4844_fork_random_large_validator_set(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(7070))
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
||||
@@ -1,20 +0,0 @@
|
||||
from eth2spec.test.context import (
|
||||
spec_state_test,
|
||||
with_eip4844_and_later,
|
||||
)
|
||||
from eth2spec.test.helpers.sharding import (
|
||||
get_sample_blob,
|
||||
)
|
||||
|
||||
|
||||
@with_eip4844_and_later
|
||||
@spec_state_test
|
||||
def test_verify_kzg_proof(spec, state):
|
||||
x = 3
|
||||
blob = get_sample_blob(spec)
|
||||
commitment = spec.blob_to_kzg_commitment(blob)
|
||||
polynomial = spec.blob_to_polynomial(blob)
|
||||
proof = spec.compute_kzg_proof(polynomial, x)
|
||||
|
||||
y = spec.evaluate_polynomial_in_evaluation_form(polynomial, x)
|
||||
assert spec.verify_kzg_proof_impl(commitment, x, y, proof)
|
||||
@@ -1,53 +0,0 @@
|
||||
from eth2spec.test.helpers.state import (
|
||||
state_transition_and_sign_block,
|
||||
)
|
||||
from eth2spec.test.helpers.block import (
|
||||
build_empty_block_for_next_slot
|
||||
)
|
||||
from eth2spec.test.context import (
|
||||
spec_state_test,
|
||||
with_eip4844_and_later,
|
||||
)
|
||||
from eth2spec.test.helpers.execution_payload import (
|
||||
compute_el_block_hash,
|
||||
)
|
||||
from eth2spec.test.helpers.sharding import (
|
||||
get_sample_opaque_tx,
|
||||
)
|
||||
|
||||
|
||||
def _run_validate_blobs_sidecar_test(spec, state, blob_count):
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
opaque_tx, blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count)
|
||||
block.body.blob_kzg_commitments = blob_kzg_commitments
|
||||
block.body.execution_payload.transactions = [opaque_tx]
|
||||
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
|
||||
state_transition_and_sign_block(spec, state, block)
|
||||
|
||||
blobs_sidecar = spec.get_blobs_sidecar(block, blobs)
|
||||
expected_commitments = [spec.blob_to_kzg_commitment(blobs[i]) for i in range(blob_count)]
|
||||
spec.validate_blobs_sidecar(block.slot, block.hash_tree_root(), expected_commitments, blobs_sidecar)
|
||||
|
||||
|
||||
@with_eip4844_and_later
|
||||
@spec_state_test
|
||||
def test_validate_blobs_sidecar_zero_blobs(spec, state):
|
||||
_run_validate_blobs_sidecar_test(spec, state, blob_count=0)
|
||||
|
||||
|
||||
@with_eip4844_and_later
|
||||
@spec_state_test
|
||||
def test_validate_blobs_sidecar_one_blob(spec, state):
|
||||
_run_validate_blobs_sidecar_test(spec, state, blob_count=1)
|
||||
|
||||
|
||||
@with_eip4844_and_later
|
||||
@spec_state_test
|
||||
def test_validate_blobs_sidecar_two_blobs(spec, state):
|
||||
_run_validate_blobs_sidecar_test(spec, state, blob_count=2)
|
||||
|
||||
|
||||
@with_eip4844_and_later
|
||||
@spec_state_test
|
||||
def test_validate_blobs_sidecar_max_blobs(spec, state):
|
||||
_run_validate_blobs_sidecar_test(spec, state, blob_count=spec.MAX_BLOBS_PER_BLOCK)
|
||||
@@ -2,7 +2,12 @@ from eth2spec.utils import bls
|
||||
from eth2spec.test.helpers.keys import pubkeys, privkeys, pubkey_to_privkey
|
||||
|
||||
|
||||
def get_signed_address_change(spec, state, validator_index=None, withdrawal_pubkey=None, to_execution_address=None):
|
||||
def get_signed_address_change(spec, state,
|
||||
validator_index=None,
|
||||
withdrawal_pubkey=None,
|
||||
to_execution_address=None,
|
||||
fork_version=None,
|
||||
genesis_validators_root=None):
|
||||
if validator_index is None:
|
||||
validator_index = 0
|
||||
|
||||
@@ -16,7 +21,15 @@ def get_signed_address_change(spec, state, validator_index=None, withdrawal_pubk
|
||||
if to_execution_address is None:
|
||||
to_execution_address = b'\x42' * 20
|
||||
|
||||
domain = spec.get_domain(state, spec.DOMAIN_BLS_TO_EXECUTION_CHANGE)
|
||||
if genesis_validators_root is None:
|
||||
genesis_validators_root = state.genesis_validators_root
|
||||
|
||||
domain = spec.compute_domain(
|
||||
spec.DOMAIN_BLS_TO_EXECUTION_CHANGE,
|
||||
fork_version=fork_version,
|
||||
genesis_validators_root=genesis_validators_root,
|
||||
)
|
||||
|
||||
address_change = spec.BLSToExecutionChange(
|
||||
validator_index=validator_index,
|
||||
from_bls_pubkey=withdrawal_pubkey,
|
||||
|
||||
@@ -29,14 +29,12 @@ def run_fork_test(post_spec, pre_state):
|
||||
'inactivity_scores',
|
||||
# Sync
|
||||
'current_sync_committee', 'next_sync_committee',
|
||||
# Execution
|
||||
'latest_execution_payload_header',
|
||||
]
|
||||
for field in stable_fields:
|
||||
assert getattr(pre_state, field) == getattr(post_state, field)
|
||||
|
||||
# Modified fields
|
||||
modified_fields = ['fork']
|
||||
modified_fields = ['fork', 'latest_execution_payload_header']
|
||||
for field in modified_fields:
|
||||
assert getattr(pre_state, field) != getattr(post_state, field)
|
||||
|
||||
|
||||
@@ -14,29 +14,35 @@ CAPELLA = SpecForkName('capella')
|
||||
SHARDING = SpecForkName('sharding')
|
||||
CUSTODY_GAME = SpecForkName('custody_game')
|
||||
DAS = SpecForkName('das')
|
||||
EIP4844 = SpecForkName('eip4844')
|
||||
DENEB = SpecForkName('deneb')
|
||||
|
||||
# The forks that pytest can run with.
|
||||
ALL_PHASES = (
|
||||
# Formal forks
|
||||
PHASE0, ALTAIR, BELLATRIX, CAPELLA,
|
||||
# Experimental patches
|
||||
EIP4844,
|
||||
DENEB,
|
||||
)
|
||||
# The forks that output to the test vectors.
|
||||
TESTGEN_FORKS = (PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844)
|
||||
TESTGEN_FORKS = (PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB)
|
||||
|
||||
# TODO: no EIP4844 fork tests now. Should add when we figure out the content of Capella.
|
||||
# TODO: no DENEB fork tests now. Should add when we figure out the content of Capella.
|
||||
ALL_FORK_UPGRADES = {
|
||||
# pre_fork_name: post_fork_name
|
||||
PHASE0: ALTAIR,
|
||||
ALTAIR: BELLATRIX,
|
||||
BELLATRIX: CAPELLA,
|
||||
CAPELLA: EIP4844,
|
||||
CAPELLA: DENEB,
|
||||
}
|
||||
ALL_PRE_POST_FORKS = ALL_FORK_UPGRADES.items()
|
||||
AFTER_BELLATRIX_UPGRADES = {key: value for key, value in ALL_FORK_UPGRADES.items() if key != PHASE0}
|
||||
AFTER_BELLATRIX_PRE_POST_FORKS = AFTER_BELLATRIX_UPGRADES.items()
|
||||
AFTER_CAPELLA_UPGRADES = {key: value for key, value in ALL_FORK_UPGRADES.items()
|
||||
if key not in [PHASE0, ALTAIR]}
|
||||
AFTER_CAPELLA_PRE_POST_FORKS = AFTER_CAPELLA_UPGRADES.items()
|
||||
AFTER_DENEB_UPGRADES = {key: value for key, value in ALL_FORK_UPGRADES.items()
|
||||
if key not in [PHASE0, ALTAIR, BELLATRIX]}
|
||||
AFTER_DENEB_PRE_POST_FORKS = AFTER_DENEB_UPGRADES.items()
|
||||
|
||||
#
|
||||
# Config
|
||||
|
||||
@@ -1,17 +1,17 @@
|
||||
from eth2spec.test.helpers.constants import (
|
||||
EIP4844,
|
||||
DENEB,
|
||||
)
|
||||
|
||||
|
||||
EIP4844_FORK_TEST_META_TAGS = {
|
||||
'fork': EIP4844,
|
||||
DENEB_FORK_TEST_META_TAGS = {
|
||||
'fork': DENEB,
|
||||
}
|
||||
|
||||
|
||||
def run_fork_test(post_spec, pre_state):
|
||||
yield 'pre', pre_state
|
||||
|
||||
post_state = post_spec.upgrade_to_eip4844(pre_state)
|
||||
post_state = post_spec.upgrade_to_deneb(pre_state)
|
||||
|
||||
# Stable fields
|
||||
stable_fields = [
|
||||
@@ -57,7 +57,7 @@ def run_fork_test(post_spec, pre_state):
|
||||
assert getattr(pre_validator, field) == getattr(post_validator, field)
|
||||
|
||||
assert pre_state.fork.current_version == post_state.fork.previous_version
|
||||
assert post_state.fork.current_version == post_spec.config.EIP4844_FORK_VERSION
|
||||
assert post_state.fork.current_version == post_spec.config.DENEB_FORK_VERSION
|
||||
assert post_state.fork.epoch == post_spec.get_current_epoch(post_state)
|
||||
|
||||
yield 'post', post_state
|
||||
@@ -1,5 +1,8 @@
|
||||
|
||||
from eth2spec.test.helpers.forks import is_post_altair
|
||||
from eth2spec.test.helpers.forks import (
|
||||
is_post_altair,
|
||||
is_post_capella,
|
||||
)
|
||||
|
||||
|
||||
def get_process_calls(spec):
|
||||
@@ -22,7 +25,10 @@ def get_process_calls(spec):
|
||||
'process_effective_balance_updates',
|
||||
'process_slashings_reset',
|
||||
'process_randao_mixes_reset',
|
||||
'process_historical_roots_update',
|
||||
# Capella replaced `process_historical_roots_update` with `process_historical_summaries_update`
|
||||
'process_historical_summaries_update' if is_post_capella(spec) else (
|
||||
'process_historical_roots_update'
|
||||
),
|
||||
# Altair replaced `process_participation_record_updates` with `process_participation_flag_updates`
|
||||
'process_participation_flag_updates' if is_post_altair(spec) else (
|
||||
'process_participation_record_updates'
|
||||
|
||||
@@ -4,7 +4,7 @@ from rlp import encode
|
||||
from rlp.sedes import big_endian_int, Binary, List
|
||||
|
||||
from eth2spec.debug.random_value import get_random_bytes_list
|
||||
from eth2spec.test.helpers.forks import is_post_capella, is_post_eip4844
|
||||
from eth2spec.test.helpers.forks import is_post_capella, is_post_deneb
|
||||
|
||||
|
||||
def get_execution_payload_header(spec, execution_payload):
|
||||
@@ -26,6 +26,8 @@ def get_execution_payload_header(spec, execution_payload):
|
||||
)
|
||||
if is_post_capella(spec):
|
||||
payload_header.withdrawals_root = spec.hash_tree_root(execution_payload.withdrawals)
|
||||
if is_post_deneb(spec):
|
||||
payload_header.excess_data_gas = execution_payload.excess_data_gas
|
||||
return payload_header
|
||||
|
||||
|
||||
@@ -87,7 +89,7 @@ def compute_el_header_block_hash(spec,
|
||||
if is_post_capella(spec):
|
||||
# withdrawals_root
|
||||
execution_payload_header_rlp.append((Binary(32, 32), withdrawals_trie_root))
|
||||
if is_post_eip4844(spec):
|
||||
if is_post_deneb(spec):
|
||||
# excess_data_gas
|
||||
execution_payload_header_rlp.append((big_endian_int, payload_header.excess_data_gas))
|
||||
|
||||
@@ -108,7 +110,7 @@ def get_withdrawal_rlp(spec, withdrawal):
|
||||
# address
|
||||
(Binary(20, 20), withdrawal.address),
|
||||
# amount
|
||||
(big_endian_int, spec.uint256(withdrawal.amount) * (10**9)),
|
||||
(big_endian_int, withdrawal.amount),
|
||||
]
|
||||
|
||||
sedes = List([schema for schema, _ in withdrawal_rlp])
|
||||
|
||||
@@ -9,11 +9,12 @@ from eth2spec.test.helpers.block import (
|
||||
build_empty_block,
|
||||
sign_block,
|
||||
)
|
||||
from eth2spec.test.helpers.bls_to_execution_changes import get_signed_address_change
|
||||
from eth2spec.test.helpers.constants import (
|
||||
ALTAIR,
|
||||
BELLATRIX,
|
||||
CAPELLA,
|
||||
EIP4844,
|
||||
DENEB,
|
||||
)
|
||||
from eth2spec.test.helpers.deposits import (
|
||||
prepare_state_and_deposit,
|
||||
@@ -36,6 +37,7 @@ class OperationType(Enum):
|
||||
ATTESTER_SLASHING = auto()
|
||||
DEPOSIT = auto()
|
||||
VOLUNTARY_EXIT = auto()
|
||||
BLS_TO_EXECUTION_CHANGE = auto()
|
||||
|
||||
|
||||
def _set_operations_by_dict(block, operation_dict):
|
||||
@@ -151,8 +153,8 @@ def do_fork(state, spec, post_spec, fork_epoch, with_block=True, operation_dict=
|
||||
state = post_spec.upgrade_to_bellatrix(state)
|
||||
elif post_spec.fork == CAPELLA:
|
||||
state = post_spec.upgrade_to_capella(state)
|
||||
elif post_spec.fork == EIP4844:
|
||||
state = post_spec.upgrade_to_eip4844(state)
|
||||
elif post_spec.fork == DENEB:
|
||||
state = post_spec.upgrade_to_deneb(state)
|
||||
|
||||
assert state.fork.epoch == fork_epoch
|
||||
|
||||
@@ -165,9 +167,9 @@ def do_fork(state, spec, post_spec, fork_epoch, with_block=True, operation_dict=
|
||||
elif post_spec.fork == CAPELLA:
|
||||
assert state.fork.previous_version == post_spec.config.BELLATRIX_FORK_VERSION
|
||||
assert state.fork.current_version == post_spec.config.CAPELLA_FORK_VERSION
|
||||
elif post_spec.fork == EIP4844:
|
||||
elif post_spec.fork == DENEB:
|
||||
assert state.fork.previous_version == post_spec.config.CAPELLA_FORK_VERSION
|
||||
assert state.fork.current_version == post_spec.config.EIP4844_FORK_VERSION
|
||||
assert state.fork.current_version == post_spec.config.DENEB_FORK_VERSION
|
||||
|
||||
if with_block:
|
||||
return state, _state_transition_and_sign_block_at_slot(post_spec, state, operation_dict=operation_dict)
|
||||
@@ -267,6 +269,10 @@ def run_transition_with_operation(state,
|
||||
selected_validator_index = 0
|
||||
signed_exits = prepare_signed_exits(spec, state, [selected_validator_index])
|
||||
operation_dict = {'voluntary_exits': signed_exits}
|
||||
elif operation_type == OperationType.BLS_TO_EXECUTION_CHANGE:
|
||||
selected_validator_index = 0
|
||||
bls_to_execution_changes = [get_signed_address_change(spec, state, selected_validator_index)]
|
||||
operation_dict = {'bls_to_execution_changes': bls_to_execution_changes}
|
||||
|
||||
def _check_state():
|
||||
if operation_type == OperationType.PROPOSER_SLASHING:
|
||||
@@ -288,6 +294,9 @@ def run_transition_with_operation(state,
|
||||
elif operation_type == OperationType.VOLUNTARY_EXIT:
|
||||
validator = state.validators[selected_validator_index]
|
||||
assert validator.exit_epoch < post_spec.FAR_FUTURE_EPOCH
|
||||
elif operation_type == OperationType.BLS_TO_EXECUTION_CHANGE:
|
||||
validator = state.validators[selected_validator_index]
|
||||
assert validator.withdrawal_credentials[:1] == spec.ETH1_ADDRESS_WITHDRAWAL_PREFIX
|
||||
|
||||
yield "pre", state
|
||||
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
from .constants import (
|
||||
PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844,
|
||||
PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB,
|
||||
)
|
||||
|
||||
|
||||
def is_post_fork(a, b):
|
||||
if a == EIP4844:
|
||||
return b in [PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844]
|
||||
if a == DENEB:
|
||||
return b in [PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB]
|
||||
if a == CAPELLA:
|
||||
return b in [PHASE0, ALTAIR, BELLATRIX, CAPELLA]
|
||||
if a == BELLATRIX:
|
||||
@@ -29,5 +29,5 @@ def is_post_capella(spec):
|
||||
return is_post_fork(spec.fork, CAPELLA)
|
||||
|
||||
|
||||
def is_post_eip4844(spec):
|
||||
return is_post_fork(spec.fork, EIP4844)
|
||||
def is_post_deneb(spec):
|
||||
return is_post_fork(spec.fork, DENEB)
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
from eth2spec.test.helpers.constants import (
|
||||
ALTAIR, BELLATRIX, CAPELLA, EIP4844,
|
||||
ALTAIR, BELLATRIX, CAPELLA, DENEB,
|
||||
)
|
||||
from eth2spec.test.helpers.execution_payload import (
|
||||
compute_el_header_block_hash,
|
||||
@@ -77,9 +77,9 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
|
||||
elif spec.fork == CAPELLA:
|
||||
previous_version = spec.config.BELLATRIX_FORK_VERSION
|
||||
current_version = spec.config.CAPELLA_FORK_VERSION
|
||||
elif spec.fork == EIP4844:
|
||||
elif spec.fork == DENEB:
|
||||
previous_version = spec.config.CAPELLA_FORK_VERSION
|
||||
current_version = spec.config.EIP4844_FORK_VERSION
|
||||
current_version = spec.config.DENEB_FORK_VERSION
|
||||
|
||||
state = spec.BeaconState(
|
||||
genesis_time=0,
|
||||
|
||||
@@ -5,28 +5,7 @@ from eth2spec.test.helpers.sync_committee import (
|
||||
compute_aggregate_sync_committee_signature,
|
||||
compute_committee_indices,
|
||||
)
|
||||
|
||||
|
||||
def signed_block_to_header(spec, block):
|
||||
return spec.BeaconBlockHeader(
|
||||
slot=block.message.slot,
|
||||
proposer_index=block.message.proposer_index,
|
||||
parent_root=block.message.parent_root,
|
||||
state_root=block.message.state_root,
|
||||
body_root=block.message.body.hash_tree_root(),
|
||||
)
|
||||
|
||||
|
||||
def initialize_light_client_store(spec, state):
|
||||
return spec.LightClientStore(
|
||||
finalized_header=spec.BeaconBlockHeader(),
|
||||
current_sync_committee=state.current_sync_committee,
|
||||
next_sync_committee=state.next_sync_committee,
|
||||
best_valid_update=None,
|
||||
optimistic_header=spec.BeaconBlockHeader(),
|
||||
previous_max_active_participants=0,
|
||||
current_max_active_participants=0,
|
||||
)
|
||||
from math import floor
|
||||
|
||||
|
||||
def get_sync_aggregate(spec, state, num_participants=None, signature_slot=None):
|
||||
@@ -60,3 +39,32 @@ def get_sync_aggregate(spec, state, num_participants=None, signature_slot=None):
|
||||
sync_committee_signature=sync_committee_signature,
|
||||
)
|
||||
return sync_aggregate, signature_slot
|
||||
|
||||
|
||||
def create_update(spec,
|
||||
attested_state,
|
||||
attested_block,
|
||||
finalized_block,
|
||||
with_next,
|
||||
with_finality,
|
||||
participation_rate):
|
||||
num_participants = floor(spec.SYNC_COMMITTEE_SIZE * participation_rate)
|
||||
|
||||
update = spec.LightClientUpdate()
|
||||
|
||||
update.attested_header = spec.block_to_light_client_header(attested_block)
|
||||
|
||||
if with_next:
|
||||
update.next_sync_committee = attested_state.next_sync_committee
|
||||
update.next_sync_committee_branch = spec.compute_merkle_proof_for_state(
|
||||
attested_state, spec.NEXT_SYNC_COMMITTEE_INDEX)
|
||||
|
||||
if with_finality:
|
||||
update.finalized_header = spec.block_to_light_client_header(finalized_block)
|
||||
update.finality_branch = spec.compute_merkle_proof_for_state(
|
||||
attested_state, spec.FINALIZED_ROOT_INDEX)
|
||||
|
||||
update.sync_aggregate, update.signature_slot = get_sync_aggregate(
|
||||
spec, attested_state, num_participants)
|
||||
|
||||
return update
|
||||
|
||||
@@ -12,7 +12,7 @@ from eth2spec.utils.ssz.ssz_impl import serialize
|
||||
|
||||
|
||||
#
|
||||
# Containers from EIP-4844
|
||||
# Containers from Deneb
|
||||
#
|
||||
MAX_CALLDATA_SIZE = 2**24
|
||||
MAX_VERSIONED_HASHES_LIST_SIZE = 2**24
|
||||
@@ -66,6 +66,38 @@ def get_sample_blob(spec, rng=None):
|
||||
return spec.Blob(b)
|
||||
|
||||
|
||||
def eval_poly_in_coeff_form(spec, coeffs, x):
|
||||
"""
|
||||
Evaluate a polynomial in coefficient form at 'x' using Horner's rule
|
||||
"""
|
||||
total = 0
|
||||
for a in reversed(coeffs):
|
||||
total = (total * x + a) % spec.BLS_MODULUS
|
||||
return total % spec.BLS_MODULUS
|
||||
|
||||
|
||||
def get_poly_in_both_forms(spec, rng=None):
|
||||
"""
|
||||
Generate and return a random polynomial in both coefficient form and evaluation form
|
||||
"""
|
||||
if rng is None:
|
||||
rng = random.Random(5566)
|
||||
|
||||
roots_of_unity_brp = spec.bit_reversal_permutation(spec.ROOTS_OF_UNITY)
|
||||
|
||||
coeffs = [
|
||||
rng.randint(0, spec.BLS_MODULUS - 1)
|
||||
for _ in range(spec.FIELD_ELEMENTS_PER_BLOB)
|
||||
]
|
||||
|
||||
evals = [
|
||||
eval_poly_in_coeff_form(spec, coeffs, int(z))
|
||||
for z in roots_of_unity_brp
|
||||
]
|
||||
|
||||
return coeffs, evals
|
||||
|
||||
|
||||
def get_sample_opaque_tx(spec, blob_count=1, rng=None):
|
||||
blobs = []
|
||||
blob_kzg_commitments = []
|
||||
|
||||
@@ -1,4 +1,8 @@
|
||||
from eth2spec.test.context import spec_state_test, with_all_phases
|
||||
from eth2spec.test.context import (
|
||||
PHASE0, ALTAIR, BELLATRIX,
|
||||
spec_state_test,
|
||||
with_phases,
|
||||
)
|
||||
from eth2spec.test.helpers.epoch_processing import (
|
||||
run_epoch_processing_with
|
||||
)
|
||||
@@ -8,7 +12,7 @@ def run_process_historical_roots_update(spec, state):
|
||||
yield from run_epoch_processing_with(spec, state, 'process_historical_roots_update')
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, ALTAIR, BELLATRIX])
|
||||
@spec_state_test
|
||||
def test_historical_root_accumulator(spec, state):
|
||||
# skip ahead to near the end of the historical roots period (excl block before epoch processing)
|
||||
|
||||
@@ -30,7 +30,7 @@ from eth2spec.test.helpers.sync_committee import (
|
||||
compute_sync_committee_participant_reward_and_penalty,
|
||||
)
|
||||
from eth2spec.test.helpers.constants import PHASE0, MINIMAL
|
||||
from eth2spec.test.helpers.forks import is_post_altair, is_post_bellatrix
|
||||
from eth2spec.test.helpers.forks import is_post_altair, is_post_bellatrix, is_post_capella
|
||||
from eth2spec.test.context import (
|
||||
spec_test, spec_state_test, dump_skipping_message,
|
||||
with_phases, with_all_phases, single_phase,
|
||||
@@ -1026,7 +1026,10 @@ def test_balance_driven_status_transitions(spec, state):
|
||||
@always_bls
|
||||
def test_historical_batch(spec, state):
|
||||
state.slot += spec.SLOTS_PER_HISTORICAL_ROOT - (state.slot % spec.SLOTS_PER_HISTORICAL_ROOT) - 1
|
||||
pre_historical_roots_len = len(state.historical_roots)
|
||||
pre_historical_roots = state.historical_roots.copy()
|
||||
|
||||
if is_post_capella(spec):
|
||||
pre_historical_summaries = state.historical_summaries.copy()
|
||||
|
||||
yield 'pre', state
|
||||
|
||||
@@ -1038,7 +1041,14 @@ def test_historical_batch(spec, state):
|
||||
|
||||
assert state.slot == block.slot
|
||||
assert spec.get_current_epoch(state) % (spec.SLOTS_PER_HISTORICAL_ROOT // spec.SLOTS_PER_EPOCH) == 0
|
||||
assert len(state.historical_roots) == pre_historical_roots_len + 1
|
||||
|
||||
# check history update
|
||||
if is_post_capella(spec):
|
||||
# Frozen `historical_roots`
|
||||
assert state.historical_roots == pre_historical_roots
|
||||
assert len(state.historical_summaries) == len(pre_historical_summaries) + 1
|
||||
else:
|
||||
assert len(state.historical_roots) == len(pre_historical_roots) + 1
|
||||
|
||||
|
||||
@with_all_phases
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
from eth2spec.test.helpers.forks import (
|
||||
is_post_capella,
|
||||
)
|
||||
from eth2spec.test.helpers.state import get_state_root
|
||||
from eth2spec.test.context import (
|
||||
spec_state_test,
|
||||
@@ -61,3 +64,26 @@ def test_over_epoch_boundary(spec, state):
|
||||
yield 'slots', int(slots)
|
||||
spec.process_slots(state, state.slot + slots)
|
||||
yield 'post', state
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_historical_accumulator(spec, state):
|
||||
pre_historical_roots = state.historical_roots.copy()
|
||||
|
||||
if is_post_capella(spec):
|
||||
pre_historical_summaries = state.historical_summaries.copy()
|
||||
|
||||
yield 'pre', state
|
||||
slots = spec.SLOTS_PER_HISTORICAL_ROOT
|
||||
yield 'slots', int(slots)
|
||||
spec.process_slots(state, state.slot + slots)
|
||||
yield 'post', state
|
||||
|
||||
# check history update
|
||||
if is_post_capella(spec):
|
||||
# Frozen `historical_roots`
|
||||
assert state.historical_roots == pre_historical_roots
|
||||
assert len(state.historical_summaries) == len(pre_historical_summaries) + 1
|
||||
else:
|
||||
assert len(state.historical_roots) == len(pre_historical_roots) + 1
|
||||
|
||||
@@ -88,7 +88,7 @@ def randomize_state_capella(spec, state, stats, exit_fraction=0.1, slash_fractio
|
||||
return scenario_state
|
||||
|
||||
|
||||
def randomize_state_eip4844(spec, state, stats, exit_fraction=0.1, slash_fraction=0.1):
|
||||
def randomize_state_deneb(spec, state, stats, exit_fraction=0.1, slash_fraction=0.1):
|
||||
scenario_state = randomize_state_capella(spec,
|
||||
state,
|
||||
stats,
|
||||
@@ -232,7 +232,7 @@ def random_block_capella(spec, state, signed_blocks, scenario_state, rng=Random(
|
||||
return block
|
||||
|
||||
|
||||
def random_block_eip4844(spec, state, signed_blocks, scenario_state, rng=Random(3456)):
|
||||
def random_block_deneb(spec, state, signed_blocks, scenario_state, rng=Random(3456)):
|
||||
block = random_block_capella(spec, state, signed_blocks, scenario_state)
|
||||
# TODO: more commitments. blob_kzg_commitments: List[KZGCommitment, MAX_BLOBS_PER_BLOCK]
|
||||
opaque_tx, _, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=1)
|
||||
|
||||
@@ -41,11 +41,10 @@ Sub-transitions:
|
||||
- `effective_balance_updates`
|
||||
- `slashings_reset`
|
||||
- `randao_mixes_reset`
|
||||
- `historical_roots_update`
|
||||
- `historical_roots_update` (Phase0, Altair, Bellatrix only)
|
||||
- `historical_summaries_update` (Capella)
|
||||
- `participation_record_updates` (Phase 0 only)
|
||||
- `participation_flag_updates` (Altair)
|
||||
- `sync_committee_updates` (Altair)
|
||||
- `full_withdrawals` (Capella)
|
||||
- `partial_withdrawals` (Capella)
|
||||
|
||||
The resulting state should match the expected `post` state.
|
||||
|
||||
@@ -9,11 +9,15 @@ This series of tests provides reference test vectors for validating that a light
|
||||
```yaml
|
||||
genesis_validators_root: Bytes32 -- string, hex encoded, with 0x prefix
|
||||
trusted_block_root: Bytes32 -- string, hex encoded, with 0x prefix
|
||||
bootstrap_fork_digest: string -- Encoded `ForkDigest`-context of `bootstrap`
|
||||
store_fork_digest: string -- Encoded `ForkDigest`-context of `store` object being tested
|
||||
```
|
||||
|
||||
### `bootstrap.ssz_snappy`
|
||||
|
||||
An SSZ-snappy encoded `bootstrap` object of type `LightClientBootstrap` to initialize a local `store` object of type `LightClientStore` using `initialize_light_client_store(trusted_block_rooot, bootstrap)`.
|
||||
An SSZ-snappy encoded `bootstrap` object of type `LightClientBootstrap` to initialize a local `store` object of type `LightClientStore` with `store_fork_digest` using `initialize_light_client_store(trusted_block_rooot, bootstrap)`. The SSZ type can be determined from `bootstrap_fork_digest`.
|
||||
|
||||
If `store_fork_digest` differs from `bootstrap_fork_digest`, the `bootstrap` object may need to be upgraded before initializing the store.
|
||||
|
||||
### `steps.yaml`
|
||||
|
||||
@@ -25,12 +29,14 @@ Each step includes checks to verify the expected impact on the `store` object.
|
||||
|
||||
```yaml
|
||||
finalized_header: {
|
||||
slot: int, -- Integer value from store.finalized_header.slot
|
||||
beacon_root: string, -- Encoded 32-byte value from store.finalized_header.hash_tree_root()
|
||||
slot: int, -- Integer value from store.finalized_header.beacon.slot
|
||||
beacon_root: string, -- Encoded 32-byte value from store.finalized_header.beacon.hash_tree_root()
|
||||
execution_root: string, -- From Capella onward; get_lc_execution_root(store.finalized_header)
|
||||
}
|
||||
optimistic_header: {
|
||||
slot: int, -- Integer value from store.optimistic_header.slot
|
||||
beacon_root: string, -- Encoded 32-byte value from store.optimistic_header.hash_tree_root()
|
||||
slot: int, -- Integer value from store.optimistic_header.beacon.slot
|
||||
beacon_root: string, -- Encoded 32-byte value from store.optimistic_header.beacon.hash_tree_root()
|
||||
execution_root: string, -- From Capella onward; get_lc_execution_root(store.optimistic_header)
|
||||
}
|
||||
```
|
||||
|
||||
@@ -54,6 +60,7 @@ The function `process_light_client_update(store, update, current_slot, genesis_v
|
||||
|
||||
```yaml
|
||||
{
|
||||
update_fork_digest: string -- Encoded `ForkDigest`-context of `update`
|
||||
update: string -- name of the `*.ssz_snappy` file to load
|
||||
as a `LightClientUpdate` object
|
||||
current_slot: int -- integer, decimal
|
||||
@@ -61,6 +68,21 @@ The function `process_light_client_update(store, update, current_slot, genesis_v
|
||||
}
|
||||
```
|
||||
|
||||
If `store_fork_digest` differs from `update_fork_digest`, the `update` object may need to be upgraded before processing the update.
|
||||
|
||||
After this step, the `store` object may have been updated.
|
||||
|
||||
#### `upgrade_store`
|
||||
|
||||
The `store` should be upgraded to reflect the new `store_fork_digest`:
|
||||
|
||||
```yaml
|
||||
{
|
||||
store_fork_digest: string -- Encoded `ForkDigest`-context of `store`
|
||||
    checks: {<store_attribute>: value} -- the assertions.
|
||||
}
|
||||
```
|
||||
|
||||
After this step, the `store` object may have been updated.
|
||||
|
||||
## Condition
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user