Merge branch 'ethereum:dev' into patch-2

omahs
2023-03-25 12:19:31 +01:00
committed by GitHub
63 changed files with 4760 additions and 1105 deletions

.gitignore (vendored): 1 change
View File

@@ -20,6 +20,7 @@ tests/core/pyspec/eth2spec/altair/
tests/core/pyspec/eth2spec/bellatrix/
tests/core/pyspec/eth2spec/capella/
tests/core/pyspec/eth2spec/deneb/
tests/core/pyspec/eth2spec/eip6110/
# coverage reports
.htmlcov

View File

@@ -23,16 +23,18 @@ GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%venv, $(GENER
# To check generator matching:
#$(info $$GENERATOR_TARGETS is [${GENERATOR_TARGETS}])
MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/phase0/*.md) \
    $(wildcard $(SPEC_DIR)/altair/*.md) $(wildcard $(SPEC_DIR)/altair/**/*.md) \
    $(wildcard $(SPEC_DIR)/bellatrix/*.md) \
    $(wildcard $(SPEC_DIR)/capella/*.md) $(wildcard $(SPEC_DIR)/capella/**/*.md) \
    $(wildcard $(SPEC_DIR)/deneb/*.md) $(wildcard $(SPEC_DIR)/deneb/**/*.md) \
    $(wildcard $(SPEC_DIR)/_features/custody/*.md) \
    $(wildcard $(SPEC_DIR)/_features/das/*.md) \
    $(wildcard $(SPEC_DIR)/_features/sharding/*.md) \
MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/*/*.md) \
    $(wildcard $(SPEC_DIR)/*/*/*.md) \
    $(wildcard $(SPEC_DIR)/_features/*/*.md) \
    $(wildcard $(SPEC_DIR)/_features/*/*/*.md) \
    $(wildcard $(SSZ_DIR)/*.md)
ALL_EXECUTABLE_SPECS = phase0 altair bellatrix capella deneb eip6110
# The parameters for commands. Use `foreach` to avoid listing specs again.
COVERAGE_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPECS), --cov=eth2spec.$S.$(TEST_PRESET_TYPE))
PYLINT_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPECS), ./eth2spec/$S)
MYPY_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPECS), -p eth2spec.$S)
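# For example, with TEST_PRESET_TYPE=minimal (an assumed value here), COVERAGE_SCOPE expands to:
#   --cov=eth2spec.phase0.minimal --cov=eth2spec.altair.minimal ... --cov=eth2spec.eip6110.minimal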
COV_HTML_OUT=.htmlcov
COV_HTML_OUT_DIR=$(PY_SPEC_DIR)/$(COV_HTML_OUT)
COV_INDEX_FILE=$(COV_HTML_OUT_DIR)/index.html
@@ -63,15 +65,14 @@ partial_clean:
    rm -f .coverage
    rm -rf $(PY_SPEC_DIR)/.pytest_cache
    rm -rf $(DEPOSIT_CONTRACT_TESTER_DIR)/.pytest_cache
    rm -rf $(ETH2SPEC_MODULE_DIR)/phase0
    rm -rf $(ETH2SPEC_MODULE_DIR)/altair
    rm -rf $(ETH2SPEC_MODULE_DIR)/bellatrix
    rm -rf $(ETH2SPEC_MODULE_DIR)/capella
    rm -rf $(ETH2SPEC_MODULE_DIR)/deneb
    rm -rf $(COV_HTML_OUT_DIR)
    rm -rf $(TEST_REPORT_DIR)
    rm -rf eth2spec.egg-info dist build
    rm -rf build
    rm -rf build;
    @for spec_name in $(ALL_EXECUTABLE_SPECS) ; do \
        echo $$spec_name; \
        rm -rf $(ETH2SPEC_MODULE_DIR)/$$spec_name; \
    done

clean: partial_clean
    rm -rf venv
@@ -105,21 +106,21 @@ install_test:
# Testing against `minimal` or `mainnet` config by default
test: pyspec
    . venv/bin/activate; cd $(PY_SPEC_DIR); \
    python3 -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.$(TEST_PRESET_TYPE) --cov=eth2spec.altair.$(TEST_PRESET_TYPE) --cov=eth2spec.bellatrix.$(TEST_PRESET_TYPE) --cov=eth2spec.capella.$(TEST_PRESET_TYPE) --cov=eth2spec.deneb.$(TEST_PRESET_TYPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
    python3 -m pytest -n 4 --disable-bls $(COVERAGE_SCOPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec

# Testing against `minimal` or `mainnet` config by default
find_test: pyspec
    . venv/bin/activate; cd $(PY_SPEC_DIR); \
    python3 -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.$(TEST_PRESET_TYPE) --cov=eth2spec.altair.$(TEST_PRESET_TYPE) --cov=eth2spec.bellatrix.$(TEST_PRESET_TYPE) --cov=eth2spec.capella.$(TEST_PRESET_TYPE) --cov=eth2spec.deneb.$(TEST_PRESET_TYPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
    python3 -m pytest -k=$(K) --disable-bls $(COVERAGE_SCOPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec

citest: pyspec
    mkdir -p $(TEST_REPORT_DIR);
ifdef fork
    . venv/bin/activate; cd $(PY_SPEC_DIR); \
    python3 -m pytest -n 16 --bls-type=milagro --preset=$(TEST_PRESET_TYPE) --fork=$(fork) --junitxml=test-reports/test_results.xml eth2spec
    python3 -m pytest -n 16 --bls-type=fastest --preset=$(TEST_PRESET_TYPE) --fork=$(fork) --junitxml=test-reports/test_results.xml eth2spec
else
    . venv/bin/activate; cd $(PY_SPEC_DIR); \
    python3 -m pytest -n 16 --bls-type=milagro --preset=$(TEST_PRESET_TYPE) --junitxml=test-reports/test_results.xml eth2spec
    python3 -m pytest -n 16 --bls-type=fastest --preset=$(TEST_PRESET_TYPE) --junitxml=test-reports/test_results.xml eth2spec
endif
@@ -137,13 +138,11 @@ check_toc: $(MARKDOWN_FILES:=.toc)
codespell:
    codespell . --skip "./.git,./venv,$(PY_SPEC_DIR)/.mypy_cache" -I .codespell-whitelist

# TODO: add future protocol upgrade patch packages to linting.
# NOTE: we use `pylint` just for catching unused arguments in spec code
lint: pyspec
    . venv/bin/activate; cd $(PY_SPEC_DIR); \
    flake8 --config $(LINTER_CONFIG_FILE) ./eth2spec \
    && pylint --rcfile $(LINTER_CONFIG_FILE) ./eth2spec/phase0 ./eth2spec/altair ./eth2spec/bellatrix ./eth2spec/capella ./eth2spec/deneb \
    && mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.altair -p eth2spec.bellatrix -p eth2spec.capella -p eth2spec.deneb
    && pylint --rcfile $(LINTER_CONFIG_FILE) $(PYLINT_SCOPE) \
    && mypy --config-file $(LINTER_CONFIG_FILE) $(MYPY_SCOPE)

lint_generators: pyspec
    . venv/bin/activate; cd $(TEST_GENERATORS_DIR); \

View File

@@ -20,15 +20,16 @@ Features are researched and developed in parallel, and then consolidated into se
| 0 | **Phase0** |`0` | <ul><li>Core</li><ul><li>[The beacon chain](specs/phase0/beacon-chain.md)</li><li>[Deposit contract](specs/phase0/deposit-contract.md)</li><li>[Beacon chain fork choice](specs/phase0/fork-choice.md)</li></ul><li>Additions</li><ul><li>[Honest validator guide](specs/phase0/validator.md)</li><li>[P2P networking](specs/phase0/p2p-interface.md)</li><li>[Weak subjectivity](specs/phase0/weak-subjectivity.md)</li></ul></ul> |
| 1 | **Altair** | `74240` | <ul><li>Core</li><ul><li>[Beacon chain changes](specs/altair/beacon-chain.md)</li><li>[Altair fork](specs/altair/fork.md)</li></ul><li>Additions</li><ul><li>[Light client sync protocol](specs/altair/light-client/sync-protocol.md) ([full node](specs/altair/light-client/full-node.md), [light client](specs/altair/light-client/light-client.md), [networking](specs/altair/light-client/p2p-interface.md))</li><li>[Honest validator guide changes](specs/altair/validator.md)</li><li>[P2P networking](specs/altair/p2p-interface.md)</li></ul></ul> |
| 2 | **Bellatrix** <br/> (["The Merge"](https://ethereum.org/en/upgrades/merge/)) | `144896` | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/bellatrix/beacon-chain.md)</li><li>[Bellatrix fork](specs/bellatrix/fork.md)</li><li>[Fork choice changes](specs/bellatrix/fork-choice.md)</li></ul><li>Additions</li><ul><li>[Honest validator guide changes](specs/bellatrix/validator.md)</li><li>[P2P networking](specs/bellatrix/p2p-interface.md)</li></ul></ul> |
| 3 | **Capella** | `194048` | <ul><li>Core</li><ul><li>[Beacon chain changes](specs/capella/beacon-chain.md)</li><li>[Capella fork](specs/capella/fork.md)</li></ul><li>Additions</li><ul><li>[Light client sync protocol changes](specs/capella/light-client/sync-protocol.md) ([fork](specs/capella/light-client/fork.md), [full node](specs/capella/light-client/full-node.md), [networking](specs/capella/light-client/p2p-interface.md))</li></ul><ul><li>[Validator additions](specs/capella/validator.md)</li><li>[P2P networking](specs/capella/p2p-interface.md)</li></ul></ul> |
### In-development Specifications
| Code Name or Topic | Specs | Notes |
| - | - | - |
| Capella (tentative) | <ul><li>Core</li><ul><li>[Beacon chain changes](specs/capella/beacon-chain.md)</li><li>[Capella fork](specs/capella/fork.md)</li></ul><li>Additions</li><ul><li>[Light client sync protocol changes](specs/capella/light-client/sync-protocol.md) ([fork](specs/capella/light-client/fork.md), [full node](specs/capella/light-client/full-node.md), [networking](specs/capella/light-client/p2p-interface.md))</li></ul><ul><li>[Validator additions](specs/capella/validator.md)</li><li>[P2P networking](specs/capella/p2p-interface.md)</li></ul></ul> |
| Deneb (tentative) | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/deneb/beacon-chain.md)</li><li>[Deneb fork](specs/deneb/fork.md)</li><li>[Polynomial commitments](specs/deneb/polynomial-commitments.md)</li><li>[Fork choice changes](specs/deneb/fork-choice.md)</li></ul><li>Additions</li><ul><li>[Light client sync protocol changes](specs/deneb/light-client/sync-protocol.md) ([fork](specs/deneb/light-client/fork.md), [full node](specs/deneb/light-client/full-node.md), [networking](specs/deneb/light-client/p2p-interface.md))</li></ul><ul><li>[Honest validator guide changes](specs/deneb/validator.md)</li><li>[P2P networking](specs/deneb/p2p-interface.md)</li></ul></ul> |
| Sharding (outdated) | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/_features/sharding/beacon-chain.md)</li></ul><li>Additions</li><ul><li>[P2P networking](specs/_features/sharding/p2p-interface.md)</li></ul></ul> |
| Custody Game (outdated) | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/_features/custody_game/beacon-chain.md)</li></ul><li>Additions</li><ul><li>[Honest validator guide changes](specs/_features/custody_game/validator.md)</li></ul></ul> | Dependent on sharding |
| Data Availability Sampling (outdated) | <ul><li>Core</li><ul><li>[Core types and functions](specs/_features/das/das-core.md)</li><li>[Fork choice changes](specs/_features/das/fork-choice.md)</li></ul><li>Additions</li><ul><li>[P2P Networking](specs/_features/das/p2p-interface.md)</li><li>[Sampling process](specs/_features/das/sampling.md)</li></ul></ul> | <ul><li> Dependent on sharding</li><li>[Technical explainer](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD)</li></ul> |
| EIP-6110 | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/_features/eip6110/beacon-chain.md)</li><li>[EIP-6110 fork](specs/_features/eip6110/fork.md)</li></ul><li>Additions</li><ul><li>[Honest validator guide changes](specs/_features/eip6110/validator.md)</li></ul></ul> |
### Accompanying documents can be found in [specs](specs) and include:

View File

@@ -8,4 +8,4 @@ Please see [Releases](https://github.com/ethereum/consensus-specs/releases/). We
**Please do not file a public ticket** mentioning the vulnerability.
To find out how to disclose a vulnerability in the Ethereum Consensus Layer visit [https://eth2bounty.ethereum.org](https://eth2bounty.ethereum.org) or email eth2bounty@ethereum.org. Please read the [disclosure page](https://eth2bounty.ethereum.org) for more information about publicly disclosed security vulnerabilities.
To find out how to disclose a vulnerability in the Ethereum Consensus Layer visit [https://ethereum.org/bug-bounty](https://ethereum.org/bug-bounty) or email bounty@ethereum.org. Please read the [disclosure page](https://ethereum.org/bug-bounty) for more information about publicly disclosed security vulnerabilities.

View File

@@ -46,7 +46,7 @@ BELLATRIX_FORK_VERSION: 0x02000000
BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC
# Capella
CAPELLA_FORK_VERSION: 0x03000000
CAPELLA_FORK_EPOCH: 18446744073709551615
CAPELLA_FORK_EPOCH: 194048 # April 12, 2023, 10:27:35pm UTC
# Deneb
DENEB_FORK_VERSION: 0x04000000
DENEB_FORK_EPOCH: 18446744073709551615
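
As a quick, non-normative sanity check on the `CAPELLA_FORK_EPOCH` timestamp above (assuming the mainnet genesis time of 1606824023 and 32 slots of 12 seconds per epoch):

```python
GENESIS_TIME = 1606824023  # mainnet genesis, Dec 1, 2020, 12:00:23pm UTC (assumed here)
SECONDS_PER_SLOT = 12
SLOTS_PER_EPOCH = 32

capella_time = GENESIS_TIME + 194048 * SLOTS_PER_EPOCH * SECONDS_PER_SLOT
print(capella_time)  # 1681338455 -> April 12, 2023, 10:27:35pm UTC
```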

View File

@@ -18,12 +18,6 @@ HYSTERESIS_DOWNWARD_MULTIPLIER: 1
HYSTERESIS_UPWARD_MULTIPLIER: 5
# Fork Choice
# ---------------------------------------------------------------
# 2**3 (= 8)
SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8
# Gwei values
# ---------------------------------------------------------------
# 2**0 * 10**9 (= 1,000,000,000) Gwei

View File

@@ -18,12 +18,6 @@ HYSTERESIS_DOWNWARD_MULTIPLIER: 1
HYSTERESIS_UPWARD_MULTIPLIER: 5
# Fork Choice
# ---------------------------------------------------------------
# 2**1 (= 2)
SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 2
# Gwei values
# ---------------------------------------------------------------
# 2**0 * 10**9 (= 1,000,000,000) Gwei

View File

@@ -47,6 +47,7 @@ ALTAIR = 'altair'
BELLATRIX = 'bellatrix'
CAPELLA = 'capella'
DENEB = 'deneb'
EIP6110 = 'eip6110'
# The helper functions that are used when defining constants
@@ -653,9 +654,9 @@ T = TypeVar('T') # For generic function
    @classmethod
    def sundry_functions(cls) -> str:
        return super().sundry_functions() + '\n\n' + '''
def retrieve_blobs_sidecar(slot: Slot, beacon_block_root: Root) -> PyUnion[BlobsSidecar, str]:
def retrieve_blobs_and_proofs(beacon_block_root: Root) -> PyUnion[Tuple[Blob, KZGProof], Tuple[str, str]]:
    # pylint: disable=unused-argument
    return "TEST"'''
    return ("TEST", "TEST")'''
    @classmethod
    def hardcoded_custom_type_dep_constants(cls, spec_object) -> str:
@@ -667,9 +668,22 @@ def retrieve_blobs_sidecar(slot: Slot, beacon_block_root: Root) -> PyUnion[Blobs
        return {**super().hardcoded_custom_type_dep_constants(spec_object), **constants}
#
# EIP6110SpecBuilder
#
class EIP6110SpecBuilder(CapellaSpecBuilder):
    fork: str = EIP6110

    @classmethod
    def imports(cls, preset_name: str):
        return super().imports(preset_name) + f'''
from eth2spec.capella import {preset_name} as capella
'''

spec_builders = {
    builder.fork: builder
    for builder in (Phase0SpecBuilder, AltairSpecBuilder, BellatrixSpecBuilder, CapellaSpecBuilder, DenebSpecBuilder)
    for builder in (Phase0SpecBuilder, AltairSpecBuilder, BellatrixSpecBuilder, CapellaSpecBuilder, DenebSpecBuilder, EIP6110SpecBuilder)
}
@@ -968,14 +982,14 @@ class PySpecCommand(Command):
        if len(self.md_doc_paths) == 0:
            print("no paths were specified, using default markdown file paths for pyspec"
                  " build (spec fork: %s)" % self.spec_fork)
            if self.spec_fork in (PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB):
            if self.spec_fork in (PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110):
                self.md_doc_paths = """
                    specs/phase0/beacon-chain.md
                    specs/phase0/fork-choice.md
                    specs/phase0/validator.md
                    specs/phase0/weak-subjectivity.md
                """
            if self.spec_fork in (ALTAIR, BELLATRIX, CAPELLA, DENEB):
            if self.spec_fork in (ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110):
                self.md_doc_paths += """
                    specs/altair/light-client/full-node.md
                    specs/altair/light-client/light-client.md
@@ -987,7 +1001,7 @@ class PySpecCommand(Command):
                    specs/altair/validator.md
                    specs/altair/p2p-interface.md
                """
            if self.spec_fork in (BELLATRIX, CAPELLA, DENEB):
            if self.spec_fork in (BELLATRIX, CAPELLA, DENEB, EIP6110):
                self.md_doc_paths += """
                    specs/bellatrix/beacon-chain.md
                    specs/bellatrix/fork.md
@@ -996,7 +1010,7 @@ class PySpecCommand(Command):
                    specs/bellatrix/p2p-interface.md
                    sync/optimistic.md
                """
            if self.spec_fork in (CAPELLA, DENEB):
            if self.spec_fork in (CAPELLA, DENEB, EIP6110):
                self.md_doc_paths += """
                    specs/capella/light-client/fork.md
                    specs/capella/light-client/full-node.md
@@ -1021,6 +1035,11 @@ class PySpecCommand(Command):
                    specs/deneb/p2p-interface.md
                    specs/deneb/validator.md
                """
            if self.spec_fork == EIP6110:
                self.md_doc_paths += """
                    specs/_features/eip6110/beacon-chain.md
                    specs/_features/eip6110/fork.md
                """
        if len(self.md_doc_paths) == 0:
            raise Exception('no markdown files specified, and spec fork "%s" is unknown' % self.spec_fork)
@@ -1174,5 +1193,6 @@ setup(
        RUAMEL_YAML_VERSION,
        "lru-dict==1.1.8",
        MARKO_VERSION,
        "py_arkworks_bls12381==0.3.4",
    ]
)

View File

@@ -0,0 +1,324 @@
# EIP-6110 -- The Beacon Chain
## Table of contents
<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [Introduction](#introduction)
- [Constants](#constants)
  - [Misc](#misc)
- [Preset](#preset)
  - [Execution](#execution)
- [Containers](#containers)
  - [New containers](#new-containers)
    - [`DepositReceipt`](#depositreceipt)
  - [Extended Containers](#extended-containers)
    - [`ExecutionPayload`](#executionpayload)
    - [`ExecutionPayloadHeader`](#executionpayloadheader)
    - [`BeaconState`](#beaconstate)
- [Beacon chain state transition function](#beacon-chain-state-transition-function)
  - [Block processing](#block-processing)
    - [Modified `process_operations`](#modified-process_operations)
    - [New `process_deposit_receipt`](#new-process_deposit_receipt)
    - [Modified `process_execution_payload`](#modified-process_execution_payload)
- [Testing](#testing)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->
## Introduction
This is the beacon chain specification of the in-protocol deposit processing mechanism.
This mechanism relies on the changes proposed by [EIP-6110](http://eips.ethereum.org/EIPS/eip-6110).
*Note:* This specification is built upon [Capella](../../capella/beacon-chain.md) and is under active development.
## Constants
The following values are (non-configurable) constants used throughout the specification.
### Misc
| Name | Value |
| - | - |
| `UNSET_DEPOSIT_RECEIPTS_START_INDEX` | `uint64(2**64 - 1)` |
## Preset
### Execution
| Name | Value | Description |
| - | - | - |
| `MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD` | `uint64(2**13)` (= 8,192) | Maximum number of deposit receipts allowed in each payload |
## Containers
### New containers
#### `DepositReceipt`
```python
class DepositReceipt(Container):
    pubkey: BLSPubkey
    withdrawal_credentials: Bytes32
    amount: Gwei
    signature: BLSSignature
    index: uint64
```
### Extended Containers
#### `ExecutionPayload`
```python
class ExecutionPayload(Container):
    # Execution block header fields
    parent_hash: Hash32
    fee_recipient: ExecutionAddress
    state_root: Bytes32
    receipts_root: Bytes32
    logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM]
    prev_randao: Bytes32
    block_number: uint64
    gas_limit: uint64
    gas_used: uint64
    timestamp: uint64
    extra_data: ByteList[MAX_EXTRA_DATA_BYTES]
    base_fee_per_gas: uint256
    # Extra payload fields
    block_hash: Hash32
    transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD]
    withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]
    deposit_receipts: List[DepositReceipt, MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD]  # [New in EIP-6110]
```
#### `ExecutionPayloadHeader`
```python
class ExecutionPayloadHeader(Container):
    # Execution block header fields
    parent_hash: Hash32
    fee_recipient: ExecutionAddress
    state_root: Bytes32
    receipts_root: Bytes32
    logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM]
    prev_randao: Bytes32
    block_number: uint64
    gas_limit: uint64
    gas_used: uint64
    timestamp: uint64
    extra_data: ByteList[MAX_EXTRA_DATA_BYTES]
    base_fee_per_gas: uint256
    # Extra payload fields
    block_hash: Hash32
    transactions_root: Root
    withdrawals_root: Root
    deposit_receipts_root: Root  # [New in EIP-6110]
```
#### `BeaconState`
```python
class BeaconState(Container):
    # Versioning
    genesis_time: uint64
    genesis_validators_root: Root
    slot: Slot
    fork: Fork
    # History
    latest_block_header: BeaconBlockHeader
    block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
    state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
    historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT]
    # Eth1
    eth1_data: Eth1Data
    eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH]
    eth1_deposit_index: uint64
    # Registry
    validators: List[Validator, VALIDATOR_REGISTRY_LIMIT]
    balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT]
    # Randomness
    randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR]
    # Slashings
    slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR]  # Per-epoch sums of slashed effective balances
    # Participation
    previous_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT]
    current_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT]
    # Finality
    justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH]  # Bit set for every recent justified epoch
    previous_justified_checkpoint: Checkpoint
    current_justified_checkpoint: Checkpoint
    finalized_checkpoint: Checkpoint
    # Inactivity
    inactivity_scores: List[uint64, VALIDATOR_REGISTRY_LIMIT]
    # Sync
    current_sync_committee: SyncCommittee
    next_sync_committee: SyncCommittee
    # Execution
    latest_execution_payload_header: ExecutionPayloadHeader
    # Withdrawals
    next_withdrawal_index: WithdrawalIndex
    next_withdrawal_validator_index: ValidatorIndex
    # Deep history valid from Capella onwards
    historical_summaries: List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT]
    # [New in EIP-6110]
    deposit_receipts_start_index: uint64
```
## Beacon chain state transition function
### Block processing
```python
def process_block(state: BeaconState, block: BeaconBlock) -> None:
    process_block_header(state, block)
    if is_execution_enabled(state, block.body):
        process_withdrawals(state, block.body.execution_payload)
        process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE)  # [Modified in EIP-6110]
    process_randao(state, block.body)
    process_eth1_data(state, block.body)
    process_operations(state, block.body)  # [Modified in EIP-6110]
    process_sync_aggregate(state, block.body.sync_aggregate)
```
#### Modified `process_operations`
*Note*: The function `process_operations` is modified to process `DepositReceipt` operations included in the payload.
```python
def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
    # [Modified in EIP-6110]
    # Disable former deposit mechanism once all prior deposits are processed
    eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_receipts_start_index)
    if state.eth1_deposit_index < eth1_deposit_index_limit:
        assert len(body.deposits) == min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
    else:
        assert len(body.deposits) == 0

    def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
        for operation in operations:
            fn(state, operation)

    for_ops(body.proposer_slashings, process_proposer_slashing)
    for_ops(body.attester_slashings, process_attester_slashing)
    for_ops(body.attestations, process_attestation)
    for_ops(body.deposits, process_deposit)  # [Modified in EIP-6110]
    for_ops(body.voluntary_exits, process_voluntary_exit)
    for_ops(body.bls_to_execution_changes, process_bls_to_execution_change)

    # [New in EIP-6110]
    if is_execution_enabled(state, body):
        for_ops(body.execution_payload.deposit_receipts, process_deposit_receipt)
```
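
To make the deposit-transition logic above concrete, here is a small non-normative walkthrough with hypothetical numbers (values and variable names are illustrative only; `MAX_DEPOSITS` is `2**4` (= 16) per the phase0 preset):

```python
# Hypothetical scenario: 100 deposits were made via the deposit contract, the first
# in-payload deposit receipt carried index 90, and the state has processed 85 deposits so far.
MAX_DEPOSITS = 16
eth1_data_deposit_count = 100
deposit_receipts_start_index = 90
eth1_deposit_index = 85

eth1_deposit_index_limit = min(eth1_data_deposit_count, deposit_receipts_start_index)  # == 90
# The 5 legacy deposits with indices 85..89 must still be supplied via the old Eth1
# mechanism; everything from index 90 onward arrives as deposit receipts in the payload.
assert eth1_deposit_index < eth1_deposit_index_limit
expected_deposits = min(MAX_DEPOSITS, eth1_deposit_index_limit - eth1_deposit_index)
assert expected_deposits == 5
```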
#### New `process_deposit_receipt`
```python
def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) -> None:
    # Set deposit receipt start index
    if state.deposit_receipts_start_index == UNSET_DEPOSIT_RECEIPTS_START_INDEX:
        state.deposit_receipts_start_index = deposit_receipt.index

    apply_deposit(
        state=state,
        pubkey=deposit_receipt.pubkey,
        withdrawal_credentials=deposit_receipt.withdrawal_credentials,
        amount=deposit_receipt.amount,
        signature=deposit_receipt.signature,
    )
```
#### Modified `process_execution_payload`
*Note*: The function `process_execution_payload` is modified to use the new `ExecutionPayloadHeader` type.
```python
def process_execution_payload(state: BeaconState, payload: ExecutionPayload, execution_engine: ExecutionEngine) -> None:
    # Verify consistency of the parent hash with respect to the previous execution payload header
    if is_merge_transition_complete(state):
        assert payload.parent_hash == state.latest_execution_payload_header.block_hash
    # Verify prev_randao
    assert payload.prev_randao == get_randao_mix(state, get_current_epoch(state))
    # Verify timestamp
    assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)
    # Verify the execution payload is valid
    assert execution_engine.notify_new_payload(payload)
    # Cache execution payload header
    state.latest_execution_payload_header = ExecutionPayloadHeader(
        parent_hash=payload.parent_hash,
        fee_recipient=payload.fee_recipient,
        state_root=payload.state_root,
        receipts_root=payload.receipts_root,
        logs_bloom=payload.logs_bloom,
        prev_randao=payload.prev_randao,
        block_number=payload.block_number,
        gas_limit=payload.gas_limit,
        gas_used=payload.gas_used,
        timestamp=payload.timestamp,
        extra_data=payload.extra_data,
        base_fee_per_gas=payload.base_fee_per_gas,
        block_hash=payload.block_hash,
        transactions_root=hash_tree_root(payload.transactions),
        withdrawals_root=hash_tree_root(payload.withdrawals),
        deposit_receipts_root=hash_tree_root(payload.deposit_receipts),  # [New in EIP-6110]
    )
```
## Testing
*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure EIP-6110 testing only.
Modifications include:
1. Use `EIP6110_FORK_VERSION` as the previous and current fork version.
2. Utilize the EIP-6110 `BeaconBlockBody` when constructing the initial `latest_block_header`.
3. Add `deposit_receipts_start_index` variable to the genesis state initialization.
```python
def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
                                      eth1_timestamp: uint64,
                                      deposits: Sequence[Deposit],
                                      execution_payload_header: ExecutionPayloadHeader=ExecutionPayloadHeader()
                                      ) -> BeaconState:
    fork = Fork(
        previous_version=EIP6110_FORK_VERSION,  # [Modified in EIP6110] for testing only
        current_version=EIP6110_FORK_VERSION,  # [Modified in EIP6110]
        epoch=GENESIS_EPOCH,
    )
    state = BeaconState(
        genesis_time=eth1_timestamp + GENESIS_DELAY,
        fork=fork,
        eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))),
        latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
        randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR,  # Seed RANDAO with Eth1 entropy
        deposit_receipts_start_index=UNSET_DEPOSIT_RECEIPTS_START_INDEX,  # [New in EIP6110]
    )

    # Process deposits
    leaves = list(map(lambda deposit: deposit.data, deposits))
    for index, deposit in enumerate(deposits):
        deposit_data_list = List[DepositData, 2**DEPOSIT_CONTRACT_TREE_DEPTH](*leaves[:index + 1])
        state.eth1_data.deposit_root = hash_tree_root(deposit_data_list)
        process_deposit(state, deposit)

    # Process activations
    for index, validator in enumerate(state.validators):
        balance = state.balances[index]
        validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
        if validator.effective_balance == MAX_EFFECTIVE_BALANCE:
            validator.activation_eligibility_epoch = GENESIS_EPOCH
            validator.activation_epoch = GENESIS_EPOCH

    # Set genesis validators root for domain separation and chain versioning
    state.genesis_validators_root = hash_tree_root(state.validators)

    # Fill in sync committees
    # Note: A duplicate committee is assigned for the current and next committee at genesis
    state.current_sync_committee = get_next_sync_committee(state)
    state.next_sync_committee = get_next_sync_committee(state)

    # Initialize the execution payload header
    state.latest_execution_payload_header = execution_payload_header

    return state
```

View File

@@ -0,0 +1,142 @@
# EIP-6110 -- Fork Logic
**Notice**: This document is a work-in-progress for researchers and implementers.
## Table of contents
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [Introduction](#introduction)
- [Configuration](#configuration)
- [Helper functions](#helper-functions)
  - [Misc](#misc)
    - [Modified `compute_fork_version`](#modified-compute_fork_version)
- [Fork to EIP-6110](#fork-to-eip-6110)
  - [Fork trigger](#fork-trigger)
  - [Upgrading the state](#upgrading-the-state)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
## Introduction
This document describes the process of the EIP-6110 upgrade.
## Configuration
Warning: this configuration is not definitive.
| Name | Value |
| - | - |
| `EIP6110_FORK_VERSION` | `Version('0x05000000')` |
| `EIP6110_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** |
## Helper functions
### Misc
#### Modified `compute_fork_version`
```python
def compute_fork_version(epoch: Epoch) -> Version:
"""
Return the fork version at the given ``epoch``.
"""
if epoch >= EIP6110_FORK_EPOCH:
return EIP6110_FORK_EPOCH
if epoch >= CAPELLA_FORK_EPOCH:
return CAPELLA_FORK_VERSION
if epoch >= BELLATRIX_FORK_EPOCH:
return BELLATRIX_FORK_VERSION
if epoch >= ALTAIR_FORK_EPOCH:
return ALTAIR_FORK_VERSION
return GENESIS_FORK_VERSION
```
## Fork to EIP-6110
### Fork trigger
TBD. This fork is defined for testing purposes; the EIP may be combined with another consensus-layer upgrade.
For now, we assume the condition will be triggered at epoch `EIP6110_FORK_EPOCH`.
Note that for pure EIP-6110 networks, we don't apply `upgrade_to_eip6110` since they start with the EIP-6110 version logic.
### Upgrading the state
If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == EIP6110_FORK_EPOCH`,
an irregular state change is made to upgrade to EIP-6110.
```python
def upgrade_to_eip6110(pre: capella.BeaconState) -> BeaconState:
    epoch = capella.get_current_epoch(pre)
    latest_execution_payload_header = ExecutionPayloadHeader(
        parent_hash=pre.latest_execution_payload_header.parent_hash,
        fee_recipient=pre.latest_execution_payload_header.fee_recipient,
        state_root=pre.latest_execution_payload_header.state_root,
        receipts_root=pre.latest_execution_payload_header.receipts_root,
        logs_bloom=pre.latest_execution_payload_header.logs_bloom,
        prev_randao=pre.latest_execution_payload_header.prev_randao,
        block_number=pre.latest_execution_payload_header.block_number,
        gas_limit=pre.latest_execution_payload_header.gas_limit,
        gas_used=pre.latest_execution_payload_header.gas_used,
        timestamp=pre.latest_execution_payload_header.timestamp,
        extra_data=pre.latest_execution_payload_header.extra_data,
        base_fee_per_gas=pre.latest_execution_payload_header.base_fee_per_gas,
        block_hash=pre.latest_execution_payload_header.block_hash,
        transactions_root=pre.latest_execution_payload_header.transactions_root,
        withdrawals_root=pre.latest_execution_payload_header.withdrawals_root,
        deposit_receipts_root=Root(),  # [New in EIP-6110]
    )
    post = BeaconState(
        # Versioning
        genesis_time=pre.genesis_time,
        genesis_validators_root=pre.genesis_validators_root,
        slot=pre.slot,
        fork=Fork(
            previous_version=pre.fork.current_version,
            current_version=EIP6110_FORK_VERSION,  # [Modified in EIP-6110]
            epoch=epoch,
        ),
        # History
        latest_block_header=pre.latest_block_header,
        block_roots=pre.block_roots,
        state_roots=pre.state_roots,
        historical_roots=pre.historical_roots,
        # Eth1
        eth1_data=pre.eth1_data,
        eth1_data_votes=pre.eth1_data_votes,
        eth1_deposit_index=pre.eth1_deposit_index,
        # Registry
        validators=pre.validators,
        balances=pre.balances,
        # Randomness
        randao_mixes=pre.randao_mixes,
        # Slashings
        slashings=pre.slashings,
        # Participation
        previous_epoch_participation=pre.previous_epoch_participation,
        current_epoch_participation=pre.current_epoch_participation,
        # Finality
        justification_bits=pre.justification_bits,
        previous_justified_checkpoint=pre.previous_justified_checkpoint,
        current_justified_checkpoint=pre.current_justified_checkpoint,
        finalized_checkpoint=pre.finalized_checkpoint,
        # Inactivity
        inactivity_scores=pre.inactivity_scores,
        # Sync
        current_sync_committee=pre.current_sync_committee,
        next_sync_committee=pre.next_sync_committee,
        # Execution-layer
        latest_execution_payload_header=latest_execution_payload_header,  # [Modified in EIP-6110]
        # Withdrawals
        next_withdrawal_index=pre.next_withdrawal_index,
        next_withdrawal_validator_index=pre.next_withdrawal_validator_index,
        # Deep history valid from Capella onwards
        historical_summaries=pre.historical_summaries,
        # EIP-6110
        deposit_receipts_start_index=UNSET_DEPOSIT_RECEIPTS_START_INDEX,  # [New in EIP-6110]
    )
    return post
```
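
A minimal, non-normative sketch of applying this irregular state change at the boundary described above (client-side pseudocode; `state` is assumed to be the `capella.BeaconState` held at the epoch boundary):

```python
# Sketch only: apply the irregular state change when crossing the fork epoch.
if state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == EIP6110_FORK_EPOCH:
    state = upgrade_to_eip6110(state)
```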

View File

@@ -0,0 +1,42 @@
# EIP-6110 -- Honest Validator
## Table of contents
<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Block proposal](#block-proposal)
  - [Deposits](#deposits)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->
## Introduction
This document represents the changes to be made in the code of an "honest validator" to implement EIP-6110.
## Prerequisites
This document is an extension of the [Capella -- Honest Validator](../../capella/validator.md) guide.
All behaviors and definitions defined in this document, and documents it extends, carry over unless explicitly noted or overridden.
All terminology, constants, functions, and protocol mechanics defined in the updated Beacon Chain doc of [EIP-6110](./beacon-chain.md) are requisite for this document and used throughout.
Please see the related Beacon Chain doc before continuing and use it as a reference throughout.
## Block proposal
### Deposits
The expected number of deposits MUST be changed from `min(MAX_DEPOSITS, eth1_data.deposit_count - state.eth1_deposit_index)` to the result of the following function:
```python
def get_eth1_deposit_count(state: BeaconState) -> uint64:
    eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_receipts_start_index)
    if state.eth1_deposit_index < eth1_deposit_index_limit:
        return min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
    else:
        return uint64(0)
```
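
As a non-normative illustration, a proposer's deposit-packing step might use this function roughly as follows (`pending_deposits` is an assumed, implementation-defined queue of Merkle-proven deposits ordered by index):

```python
def select_deposits_for_block(state: BeaconState,
                              pending_deposits: Sequence[Deposit]) -> Sequence[Deposit]:
    # Include exactly the number of legacy deposits the state transition expects.
    return pending_deposits[:get_eth1_deposit_count(state)]
```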

View File

@@ -43,7 +43,7 @@
    - [Modified `slash_validator`](#modified-slash_validator)
  - [Block processing](#block-processing)
    - [Modified `process_attestation`](#modified-process_attestation)
    - [Modified `process_deposit`](#modified-process_deposit)
    - [Modified `apply_deposit`](#modified-apply_deposit)
    - [Sync aggregate processing](#sync-aggregate-processing)
  - [Epoch processing](#epoch-processing)
    - [Justification and finalization](#justification-and-finalization)
@@ -489,39 +489,29 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
```
#### Modified `process_deposit`
#### Modified `apply_deposit`
*Note*: The function `process_deposit` is modified to initialize `inactivity_scores`, `previous_epoch_participation`, and `current_epoch_participation`.
*Note*: The function `apply_deposit` is modified to initialize `inactivity_scores`, `previous_epoch_participation`, and `current_epoch_participation`.
```python
def process_deposit(state: BeaconState, deposit: Deposit) -> None:
    # Verify the Merkle branch
    assert is_valid_merkle_branch(
        leaf=hash_tree_root(deposit.data),
        branch=deposit.proof,
        depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1,  # Add 1 for the List length mix-in
        index=state.eth1_deposit_index,
        root=state.eth1_data.deposit_root,
    )

    # Deposits must be processed in order
    state.eth1_deposit_index += 1

    pubkey = deposit.data.pubkey
    amount = deposit.data.amount
def apply_deposit(state: BeaconState,
                  pubkey: BLSPubkey,
                  withdrawal_credentials: Bytes32,
                  amount: uint64,
                  signature: BLSSignature) -> None:
    validator_pubkeys = [validator.pubkey for validator in state.validators]
    if pubkey not in validator_pubkeys:
        # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
        deposit_message = DepositMessage(
            pubkey=deposit.data.pubkey,
            withdrawal_credentials=deposit.data.withdrawal_credentials,
            amount=deposit.data.amount,
            pubkey=pubkey,
            withdrawal_credentials=withdrawal_credentials,
            amount=amount,
        )
        domain = compute_domain(DOMAIN_DEPOSIT)  # Fork-agnostic domain since deposits are valid across forks
        signing_root = compute_signing_root(deposit_message, domain)
        # Initialize validator if the deposit signature is valid
        if bls.Verify(pubkey, signing_root, deposit.data.signature):
            state.validators.append(get_validator_from_deposit(deposit))
        if bls.Verify(pubkey, signing_root, signature):
            state.validators.append(get_validator_from_deposit(pubkey, withdrawal_credentials, amount))
            state.balances.append(amount)
            # [New in Altair]
            state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000))

View File

@@ -387,7 +387,8 @@ def validate_light_client_update(store: LightClientStore,
        pubkey for (bit, pubkey) in zip(sync_aggregate.sync_committee_bits, sync_committee.pubkeys)
        if bit
    ]
    fork_version = compute_fork_version(compute_epoch_at_slot(update.signature_slot))
    fork_version_slot = max(update.signature_slot, Slot(1)) - Slot(1)
    fork_version = compute_fork_version(compute_epoch_at_slot(fork_version_slot))
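    # Note: the sync aggregate signs the block of the previous slot, so the signing
    # domain uses the fork version at `signature_slot - 1` (clamped at slot 0).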
    domain = compute_domain(DOMAIN_SYNC_COMMITTEE, fork_version, genesis_validators_root)
    signing_root = compute_signing_root(update.attested_header.beacon, domain)
    assert bls.FastAggregateVerify(participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature)

View File

@@ -174,6 +174,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
    # Check the block is valid and compute the post-state
    state = pre_state.copy()
    block_root = hash_tree_root(block)
    state_transition(state, signed_block, True)

    # [New in Bellatrix]
@@ -181,9 +182,9 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
        validate_merge_block(block)

    # Add new block to the store
    store.blocks[hash_tree_root(block)] = block
    store.blocks[block_root] = block
    # Add new state for this block to the store
    store.block_states[hash_tree_root(block)] = state
    store.block_states[block_root] = state

    # Add proposer score boost if the block is timely
    time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
@@ -191,15 +192,9 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
    if get_current_slot(store) == block.slot and is_before_attesting_interval:
        store.proposer_boost_root = hash_tree_root(block)

    # Update justified checkpoint
    if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
        if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
            store.best_justified_checkpoint = state.current_justified_checkpoint
        if should_update_justified_checkpoint(store, state.current_justified_checkpoint):
            store.justified_checkpoint = state.current_justified_checkpoint
    # Update checkpoints in store if necessary
    update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)

    # Update finalized checkpoint
    if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
        store.finalized_checkpoint = state.finalized_checkpoint
        store.justified_checkpoint = state.current_justified_checkpoint

    # Eagerly compute unrealized justification and finality.
    compute_pulled_up_tip(store, block_root)
```

View File

@@ -27,7 +27,7 @@ Warning: this configuration is not definitive.
| Name | Value |
| - | - |
| `CAPELLA_FORK_VERSION` | `Version('0x03000000')` |
| `CAPELLA_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** |
| `CAPELLA_FORK_EPOCH` | `Epoch(194048)` (April 12, 2023, 10:27:35pm UTC) |
## Helper functions

View File

@@ -11,6 +11,7 @@
- [Introduction](#introduction)
- [Custom types](#custom-types)
- [Constants](#constants)
  - [Domain types](#domain-types)
  - [Blob](#blob)
- [Preset](#preset)
  - [Execution](#execution)
@@ -44,15 +45,22 @@ This upgrade adds blobs to the beacon chain as part of Deneb. This is an extensi
| Name | SSZ equivalent | Description |
| - | - | - |
| `VersionedHash` | `Bytes32` | |
| `BlobIndex` | `uint64` | |
## Constants
### Domain types
| Name | Value |
| - | - |
| `DOMAIN_BLOB_SIDECAR` | `DomainType('0x0B000000')` |
### Blob
| Name | Value |
| - | - |
| `BLOB_TX_TYPE` | `uint8(0x05)` |
| `VERSIONED_HASH_VERSION_KZG` | `Bytes1('0x01')` |
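
For reference, `VERSIONED_HASH_VERSION_KZG` is the one-byte prefix used when deriving versioned hashes from KZG commitments; the Deneb beacon chain spec derives them as follows (reproduced here for context):

```python
def kzg_commitment_to_versioned_hash(kzg_commitment: KZGCommitment) -> VersionedHash:
    return VERSIONED_HASH_VERSION_KZG + hash(kzg_commitment)[1:]
```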
## Preset
@@ -249,7 +257,7 @@ def process_blob_kzg_commitments(state: BeaconState, body: BeaconBlockBody) -> N
*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure Deneb testing only.
The `BeaconState` initialization is unchanged, except for the use of the updated `deneb.BeaconBlockBody` type
when initializing the first body-root.
```python

View File

@@ -7,9 +7,8 @@
- [Introduction](#introduction)
- [Containers](#containers)
  - [`BlobsSidecar`](#blobssidecar)
- [Helpers](#helpers)
  - [`validate_blobs_sidecar`](#validate_blobs_sidecar)
  - [`validate_blobs`](#validate_blobs)
  - [`is_data_available`](#is_data_available)
- [Updated fork-choice handlers](#updated-fork-choice-handlers)
  - [`on_block`](#on_block)
@@ -23,55 +22,40 @@ This is the modification of the fork choice accompanying the Deneb upgrade.
## Containers
### `BlobsSidecar`
```python
class BlobsSidecar(Container):
    beacon_block_root: Root
    beacon_block_slot: Slot
    blobs: List[Blob, MAX_BLOBS_PER_BLOCK]
    kzg_aggregated_proof: KZGProof
```
## Helpers
#### `validate_blobs_sidecar`
#### `validate_blobs`
```python
def validate_blobs_sidecar(slot: Slot,
                           beacon_block_root: Root,
                           expected_kzg_commitments: Sequence[KZGCommitment],
                           blobs_sidecar: BlobsSidecar) -> None:
    assert slot == blobs_sidecar.beacon_block_slot
    assert beacon_block_root == blobs_sidecar.beacon_block_root
    blobs = blobs_sidecar.blobs
    # kzg_aggregated_proof = blobs_sidecar.kzg_aggregated_proof
def validate_blobs(expected_kzg_commitments: Sequence[KZGCommitment],
                   blobs: Sequence[Blob],
                   proofs: Sequence[KZGProof]) -> None:
    assert len(expected_kzg_commitments) == len(blobs)
    assert len(blobs) == len(proofs)
    # Disabled because not available before switch to single blob sidecars
    # assert verify_aggregate_kzg_proof(blobs, expected_kzg_commitments, kzg_aggregated_proof)
    assert verify_blob_kzg_proof_batch(blobs, expected_kzg_commitments, proofs)
```
#### `is_data_available`
The implementation of `is_data_available` will become more sophisticated during later scaling upgrades.
Initially, verification requires every verifying actor to retrieve the matching `BlobsSidecar`,
and validate the sidecar with `validate_blobs_sidecar`.
Initially, verification requires every verifying actor to retrieve all matching `Blob`s and `KZGProof`s, and validate them with `validate_blobs`.
The block MUST NOT be considered valid until a valid `BlobsSidecar` has been downloaded. Blocks that have been previously validated as available SHOULD be considered available even if the associated `BlobsSidecar` has subsequently been pruned.
The block MUST NOT be considered valid until all valid `Blob`s have been downloaded. Blocks that have been previously validated as available SHOULD be considered available even if the associated `Blob`s have subsequently been pruned.
```python
def is_data_available(slot: Slot, beacon_block_root: Root, blob_kzg_commitments: Sequence[KZGCommitment]) -> bool:
    # `retrieve_blobs_sidecar` is implementation and context dependent, raises an exception if not available.
    # Note: the p2p network does not guarantee sidecar retrieval outside of `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS`
    sidecar = retrieve_blobs_sidecar(slot, beacon_block_root)
def is_data_available(beacon_block_root: Root, blob_kzg_commitments: Sequence[KZGCommitment]) -> bool:
    # `retrieve_blobs_and_proofs` is implementation and context dependent
    # It returns all the blobs for the given block root, and raises an exception if not available
    # Note: the p2p network does not guarantee sidecar retrieval outside of `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS`
    blobs, proofs = retrieve_blobs_and_proofs(beacon_block_root)

    # For testing, `retrieve_blobs_sidecar` returns "TEST".
    # TODO: Remove it once we have a way to inject `BlobsSidecar` into tests.
    if isinstance(sidecar, str):
    # For testing, `retrieve_blobs_and_proofs` returns ("TEST", "TEST").
    # TODO: Remove it once we have a way to inject `BlobSidecar` into tests.
    if isinstance(blobs, str) or isinstance(proofs, str):
        return True

    validate_blobs_sidecar(slot, beacon_block_root, blob_kzg_commitments, sidecar)
    validate_blobs(blob_kzg_commitments, blobs, proofs)
    return True
```
@@ -79,7 +63,7 @@ def is_data_available(slot: Slot, beacon_block_root: Root, blob_kzg_commitments:
### `on_block`
*Note*: The only modification is the addition of the verification of transition block conditions.
*Note*: The only modification is the addition of the blob data availability check.
```python
def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
@@ -103,10 +87,11 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
    # [New in Deneb]
    # Check if blob data is available
    # If not, this block MAY be queued and subsequently considered when blob data becomes available
    assert is_data_available(block.slot, hash_tree_root(block), block.body.blob_kzg_commitments)
    assert is_data_available(hash_tree_root(block), block.body.blob_kzg_commitments)

    # Check the block is valid and compute the post-state
    state = pre_state.copy()
    block_root = hash_tree_root(block)
    state_transition(state, signed_block, True)

    # Check the merge transition
@@ -114,9 +99,9 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
        validate_merge_block(block)

    # Add new block to the store
    store.blocks[hash_tree_root(block)] = block
    store.blocks[block_root] = block
    # Add new state for this block to the store
    store.block_states[hash_tree_root(block)] = state
    store.block_states[block_root] = state

    # Add proposer score boost if the block is timely
    time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
@@ -124,15 +109,9 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
    if get_current_slot(store) == block.slot and is_before_attesting_interval:
        store.proposer_boost_root = hash_tree_root(block)

    # Update justified checkpoint
    if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
        if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
            store.best_justified_checkpoint = state.current_justified_checkpoint
        if should_update_justified_checkpoint(store, state.current_justified_checkpoint):
            store.justified_checkpoint = state.current_justified_checkpoint
    # Update checkpoints in store if necessary
    update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)

    # Update finalized checkpoint
    if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
        store.finalized_checkpoint = state.finalized_checkpoint
        store.justified_checkpoint = state.current_justified_checkpoint

    # Eagerly compute unrealized justification and finality.
    compute_pulled_up_tip(store, block_root)
```

View File

@@ -64,8 +64,6 @@ Note that for the pure Deneb networks, we don't apply `upgrade_to_deneb` since i
### Upgrading the state
Since the `deneb.BeaconState` format is equal to the `capella.BeaconState` format, we only have to update `BeaconState.fork`.
```python
def upgrade_to_deneb(pre: capella.BeaconState) -> BeaconState:
    epoch = capella.get_current_epoch(pre)
@@ -82,10 +80,10 @@ def upgrade_to_deneb(pre: capella.BeaconState) -> BeaconState:
        timestamp=pre.latest_execution_payload_header.timestamp,
        extra_data=pre.latest_execution_payload_header.extra_data,
        base_fee_per_gas=pre.latest_execution_payload_header.base_fee_per_gas,
        excess_data_gas=uint256(0),  # [New in Deneb]
        block_hash=pre.latest_execution_payload_header.block_hash,
        transactions_root=pre.latest_execution_payload_header.transactions_root,
        withdrawals_root=pre.latest_execution_payload_header.withdrawals_root,
        excess_data_gas=uint256(0),  # [New in Deneb]
    )
    post = BeaconState(
        # Versioning

View File

@@ -18,7 +18,7 @@
## Introduction
This upgrade updates light client data to include the Denbeb changes to the [`ExecutionPayload`](../beacon-chain.md) structure. It extends the [Capella Light Client specifications](../../capella/light-client/sync-protocol.md). The [fork document](./fork.md) explains how to upgrade existing Capella based deployments to Deneb.
This upgrade updates light client data to include the Deneb changes to the [`ExecutionPayload`](../beacon-chain.md) structure. It extends the [Capella Light Client specifications](../../capella/light-client/sync-protocol.md). The [fork document](./fork.md) explains how to upgrade existing Capella based deployments to Deneb.
Additional documents describe the impact of the upgrade on certain roles:
- [Full node](./full-node.md)

View File

@@ -10,21 +10,25 @@ The specification of these changes continues in the same format as the network s
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [Configuration](#configuration)
- [Containers](#containers)
  - [`SignedBeaconBlockAndBlobsSidecar`](#signedbeaconblockandblobssidecar)
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
  - [Topics and messages](#topics-and-messages)
    - [Global topics](#global-topics)
      - [`beacon_block`](#beacon_block)
      - [`beacon_block_and_blobs_sidecar`](#beacon_block_and_blobs_sidecar)
  - [Transitioning the gossip](#transitioning-the-gossip)
- [The Req/Resp domain](#the-reqresp-domain)
  - [Messages](#messages)
    - [BeaconBlocksByRange v2](#beaconblocksbyrange-v2)
    - [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2)
    - [BeaconBlockAndBlobsSidecarByRoot v1](#beaconblockandblobssidecarbyroot-v1)
    - [BlobsSidecarsByRange v1](#blobssidecarsbyrange-v1)
- [Configuration](#configuration)
- [Containers](#containers)
  - [`BlobSidecar`](#blobsidecar)
  - [`SignedBlobSidecar`](#signedblobsidecar)
  - [`BlobIdentifier`](#blobidentifier)
- [Helpers](#helpers)
  - [`verify_sidecar_signature`](#verify_sidecar_signature)
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
  - [Topics and messages](#topics-and-messages)
    - [Global topics](#global-topics)
      - [`beacon_block`](#beacon_block)
      - [`blob_sidecar_{index}`](#blob_sidecar_index)
  - [Transitioning the gossip](#transitioning-the-gossip)
- [The Req/Resp domain](#the-reqresp-domain)
  - [Messages](#messages)
    - [BeaconBlocksByRange v2](#beaconblocksbyrange-v2)
    - [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2)
    - [BlobSidecarsByRoot v1](#blobsidecarsbyroot-v1)
    - [BlobSidecarsByRange v1](#blobsidecarsbyrange-v1)
- [Design decision rationale](#design-decision-rationale)
  - [Why are blobs relayed as a sidecar, separate from beacon blocks?](#why-are-blobs-relayed-as-a-sidecar-separate-from-beacon-blocks)
@@ -35,17 +39,51 @@ The specification of these changes continues in the same format as the network s
| Name | Value | Description |
|------------------------------------------|-----------------------------------|---------------------------------------------------------------------|
| `MAX_REQUEST_BLOBS_SIDECARS` | `2**7` (= 128) | Maximum number of blobs sidecars in a single request |
| `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` | `2**12` (= 4096 epochs, ~18 days) | The minimum epoch range over which a node must serve blobs sidecars |
| `MAX_REQUEST_BLOCKS_DENEB` | `2**7` (= 128) | Maximum number of blocks in a single request |
| `MAX_REQUEST_BLOB_SIDECARS` | `MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK` | Maximum number of blob sidecars in a single request |
| `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` | `2**12` (= 4096 epochs, ~18 days) | The minimum epoch range over which a node must serve blob sidecars |
## Containers
### `SignedBeaconBlockAndBlobsSidecar`
### `BlobSidecar`
```python
class SignedBeaconBlockAndBlobsSidecar(Container):
    beacon_block: SignedBeaconBlock
    blobs_sidecar: BlobsSidecar
class BlobSidecar(Container):
    block_root: Root
    index: BlobIndex  # Index of blob in block
    slot: Slot
    block_parent_root: Root  # Proposer shuffling determinant
    proposer_index: ValidatorIndex
    blob: Blob
    kzg_commitment: KZGCommitment
    kzg_proof: KZGProof  # Allows for quick verification of kzg_commitment
```
### `SignedBlobSidecar`
```python
class SignedBlobSidecar(Container):
    message: BlobSidecar
    signature: BLSSignature
```
### `BlobIdentifier`
```python
class BlobIdentifier(Container):
    block_root: Root
    index: BlobIndex
```
### Helpers
#### `verify_sidecar_signature`
```python
def verify_blob_sidecar_signature(state: BeaconState, signed_blob_sidecar: SignedBlobSidecar) -> bool:
    proposer = state.validators[signed_blob_sidecar.message.proposer_index]
    signing_root = compute_signing_root(signed_blob_sidecar.message, get_domain(state, DOMAIN_BLOB_SIDECAR))
    return bls.Verify(proposer.pubkey, signing_root, signed_blob_sidecar.signature)
```
## The gossip domain: gossipsub
@@ -55,7 +93,8 @@ Some gossip meshes are upgraded in the fork of Deneb to support upgraded types.
### Topics and messages
Topics follow the same specification as in prior upgrades.
The `beacon_block` topic is deprecated and replaced by the `beacon_block_and_blobs_sidecar` topic. All other topics remain stable.
The `beacon_block` topic is modified to also support deneb blocks and new topics are added per table below. All other topics remain stable.
The specification around the creation, validation, and dissemination of messages has not changed from the Capella document unless explicitly noted here.
@@ -65,34 +104,33 @@ The new topics along with the type of the `data` field of a gossipsub message ar
| Name | Message Type |
| - | - |
| `beacon_block_and_blobs_sidecar` | `SignedBeaconBlockAndBlobsSidecar` (new) |
| `blob_sidecar_{index}` | `SignedBlobSidecar` (new) |
#### Global topics
Deneb introduces a new global topic for beacon block and blobs-sidecars.
Deneb introduces new global topics for blob sidecars.
##### `beacon_block`
This topic is deprecated and clients **MUST NOT** expose it in their topic set to any peer. Implementers do not need to do anything beyond simply skipping implementation; it is explicitly called out as it is a departure from the previous versioning of this topic.
The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in deneb.
Refer to [the section below](#transitioning-the-gossip) for details on how to transition the gossip.
##### `blob_sidecar_{index}`
##### `beacon_block_and_blobs_sidecar`
This topic is used to propagate signed blob sidecars, one for each sidecar index. The number of indices is defined by `MAX_BLOBS_PER_BLOCK`.
This topic is used to propagate new signed and coupled beacon blocks and blobs sidecars to all nodes on the networks.
The following validations MUST pass before forwarding the `sidecar` on the network, assuming the alias `sidecar = signed_blob_sidecar.message`:
In addition to the gossip validations for the `beacon_block` topic from prior specifications, the following validations MUST pass before forwarding the `signed_beacon_block_and_blobs_sidecar` on the network.
Alias `signed_beacon_block = signed_beacon_block_and_blobs_sidecar.beacon_block`, `block = signed_beacon_block.message`, `execution_payload = block.body.execution_payload`.
- _[REJECT]_ The KZG commitments correspond to the versioned hashes in the transactions list
-- i.e. `verify_kzg_commitments_against_transactions(block.body.execution_payload.transactions, block.body.blob_kzg_commitments)`
- _[REJECT]_ The sidecar is for the correct topic -- i.e. `sidecar.index` matches the topic `{index}`.
- _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `sidecar.slot <= current_slot` (a client MAY queue future sidecars for processing at the appropriate slot).
- _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that `sidecar.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)`
- _[IGNORE]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved).
- _[REJECT]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) passes validation.
- _[REJECT]_ The sidecar is from a higher slot than the sidecar's block's parent (defined by `sidecar.block_parent_root`).
- _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid as verified by `verify_sidecar_signature`.
- _[IGNORE]_ The sidecar is the only sidecar with valid signature received for the tuple `(sidecar.block_root, sidecar.index)`.
- _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `block_parent_root`/`slot`).
If the `proposer_index` cannot immediately be verified against the expected shuffling, the sidecar MAY be queued for later processing while proposers for the block's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message.
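
A non-normative sketch of how a client might implement a few of these checks (the handler name, the `current_slot` input, and assertion-based error handling are assumptions of this sketch):

```python
def validate_gossip_blob_sidecar(state: BeaconState,
                                 signed_blob_sidecar: SignedBlobSidecar,
                                 topic_index: BlobIndex,
                                 current_slot: Slot) -> None:
    sidecar = signed_blob_sidecar.message
    # [REJECT] The sidecar is for the correct topic
    assert sidecar.index == topic_index
    # [IGNORE] The sidecar is not from a future slot
    assert sidecar.slot <= current_slot
    # [REJECT] The proposer signature is valid
    assert verify_blob_sidecar_signature(state, signed_blob_sidecar)
```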
Alias `sidecar = signed_beacon_block_and_blobs_sidecar.blobs_sidecar`.
- _[IGNORE]_ the `sidecar.beacon_block_slot` is for the current slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance)
-- i.e. `sidecar.beacon_block_slot == block.slot`.
- _[REJECT]_ The KZG commitments in the block are valid against the provided blobs sidecar
-- i.e. `validate_blobs_sidecar(block.slot, hash_tree_root(block), block.body.blob_kzg_commitments, sidecar)`
### Transitioning the gossip
@@ -121,13 +159,12 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
| `CAPELLA_FORK_VERSION` | `capella.SignedBeaconBlock` |
| `DENEB_FORK_VERSION` | `deneb.SignedBeaconBlock` |
No more than `MAX_REQUEST_BLOCKS_DENEB` may be requested at a time.
#### BeaconBlocksByRoot v2
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/`
After `DENEB_FORK_EPOCH`, `BeaconBlocksByRootV2` is replaced by `BeaconBlockAndBlobsSidecarByRootV1`.
Clients MUST support requesting blocks by root for pre-fork-epoch blocks.
Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
[1]: # (eth2spec: skip)
@@ -138,16 +175,29 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
| `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` |
| `CAPELLA_FORK_VERSION` | `capella.SignedBeaconBlock` |
| `DENEB_FORK_VERSION` | `deneb.SignedBeaconBlock` |
#### BeaconBlockAndBlobsSidecarByRoot v1
No more than `MAX_REQUEST_BLOCKS_DENEB` may be requested at a time.
**Protocol ID:** `/eth2/beacon_chain/req/beacon_block_and_blobs_sidecar_by_root/1/`
#### BlobSidecarsByRoot v1
**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_root/1/`
New in deneb.
The `<context-bytes>` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`:
[1]: # (eth2spec: skip)
| `fork_version` | Chunk SSZ type |
|--------------------------|-------------------------------|
| `DENEB_FORK_VERSION` | `deneb.BlobSidecar` |
Request Content:
```
(
List[Root, MAX_REQUEST_BLOCKS]
List[BlobIdentifier, MAX_REQUEST_BLOB_SIDECARS]
)
```
@@ -155,29 +205,42 @@ Response Content:
```
(
List[SignedBeaconBlockAndBlobsSidecar, MAX_REQUEST_BLOCKS]
List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS]
)
```
Requests blocks by block root (= `hash_tree_root(SignedBeaconBlockAndBlobsSidecar.beacon_block.message)`).
The response is a list of `SignedBeaconBlockAndBlobsSidecar` whose length is less than or equal to the number of requests.
It may be less in the case that the responding peer is missing blocks and sidecars.
Requests sidecars by block root and index.
The response is a list of `BlobSidecar` whose length is less than or equal to the number of requests.
It may be less in the case that the responding peer is missing blocks or sidecars.
No more than `MAX_REQUEST_BLOCKS` may be requested at a time.
The response is unsigned, i.e. `BlobSidecar`, as the signature of the beacon block proposer
may not be available beyond the initial distribution via gossip.
`BeaconBlockAndBlobsSidecarByRoot` is primarily used to recover recent blocks and sidecars (e.g. when receiving a block or attestation whose parent is unknown).
No more than `MAX_REQUEST_BLOB_SIDECARS` may be requested at a time.
`BlobSidecarsByRoot` is primarily used to recover recent blobs (e.g. when receiving a block with a transaction whose corresponding blob is missing).
The response MUST consist of zero or more `response_chunk`.
Each _successful_ `response_chunk` MUST contain a single `SignedBeaconBlockAndBlobsSidecar` payload.
Each _successful_ `response_chunk` MUST contain a single `BlobSidecar` payload.
Clients MUST support requesting blocks and sidecars since `minimum_request_epoch`, where `minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS, DENEB_FORK_EPOCH)`. If any root in the request content references a block earlier than `minimum_request_epoch`, peers SHOULD respond with error code `3: ResourceUnavailable`.
Clients MUST support requesting sidecars since `minimum_request_epoch`, where `minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH)`. If any root in the request content references a block earlier than `minimum_request_epoch`, peers MAY respond with error code `3: ResourceUnavailable` or not include the blob in the response.
Clients MUST respond with at least one block and sidecar, if they have it.
Clients MUST respond with at least one sidecar, if they have it.
Clients MAY limit the number of blocks and sidecars in the response.
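A non-normative sketch of the serving side, assuming a hypothetical local `db` exposing `get_block_epoch(block_root)` (returning `None` for unknown roots) and `get_blob_sidecar(block_root, index)`:

```python
def serve_blob_sidecars_by_root(db,
                                request: Sequence[BlobIdentifier],
                                finalized_epoch: Epoch,
                                current_epoch: Epoch) -> Sequence[BlobSidecar]:
    minimum_request_epoch = max(
        finalized_epoch,
        current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS,  # guard uint64 underflow near genesis in practice
        DENEB_FORK_EPOCH,
    )
    response = []
    for identifier in request[:MAX_REQUEST_BLOB_SIDECARS]:
        block_epoch = db.get_block_epoch(identifier.block_root)
        if block_epoch is None or block_epoch < minimum_request_epoch:
            continue  # alternatively, respond with error code 3: ResourceUnavailable
        sidecar = db.get_blob_sidecar(identifier.block_root, identifier.index)
        if sidecar is not None:
            response.append(sidecar)
    return response
```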
#### BlobsSidecarsByRange v1
#### BlobSidecarsByRange v1
**Protocol ID:** `/eth2/beacon_chain/req/blobs_sidecars_by_range/1/`
**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_range/1/`
New in deneb.
The `<context-bytes>` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`:
[1]: # (eth2spec: skip)
| `fork_version` | Chunk SSZ type |
|--------------------------|-------------------------------|
| `DENEB_FORK_VERSION` | `deneb.BlobSidecar` |
Request Content:
```
@@ -190,72 +253,70 @@ Request Content:
Response Content:
```
(
List[BlobsSidecar, MAX_REQUEST_BLOBS_SIDECARS]
List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS]
)
```
Requests blobs sidecars in the slot range `[start_slot, start_slot + count)`,
leading up to the current head block as selected by fork choice.
Requests blob sidecars in the slot range `[start_slot, start_slot + count)`, leading up to the current head block as selected by fork choice.
The response is unsigned, i.e. `BlobsSidecarsByRange`, as the signature of the beacon block proposer
may not be available beyond the initial distribution via gossip.
The response is unsigned, i.e. `BlobSidecarsByRange`, as the signature of the beacon block proposer may not be available beyond the initial distribution via gossip.
Before consuming the next response chunk, the response reader SHOULD verify the blobs sidecar is well-formatted and
correct w.r.t. the expected KZG commitments through `validate_blobs_sidecar`.
Before consuming the next response chunk, the response reader SHOULD verify the blob sidecar is well-formatted and correct w.r.t. the expected KZG commitments through `validate_blobs`.
`BlobsSidecarsByRange` is primarily used to sync blobs that may have been missed on gossip and to sync within the `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` window.
`BlobSidecarsByRange` is primarily used to sync blobs that may have been missed on gossip and to sync within the `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` window.
The request MUST be encoded as an SSZ-container.
The response MUST consist of zero or more `response_chunk`.
Each _successful_ `response_chunk` MUST contain a single `BlobsSidecar` payload.
Each _successful_ `response_chunk` MUST contain a single `BlobSidecar` payload.
Clients MUST keep a record of signed blob sidecars seen on the epoch range
`[max(current_epoch - MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS, DENEB_FORK_EPOCH), current_epoch]`
`[max(current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH), current_epoch]`
where `current_epoch` is defined by the current wall-clock time,
and clients MUST support serving requests of blobs on this range.
Peers that are unable to reply to blobs sidecars requests within the `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS`
Peers that are unable to reply to blob sidecar requests within the `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS`
epoch range SHOULD respond with error code `3: ResourceUnavailable`.
Such peers that are unable to successfully reply to this range of requests MAY get descored
or disconnected at any time.
*Note*: The above requirement implies that nodes that start from a recent weak subjectivity checkpoint
MUST backfill the local blobs database to at least epoch `current_epoch - MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS`
to be fully compliant with `BlobsSidecarsByRange` requests.
MUST backfill the local blobs database to at least epoch `current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS`
to be fully compliant with `BlobSidecarsByRange` requests.
*Note*: Although clients that bootstrap from a weak subjectivity checkpoint can begin
participating in the networking immediately, other peers MAY
disconnect and/or temporarily ban such an un-synced or semi-synced client.
Clients MUST respond with at least the first blobs sidecar that exists in the range, if they have it,
and no more than `MAX_REQUEST_BLOBS_SIDECARS` sidecars.
Clients MUST respond with at least the blob sidecars of the first blob-carrying block that exists in the range, if they have it, and no more than `MAX_REQUEST_BLOB_SIDECARS` sidecars.
The following blobs sidecars, where they exist, MUST be sent in consecutive order.
Clients MUST include all blob sidecars of each block from which they include blob sidecars.
Clients MAY limit the number of blobs sidecars in the response.
The following blob sidecars, where they exist, MUST be sent in consecutive `(slot, index)` order.
An empty `BlobsSidecar` is one that does not contain any blobs, but contains non-zero `beacon_block_root` and `beacon_block_slot` fields and a valid `kzg_aggregated_proof`.
Clients MAY choose not to count empty `BlobsSidecar`s in their rate limiting logic.
Slots that do not contain known blobs MUST be skipped, mimicking the behaviour
of the `BlocksByRange` request. Only response chunks with known blobs should
therefore be sent.
The response MUST contain no more than `count` blobs sidecars.
Clients MAY limit the number of blob sidecars in the response.
Clients MUST respond with blobs sidecars from their view of the current fork choice
-- that is, blobs sidecars as included by blocks from the single chain defined by the current head.
The response MUST contain no more than `count * MAX_BLOBS_PER_BLOCK` blob sidecars.
Clients MUST respond with blob sidecars from their view of the current fork choice
-- that is, blob sidecars as included by blocks from the single chain defined by the current head.
Of note, blocks from slots before the finalization MUST lead to the finalized block reported in the `Status` handshake.
Clients MUST respond with blobs sidecars that are consistent from a single chain within the context of the request.
Clients MUST respond with blob sidecars that are consistent from a single chain within the context of the request.
After the initial blobs sidecar, clients MAY stop in the process of responding
if their fork choice changes the view of the chain in the context of the request.
After the initial blob sidecar, clients MAY stop in the process of responding if their fork choice changes the view of the chain in the context of the request.
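The ordering and limit rules above can be summarized in a non-normative sketch, again assuming a hypothetical `db` that exposes the node's current fork-choice view:

```python
def serve_blob_sidecars_by_range(db, start_slot: Slot, count: uint64) -> Sequence[BlobSidecar]:
    response = []
    for slot in range(start_slot, start_slot + count):
        # Slots without known blobs are skipped, mimicking BlocksByRange
        block_root = db.canonical_block_root_at_slot(slot)  # None on skip slots / blobless slots
        if block_root is None:
            continue
        # All blob sidecars of an included block are sent, in (slot, index) order
        response.extend(db.get_blob_sidecars(block_root))
    # Bounded by count * MAX_BLOBS_PER_BLOCK by construction; MAX_REQUEST_BLOB_SIDECARS also applies
    return response[:MAX_REQUEST_BLOB_SIDECARS]
```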
# Design decision rationale
## Design decision rationale
## Why are blobs relayed as a sidecar, separate from beacon blocks?
### Why are blobs relayed as a sidecar, separate from beacon blocks?
This "sidecar" design provides forward compatibility for further data increases by black-boxing `is_data_available()`:
with full sharding, `is_data_available()` can be replaced by data-availability-sampling (DAS),
thus avoiding all blobs being downloaded by all beacon nodes on the network.
Such sharding design may introduce an updated `BlobsSidecar` to identify the shard,
Such sharding design may introduce an updated `BlobSidecar` to identify the shard,
but does not affect the `BeaconBlock` structure.

@@ -65,14 +65,17 @@ Public functions MUST accept raw bytes as input and perform the required cryptog
| `KZGCommitment` | `Bytes48` | Validation: Perform [BLS standard's](https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-04#section-2.5) "KeyValidate" check but do allow the identity point |
| `KZGProof` | `Bytes48` | Same as for `KZGCommitment` |
| `Polynomial` | `Vector[BLSFieldElement, FIELD_ELEMENTS_PER_BLOB]` | A polynomial in evaluation form |
| `Blob` | `ByteVector[BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB]` | A basic blob data |
| `Blob` | `ByteVector[BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB]` | A basic data blob |
## Constants
| Name | Value | Notes |
| - | - | - |
| `BLS_MODULUS` | `52435875175126190479447740508185965837690552500527637822603658699938581184513` | Scalar field modulus of BLS12-381 |
| `BYTES_PER_COMMITMENT` | `uint64(48)` | The number of bytes in a KZG commitment |
| `BYTES_PER_PROOF` | `uint64(48)` | The number of bytes in a KZG proof |
| `BYTES_PER_FIELD_ELEMENT` | `uint64(32)` | Bytes used to encode a BLS scalar field element |
| `BYTES_PER_BLOB` | `uint64(BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB)` | The number of bytes in a blob |
| `G1_POINT_AT_INFINITY` | `Bytes48(b'\xc0' + b'\x00' * 47)` | Serialized form of the point at infinity on the G1 group |
@@ -102,7 +105,7 @@ but reusing the `mainnet` settings in public networks is a critical security req
| `KZG_SETUP_G2_LENGTH` | `65` |
| `KZG_SETUP_G1` | `Vector[G1Point, FIELD_ELEMENTS_PER_BLOB]`, contents TBD |
| `KZG_SETUP_G2` | `Vector[G2Point, KZG_SETUP_G2_LENGTH]`, contents TBD |
| `KZG_SETUP_LAGRANGE` | `Vector[KZGCommitment, FIELD_ELEMENTS_PER_BLOB]`, contents TBD |
| `KZG_SETUP_LAGRANGE` | `Vector[G1Point, FIELD_ELEMENTS_PER_BLOB]`, contents TBD |
## Helper functions
@@ -249,10 +252,11 @@ def compute_challenge(blob: Blob,
```python
def bls_modular_inverse(x: BLSFieldElement) -> BLSFieldElement:
"""
Compute the modular inverse of x
i.e. return y such that x * y % BLS_MODULUS == 1 and return 0 for x == 0
Compute the modular inverse of x (for x != 0)
i.e. return y such that x * y % BLS_MODULUS == 1
"""
return BLSFieldElement(pow(x, -1, BLS_MODULUS)) if x != 0 else BLSFieldElement(0)
assert (int(x) % BLS_MODULUS) != 0
return BLSFieldElement(pow(x, -1, BLS_MODULUS))
```
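As a quick, non-normative sanity check of the definition above: for any nonzero `x`, the result multiplies back to one modulo `BLS_MODULUS`.

```python
x = BLSFieldElement(12345)
y = bls_modular_inverse(x)
assert int(x) * int(y) % BLS_MODULUS == 1
```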
#### `div`
@@ -273,7 +277,7 @@ def g1_lincomb(points: Sequence[KZGCommitment], scalars: Sequence[BLSFieldElemen
BLS multiscalar multiplication. This function can be optimized using Pippenger's algorithm and variants.
"""
assert len(points) == len(scalars)
result = bls.Z1
result = bls.Z1()
for x, a in zip(points, scalars):
result = bls.add(result, bls.multiply(bls.bytes48_to_G1(x), a))
return KZGCommitment(bls.G1_to_bytes48(result))
@@ -303,8 +307,10 @@ def compute_powers(x: BLSFieldElement, n: uint64) -> Sequence[BLSFieldElement]:
def evaluate_polynomial_in_evaluation_form(polynomial: Polynomial,
z: BLSFieldElement) -> BLSFieldElement:
"""
Evaluate a polynomial (in evaluation form) at an arbitrary point ``z`` that is not in the domain.
Uses the barycentric formula:
Evaluate a polynomial (in evaluation form) at an arbitrary point ``z``.
- When ``z`` is in the domain, the evaluation can be found by indexing the polynomial at the
position that ``z`` is in the domain.
- When ``z`` is not in the domain, the barycentric formula is used:
f(z) = (z**WIDTH - 1) / WIDTH * sum_(i=0)^WIDTH (f(DOMAIN[i]) * DOMAIN[i]) / (z - DOMAIN[i])
"""
width = len(polynomial)
@@ -323,7 +329,7 @@ def evaluate_polynomial_in_evaluation_form(polynomial: Polynomial,
a = BLSFieldElement(int(polynomial[i]) * int(roots_of_unity_brp[i]) % BLS_MODULUS)
b = BLSFieldElement((int(BLS_MODULUS) + int(z) - int(roots_of_unity_brp[i])) % BLS_MODULUS)
result += int(div(a, b) % BLS_MODULUS)
result = result * int(pow(z, width, BLS_MODULUS) - 1) * int(inverse_width)
result = result * int(BLS_MODULUS + pow(z, width, BLS_MODULUS) - 1) * int(inverse_width)
return BLSFieldElement(result % BLS_MODULUS)
```
@@ -338,6 +344,7 @@ def blob_to_kzg_commitment(blob: Blob) -> KZGCommitment:
"""
Public method.
"""
assert len(blob) == BYTES_PER_BLOB
return g1_lincomb(bit_reversal_permutation(KZG_SETUP_LAGRANGE), blob_to_polynomial(blob))
```
@@ -345,17 +352,22 @@ def blob_to_kzg_commitment(blob: Blob) -> KZGCommitment:
```python
def verify_kzg_proof(commitment_bytes: Bytes48,
z: Bytes32,
y: Bytes32,
z_bytes: Bytes32,
y_bytes: Bytes32,
proof_bytes: Bytes48) -> bool:
"""
Verify KZG proof that ``p(z) == y``, where ``p`` is the polynomial committed to by ``commitment_bytes``.
Receives inputs as bytes.
Public method.
"""
assert len(commitment_bytes) == BYTES_PER_COMMITMENT
assert len(z_bytes) == BYTES_PER_FIELD_ELEMENT
assert len(y_bytes) == BYTES_PER_FIELD_ELEMENT
assert len(proof_bytes) == BYTES_PER_PROOF
return verify_kzg_proof_impl(bytes_to_kzg_commitment(commitment_bytes),
bytes_to_bls_field(z),
bytes_to_bls_field(y),
bytes_to_bls_field(z_bytes),
bytes_to_bls_field(y_bytes),
bytes_to_kzg_proof(proof_bytes))
```
@@ -371,10 +383,10 @@ def verify_kzg_proof_impl(commitment: KZGCommitment,
Verify KZG proof that ``p(z) == y``, where ``p`` is the polynomial committed to by ``commitment``.
"""
# Verify: P - y = Q * (X - z)
X_minus_z = bls.add(bls.bytes96_to_G2(KZG_SETUP_G2[1]), bls.multiply(bls.G2, BLS_MODULUS - z))
P_minus_y = bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1, BLS_MODULUS - y))
X_minus_z = bls.add(bls.bytes96_to_G2(KZG_SETUP_G2[1]), bls.multiply(bls.G2(), (BLS_MODULUS - z) % BLS_MODULUS))
P_minus_y = bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1(), (BLS_MODULUS - y) % BLS_MODULUS))
return bls.pairing_check([
[P_minus_y, bls.neg(bls.G2)],
[P_minus_y, bls.neg(bls.G2())],
[bls.bytes48_to_G1(proof), X_minus_z]
])
```
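For intuition (non-normative): if `p(z) == y`, then `(X - z)` divides `p(X) - y`, so there is a quotient `q(X)` with `p(X) - y == q(X) * (X - z)`. Writing `P = [p(s)]_1` for the commitment and `[q(s)]_1` for the proof, where `s` is the secret trusted-setup point, the pairing check above asserts exactly this identity evaluated at `s`: `e(P - [y]_1, [1]_2) == e([q(s)]_1, [s - z]_2)`, which holds precisely when the division is exact, i.e. when `p(z) == y`.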
@@ -411,29 +423,35 @@ def verify_kzg_proof_batch(commitments: Sequence[KZGCommitment],
# Verify: e(sum r^i proof_i, [s]) ==
# e(sum r^i (commitment_i - [y_i]) + sum r^i z_i proof_i, [1])
proof_lincomb = g1_lincomb(proofs, r_powers)
proof_z_lincomb = g1_lincomb(proofs, [z * r_power for z, r_power in zip(zs, r_powers)])
C_minus_ys = [bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1, BLS_MODULUS - y))
proof_z_lincomb = g1_lincomb(
proofs,
[BLSFieldElement((int(z) * int(r_power)) % BLS_MODULUS) for z, r_power in zip(zs, r_powers)],
)
C_minus_ys = [bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1(), (BLS_MODULUS - y) % BLS_MODULUS))
for commitment, y in zip(commitments, ys)]
C_minus_y_as_KZGCommitments = [KZGCommitment(bls.G1_to_bytes48(x)) for x in C_minus_ys]
C_minus_y_lincomb = g1_lincomb(C_minus_y_as_KZGCommitments, r_powers)
return bls.pairing_check([
[proof_lincomb, bls.neg(KZG_SETUP_G2[1])],
[bls.add(C_minus_y_lincomb, proof_z_lincomb), bls.G2]
[bls.bytes48_to_G1(proof_lincomb), bls.neg(bls.bytes96_to_G2(KZG_SETUP_G2[1]))],
[bls.add(bls.bytes48_to_G1(C_minus_y_lincomb), bls.bytes48_to_G1(proof_z_lincomb)), bls.G2()]
])
```
#### `compute_kzg_proof`
```python
def compute_kzg_proof(blob: Blob, z: Bytes32) -> KZGProof:
def compute_kzg_proof(blob: Blob, z_bytes: Bytes32) -> Tuple[KZGProof, Bytes32]:
"""
Compute KZG proof at point `z` for the polynomial represented by `blob`.
Do this by computing the quotient polynomial in evaluation form: q(x) = (p(x) - p(z)) / (x - z).
Public method.
"""
assert len(blob) == BYTES_PER_BLOB
assert len(z_bytes) == BYTES_PER_FIELD_ELEMENT
polynomial = blob_to_polynomial(blob)
return compute_kzg_proof_impl(polynomial, bytes_to_bls_field(z))
proof, y = compute_kzg_proof_impl(polynomial, bytes_to_bls_field(z_bytes))
return proof, y.to_bytes(BYTES_PER_FIELD_ELEMENT, ENDIANNESS)
```
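An illustrative, non-normative round trip through the two public methods; the zero blob is used here only because it is trivially well-formed:

```python
blob = Blob(b'\x00' * BYTES_PER_BLOB)  # all-zero field elements are valid
commitment = blob_to_kzg_commitment(blob)
z_bytes = (3).to_bytes(BYTES_PER_FIELD_ELEMENT, ENDIANNESS)
proof, y_bytes = compute_kzg_proof(blob, z_bytes)
assert verify_kzg_proof(commitment, z_bytes, y_bytes, proof)
```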
#### `compute_quotient_eval_within_domain`
@@ -459,7 +477,7 @@ def compute_quotient_eval_within_domain(z: BLSFieldElement,
f_i = int(BLS_MODULUS) + int(polynomial[i]) - int(y) % BLS_MODULUS
numerator = f_i * int(omega_i) % BLS_MODULUS
denominator = int(z) * (int(BLS_MODULUS) + int(z) - int(omega_i)) % BLS_MODULUS
result += div(BLSFieldElement(numerator), BLSFieldElement(denominator))
result += int(div(BLSFieldElement(numerator), BLSFieldElement(denominator)))
return BLSFieldElement(result % BLS_MODULUS)
```
@@ -467,9 +485,9 @@ def compute_quotient_eval_within_domain(z: BLSFieldElement,
#### `compute_kzg_proof_impl`
```python
def compute_kzg_proof_impl(polynomial: Polynomial, z: BLSFieldElement) -> KZGProof:
def compute_kzg_proof_impl(polynomial: Polynomial, z: BLSFieldElement) -> Tuple[KZGProof, BLSFieldElement]:
"""
Helper function for compute_kzg_proof() and compute_aggregate_kzg_proof().
Helper function for `compute_kzg_proof()` and `compute_blob_kzg_proof()`.
"""
roots_of_unity_brp = bit_reversal_permutation(ROOTS_OF_UNITY)
@@ -479,7 +497,7 @@ def compute_kzg_proof_impl(polynomial: Polynomial, z: BLSFieldElement) -> KZGPro
# For all x_i, compute (x_i - z)
denominator_poly = [BLSFieldElement((int(x) - int(z)) % BLS_MODULUS)
for x in bit_reversal_permutation(ROOTS_OF_UNITY)]
for x in roots_of_unity_brp]
# Compute the quotient polynomial directly in evaluation form
quotient_polynomial = [BLSFieldElement(0)] * FIELD_ELEMENTS_PER_BLOB
@@ -491,21 +509,25 @@ def compute_kzg_proof_impl(polynomial: Polynomial, z: BLSFieldElement) -> KZGPro
# Compute: q(x_i) = (p(x_i) - p(z)) / (x_i - z).
quotient_polynomial[i] = div(a, b)
return KZGProof(g1_lincomb(bit_reversal_permutation(KZG_SETUP_LAGRANGE), quotient_polynomial))
return KZGProof(g1_lincomb(bit_reversal_permutation(KZG_SETUP_LAGRANGE), quotient_polynomial)), y
```
#### `compute_blob_kzg_proof`
```python
def compute_blob_kzg_proof(blob: Blob) -> KZGProof:
def compute_blob_kzg_proof(blob: Blob, commitment_bytes: Bytes48) -> KZGProof:
"""
Given a blob, return the KZG proof that is used to verify it against the commitment.
This method does not verify that the commitment is correct with respect to `blob`.
Public method.
"""
commitment = blob_to_kzg_commitment(blob)
assert len(blob) == BYTES_PER_BLOB
assert len(commitment_bytes) == BYTES_PER_COMMITMENT
commitment = bytes_to_kzg_commitment(commitment_bytes)
polynomial = blob_to_polynomial(blob)
evaluation_challenge = compute_challenge(blob, commitment)
return compute_kzg_proof_impl(polynomial, evaluation_challenge)
proof, _ = compute_kzg_proof_impl(polynomial, evaluation_challenge)
return proof
```
#### `verify_blob_kzg_proof`
@@ -519,6 +541,10 @@ def verify_blob_kzg_proof(blob: Blob,
Public method.
"""
assert len(blob) == BYTES_PER_BLOB
assert len(commitment_bytes) == BYTES_PER_COMMITMENT
assert len(proof_bytes) == BYTES_PER_PROOF
commitment = bytes_to_kzg_commitment(commitment_bytes)
polynomial = blob_to_polynomial(blob)
@@ -548,6 +574,9 @@ def verify_blob_kzg_proof_batch(blobs: Sequence[Blob],
commitments, evaluation_challenges, ys, proofs = [], [], [], []
for blob, commitment_bytes, proof_bytes in zip(blobs, commitments_bytes, proofs_bytes):
assert len(blob) == BYTES_PER_BLOB
assert len(commitment_bytes) == BYTES_PER_COMMITMENT
assert len(proof_bytes) == BYTES_PER_PROOF
commitment = bytes_to_kzg_commitment(commitment_bytes)
commitments.append(commitment)
polynomial = blob_to_polynomial(blob)
@@ -558,3 +587,4 @@ def verify_blob_kzg_proof_batch(blobs: Sequence[Blob],
return verify_kzg_proof_batch(commitments, evaluation_challenges, ys, proofs)
```

@@ -16,8 +16,7 @@
- [Block and sidecar proposal](#block-and-sidecar-proposal)
- [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
- [Blob KZG commitments](#blob-kzg-commitments)
- [Constructing the `SignedBeaconBlockAndBlobsSidecar`](#constructing-the-signedbeaconblockandblobssidecar)
- [Block](#block)
- [Constructing the `SignedBlobSidecar`s](#constructing-the-signedblobsidecars)
- [Sidecar](#sidecar)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
@@ -45,7 +44,9 @@ Note: This API is *unstable*. `get_blobs_and_kzg_commitments` and `get_payload`
Implementers may also retrieve blobs individually per transaction.
```python
def get_blobs_and_kzg_commitments(payload_id: PayloadId) -> Tuple[Sequence[BLSFieldElement], Sequence[KZGCommitment]]:
def get_blobs_and_kzg_commitments(
payload_id: PayloadId
) -> Tuple[Sequence[Blob], Sequence[KZGCommitment], Sequence[KZGProof]]:
# pylint: disable=unused-argument
...
```
@@ -53,7 +54,6 @@ def get_blobs_and_kzg_commitments(payload_id: PayloadId) -> Tuple[Sequence[BLSFi
## Beacon chain responsibilities
All validator responsibilities remain unchanged other than those noted below.
Namely, the blob handling and the addition of `SignedBeaconBlockAndBlobsSidecar`.
### Block and sidecar proposal
@@ -68,42 +68,62 @@ use the `payload_id` to retrieve `blobs` and `blob_kzg_commitments` via `get_blobs_and_kzg_commitments`
```python
def validate_blobs_and_kzg_commitments(execution_payload: ExecutionPayload,
blobs: Sequence[Blob],
blob_kzg_commitments: Sequence[KZGCommitment]) -> None:
blob_kzg_commitments: Sequence[KZGCommitment],
blob_kzg_proofs: Sequence[KZGProof]) -> None:
# Optionally sanity-check that the KZG commitments match the versioned hashes in the transactions
assert verify_kzg_commitments_against_transactions(execution_payload.transactions, blob_kzg_commitments)
# Optionally sanity-check that the KZG commitments match the blobs (as produced by the execution engine)
assert len(blob_kzg_commitments) == len(blobs)
assert [blob_to_kzg_commitment(blob) == commitment for blob, commitment in zip(blobs, blob_kzg_commitments)]
assert len(blob_kzg_commitments) == len(blobs) == len(blob_kzg_proofs)
assert verify_blob_kzg_proof_batch(blobs, blob_kzg_commitments, blob_kzg_proofs)
```
3. If valid, set `block.body.blob_kzg_commitments = blob_kzg_commitments` (see the combined sketch below).
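A non-normative sketch of the three steps combined, with `payload_id` coming from the usual payload-building flow:

```python
blobs, blob_kzg_commitments, blob_kzg_proofs = get_blobs_and_kzg_commitments(payload_id)
validate_blobs_and_kzg_commitments(block.body.execution_payload,
                                   blobs, blob_kzg_commitments, blob_kzg_proofs)
block.body.blob_kzg_commitments = blob_kzg_commitments
```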
#### Constructing the `SignedBeaconBlockAndBlobsSidecar`
To construct a `SignedBeaconBlockAndBlobsSidecar`, a `signed_beacon_block_and_blobs_sidecar` is defined with the necessary context for block and sidecar proposal.
#### Constructing the `SignedBlobSidecar`s
##### Block
Set `signed_beacon_block_and_blobs_sidecar.beacon_block = block` where `block` is obtained above.
To construct a `SignedBlobSidecar`, a `signed_blob_sidecar` is defined with the necessary context for block and sidecar proposal.
##### Sidecar
Coupled with block, the corresponding blobs are packaged into a sidecar object for distribution to the network.
Set `signed_beacon_block_and_blobs_sidecar.blobs_sidecar = sidecar` where `sidecar` is obtained from:
Blobs associated with a block are packaged into sidecar objects for distribution to the network.
Each `sidecar` is obtained from:
```python
def get_blobs_sidecar(block: BeaconBlock, blobs: Sequence[Blob]) -> BlobsSidecar:
return BlobsSidecar(
beacon_block_root=hash_tree_root(block),
beacon_block_slot=block.slot,
blobs=blobs,
# Disabled because not available before switch to single blob sidecars
kzg_aggregated_proof=KZGProof(), # compute_aggregate_kzg_proof(blobs),
)
def get_blob_sidecars(block: BeaconBlock,
blobs: Sequence[Blob],
blob_kzg_proofs: Sequence[KZGProof]) -> Sequence[BlobSidecar]:
return [
BlobSidecar(
block_root=hash_tree_root(block),
index=index,
slot=block.slot,
block_parent_root=block.parent_root,
blob=blob,
kzg_commitment=block.body.blob_kzg_commitments[index],
kzg_proof=blob_kzg_proofs[index],
)
for index, blob in enumerate(blobs)
]
```
This `signed_beacon_block_and_blobs_sidecar` is then published to the global `beacon_block_and_blobs_sidecar` topic.
Then for each sidecar, `signed_sidecar = SignedBlobSidecar(message=sidecar, signature=signature)` is constructed and published to the `blob_sidecar_{index}` topics according to its index.
`signature` is obtained from:
```python
def get_blob_sidecar_signature(state: BeaconState,
sidecar: BlobSidecar,
privkey: int) -> BLSSignature:
domain = get_domain(state, DOMAIN_BLOB_SIDECAR, compute_epoch_at_slot(sidecar.slot))
signing_root = compute_signing_root(sidecar, domain)
return bls.Sign(privkey, signing_root)
```
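Putting the helpers together, a proposer's signing-and-publishing loop might look like the following non-normative sketch, where `publish(topic, message)` is a hypothetical gossip-publish function:

```python
for sidecar in get_blob_sidecars(block, blobs, blob_kzg_proofs):
    signature = get_blob_sidecar_signature(state, sidecar, privkey)
    signed_sidecar = SignedBlobSidecar(message=sidecar, signature=signature)
    publish(f"blob_sidecar_{sidecar.index}", signed_sidecar)  # hypothetical publish helper
```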
After publishing, peers on the network may request the sidecar through sync-requests, or a local user may be interested.
The validator MUST hold on to sidecars for `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` epochs and serve when capable,
The validator MUST hold on to sidecars for `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` epochs and serve when capable,
to ensure the data-availability of these blobs throughout the network.
After `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` nodes MAY prune the sidecars and/or stop serving them.
After `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` nodes MAY prune the sidecars and/or stop serving them.

@@ -1835,13 +1835,12 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
##### Deposits
```python
def get_validator_from_deposit(deposit: Deposit) -> Validator:
amount = deposit.data.amount
def get_validator_from_deposit(pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64) -> Validator:
effective_balance = min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
return Validator(
pubkey=deposit.data.pubkey,
withdrawal_credentials=deposit.data.withdrawal_credentials,
pubkey=pubkey,
withdrawal_credentials=withdrawal_credentials,
activation_eligibility_epoch=FAR_FUTURE_EPOCH,
activation_epoch=FAR_FUTURE_EPOCH,
exit_epoch=FAR_FUTURE_EPOCH,
@@ -1850,6 +1849,34 @@ def get_validator_from_deposit(deposit: Deposit) -> Validator:
)
```
```python
def apply_deposit(state: BeaconState,
pubkey: BLSPubkey,
withdrawal_credentials: Bytes32,
amount: uint64,
signature: BLSSignature) -> None:
validator_pubkeys = [v.pubkey for v in state.validators]
if pubkey not in validator_pubkeys:
# Verify the deposit signature (proof of possession) which is not checked by the deposit contract
deposit_message = DepositMessage(
pubkey=pubkey,
withdrawal_credentials=withdrawal_credentials,
amount=amount,
)
domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks
signing_root = compute_signing_root(deposit_message, domain)
if not bls.Verify(pubkey, signing_root, signature):
return
# Add validator and balance entries
state.validators.append(get_validator_from_deposit(pubkey, withdrawal_credentials, amount))
state.balances.append(amount)
else:
# Increase balance by deposit amount
index = ValidatorIndex(validator_pubkeys.index(pubkey))
increase_balance(state, index, amount)
```
```python
def process_deposit(state: BeaconState, deposit: Deposit) -> None:
# Verify the Merkle branch
@@ -1864,28 +1891,13 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None:
# Deposits must be processed in order
state.eth1_deposit_index += 1
pubkey = deposit.data.pubkey
amount = deposit.data.amount
validator_pubkeys = [v.pubkey for v in state.validators]
if pubkey not in validator_pubkeys:
# Verify the deposit signature (proof of possession) which is not checked by the deposit contract
deposit_message = DepositMessage(
pubkey=deposit.data.pubkey,
withdrawal_credentials=deposit.data.withdrawal_credentials,
amount=deposit.data.amount,
)
domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks
signing_root = compute_signing_root(deposit_message, domain)
if not bls.Verify(pubkey, signing_root, deposit.data.signature):
return
# Add validator and balance entries
state.validators.append(get_validator_from_deposit(deposit))
state.balances.append(amount)
else:
# Increase balance by deposit amount
index = ValidatorIndex(validator_pubkeys.index(pubkey))
increase_balance(state, index, amount)
apply_deposit(
state=state,
pubkey=deposit.data.pubkey,
withdrawal_credentials=deposit.data.withdrawal_credentials,
amount=deposit.data.amount,
signature=deposit.data.signature,
)
```
##### Voluntary exits

@@ -8,21 +8,27 @@
- [Introduction](#introduction)
- [Fork choice](#fork-choice)
- [Constant](#constant)
- [Preset](#preset)
- [Configuration](#configuration)
- [Helpers](#helpers)
- [`LatestMessage`](#latestmessage)
- [`is_previous_epoch_justified`](#is_previous_epoch_justified)
- [`Store`](#store)
- [`get_forkchoice_store`](#get_forkchoice_store)
- [`get_slots_since_genesis`](#get_slots_since_genesis)
- [`get_current_slot`](#get_current_slot)
- [`compute_slots_since_epoch_start`](#compute_slots_since_epoch_start)
- [`get_ancestor`](#get_ancestor)
- [`get_latest_attesting_balance`](#get_latest_attesting_balance)
- [`get_weight`](#get_weight)
- [`get_voting_source`](#get_voting_source)
- [`filter_block_tree`](#filter_block_tree)
- [`get_filtered_block_tree`](#get_filtered_block_tree)
- [`get_head`](#get_head)
- [`should_update_justified_checkpoint`](#should_update_justified_checkpoint)
- [`update_checkpoints`](#update_checkpoints)
- [`update_unrealized_checkpoints`](#update_unrealized_checkpoints)
- [Pull-up tip helpers](#pull-up-tip-helpers)
- [`compute_pulled_up_tip`](#compute_pulled_up_tip)
- [`on_tick` helpers](#on_tick-helpers)
- [`on_tick_per_slot`](#on_tick_per_slot)
- [`on_attestation` helpers](#on_attestation-helpers)
- [`validate_target_epoch_against_current_time`](#validate_target_epoch_against_current_time)
- [`validate_on_attestation`](#validate_on_attestation)
@@ -67,12 +73,6 @@ Any of the above handlers that trigger an unhandled exception (e.g. a failed ass
| -------------------- | ----------- |
| `INTERVALS_PER_SLOT` | `uint64(3)` |
### Preset
| Name | Value | Unit | Duration |
| -------------------------------- | ------------ | :---: | :--------: |
| `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` | `2**3` (= 8) | slots | 96 seconds |
### Configuration
| Name | Value |
@@ -92,8 +92,26 @@ class LatestMessage(object):
root: Root
```
### `is_previous_epoch_justified`
```python
def is_previous_epoch_justified(store: Store) -> bool:
current_slot = get_current_slot(store)
current_epoch = compute_epoch_at_slot(current_slot)
return store.justified_checkpoint.epoch + 1 == current_epoch
```
#### `Store`
The `Store` is responsible for tracking information required for the fork choice algorithm. The important fields being tracked are described below:
- `justified_checkpoint`: the justified checkpoint used as the starting point for the LMD GHOST fork choice algorithm.
- `finalized_checkpoint`: the highest known finalized checkpoint. The fork choice only considers blocks that are not conflicting with this checkpoint.
- `unrealized_justified_checkpoint` & `unrealized_finalized_checkpoint`: these track the highest justified & finalized checkpoints, respectively, without regard to whether on-chain ***realization*** has occurred, i.e. FFG processing of new attestations within the state transition function. This is an important distinction from `justified_checkpoint` & `finalized_checkpoint`, because the latter only track checkpoints that have been realized on-chain. Note that on-chain processing of FFG information only happens at epoch boundaries.
- `unrealized_justifications`: stores a map of block root to the unrealized justified checkpoint observed in that block.
```python
@dataclass
class Store(object):
@@ -101,13 +119,15 @@ class Store(object):
genesis_time: uint64
justified_checkpoint: Checkpoint
finalized_checkpoint: Checkpoint
best_justified_checkpoint: Checkpoint
unrealized_justified_checkpoint: Checkpoint
unrealized_finalized_checkpoint: Checkpoint
proposer_boost_root: Root
equivocating_indices: Set[ValidatorIndex]
blocks: Dict[Root, BeaconBlock] = field(default_factory=dict)
block_states: Dict[Root, BeaconState] = field(default_factory=dict)
checkpoint_states: Dict[Checkpoint, BeaconState] = field(default_factory=dict)
latest_messages: Dict[ValidatorIndex, LatestMessage] = field(default_factory=dict)
unrealized_justifications: Dict[Root, Checkpoint] = field(default_factory=dict)
```
#### `get_forkchoice_store`
@@ -130,12 +150,14 @@ def get_forkchoice_store(anchor_state: BeaconState, anchor_block: BeaconBlock) -
genesis_time=anchor_state.genesis_time,
justified_checkpoint=justified_checkpoint,
finalized_checkpoint=finalized_checkpoint,
best_justified_checkpoint=justified_checkpoint,
unrealized_justified_checkpoint=justified_checkpoint,
unrealized_finalized_checkpoint=finalized_checkpoint,
proposer_boost_root=proposer_boost_root,
equivocating_indices=set(),
blocks={anchor_root: copy(anchor_block)},
block_states={anchor_root: copy(anchor_state)},
checkpoint_states={justified_checkpoint: copy(anchor_state)},
unrealized_justifications={anchor_root: justified_checkpoint}
)
```
@@ -167,21 +189,20 @@ def get_ancestor(store: Store, root: Root, slot: Slot) -> Root:
block = store.blocks[root]
if block.slot > slot:
return get_ancestor(store, block.parent_root, slot)
elif block.slot == slot:
return root
else:
# root is older than queried slot, thus a skip slot. Return most recent root prior to slot
return root
return root
```
#### `get_latest_attesting_balance`
#### `get_weight`
```python
def get_latest_attesting_balance(store: Store, root: Root) -> Gwei:
def get_weight(store: Store, root: Root) -> Gwei:
state = store.checkpoint_states[store.justified_checkpoint]
active_indices = get_active_validator_indices(state, get_current_epoch(state))
unslashed_and_active_indices = [
i for i in get_active_validator_indices(state, get_current_epoch(state))
if not state.validators[i].slashed
]
attestation_score = Gwei(sum(
state.validators[i].effective_balance for i in active_indices
state.validators[i].effective_balance for i in unslashed_and_active_indices
if (i in store.latest_messages
and i not in store.equivocating_indices
and get_ancestor(store, store.latest_messages[i].root, store.blocks[root].slot) == root)
@@ -197,11 +218,32 @@ def get_latest_attesting_balance(store: Store, root: Root) -> Gwei:
committee_weight = get_total_active_balance(state) // SLOTS_PER_EPOCH
proposer_score = (committee_weight * PROPOSER_SCORE_BOOST) // 100
return attestation_score + proposer_score
```
#### `get_voting_source`
```python
def get_voting_source(store: Store, block_root: Root) -> Checkpoint:
"""
Compute the voting source checkpoint in event that block with root ``block_root`` is the head block
"""
block = store.blocks[block_root]
current_epoch = compute_epoch_at_slot(get_current_slot(store))
block_epoch = compute_epoch_at_slot(block.slot)
if current_epoch > block_epoch:
# The block is from a prior epoch, the voting source will be pulled-up
return store.unrealized_justifications[block_root]
else:
# The block is not from a prior epoch, therefore the voting source is not pulled up
head_state = store.block_states[block_root]
return head_state.current_justified_checkpoint
```
#### `filter_block_tree`
*Note*: External calls to `filter_block_tree` (i.e., any calls that are not made by the recursive logic in this function) MUST set `block_root` to `store.justified_checkpoint`.
```python
def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconBlock]) -> bool:
block = store.blocks[block_root]
@@ -219,17 +261,29 @@ def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconB
return True
return False
# If leaf block, check finalized/justified checkpoints as matching latest.
head_state = store.block_states[block_root]
current_epoch = compute_epoch_at_slot(get_current_slot(store))
voting_source = get_voting_source(store, block_root)
# The voting source should be at the same height as the store's justified checkpoint
correct_justified = (
store.justified_checkpoint.epoch == GENESIS_EPOCH
or head_state.current_justified_checkpoint == store.justified_checkpoint
or voting_source.epoch == store.justified_checkpoint.epoch
)
# If the previous epoch is justified, the block should be pulled-up. In this case, check that unrealized
# justification is higher than the store and that the voting source is not more than two epochs ago
if not correct_justified and is_previous_epoch_justified(store):
correct_justified = (
store.unrealized_justifications[block_root].epoch >= store.justified_checkpoint.epoch and
voting_source.epoch + 2 >= current_epoch
)
finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
correct_finalized = (
store.finalized_checkpoint.epoch == GENESIS_EPOCH
or head_state.finalized_checkpoint == store.finalized_checkpoint
or store.finalized_checkpoint.root == get_ancestor(store, block_root, finalized_slot)
)
# If expected finalized/justified, add to viable block-tree and signal viability to parent.
if correct_justified and correct_finalized:
blocks[block_root] = block
@@ -270,28 +324,83 @@ def get_head(store: Store) -> Root:
return head
# Sort by latest attesting balance with ties broken lexicographically
# Ties broken by favoring block with lexicographically higher root
head = max(children, key=lambda root: (get_latest_attesting_balance(store, root), root))
head = max(children, key=lambda root: (get_weight(store, root), root))
```
#### `should_update_justified_checkpoint`
#### `update_checkpoints`
```python
def should_update_justified_checkpoint(store: Store, new_justified_checkpoint: Checkpoint) -> bool:
def update_checkpoints(store: Store, justified_checkpoint: Checkpoint, finalized_checkpoint: Checkpoint) -> None:
"""
To address the bouncing attack, only update conflicting justified
checkpoints in the fork choice if in the early slots of the epoch.
Otherwise, delay incorporation of new justified checkpoint until next epoch boundary.
See https://ethresear.ch/t/prevention-of-bouncing-attack-on-ffg/6114 for more detailed analysis and discussion.
Update checkpoints in store if necessary
"""
if compute_slots_since_epoch_start(get_current_slot(store)) < SAFE_SLOTS_TO_UPDATE_JUSTIFIED:
return True
# Update justified checkpoint
if justified_checkpoint.epoch > store.justified_checkpoint.epoch:
store.justified_checkpoint = justified_checkpoint
justified_slot = compute_start_slot_at_epoch(store.justified_checkpoint.epoch)
if not get_ancestor(store, new_justified_checkpoint.root, justified_slot) == store.justified_checkpoint.root:
return False
# Update finalized checkpoint
if finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
store.finalized_checkpoint = finalized_checkpoint
```
return True
#### `update_unrealized_checkpoints`
```python
def update_unrealized_checkpoints(store: Store, unrealized_justified_checkpoint: Checkpoint,
unrealized_finalized_checkpoint: Checkpoint) -> None:
"""
Update unrealized checkpoints in store if necessary
"""
# Update unrealized justified checkpoint
if unrealized_justified_checkpoint.epoch > store.unrealized_justified_checkpoint.epoch:
store.unrealized_justified_checkpoint = unrealized_justified_checkpoint
# Update unrealized finalized checkpoint
if unrealized_finalized_checkpoint.epoch > store.unrealized_finalized_checkpoint.epoch:
store.unrealized_finalized_checkpoint = unrealized_finalized_checkpoint
```
#### Pull-up tip helpers
##### `compute_pulled_up_tip`
```python
def compute_pulled_up_tip(store: Store, block_root: Root) -> None:
state = store.block_states[block_root].copy()
# Pull up the post-state of the block to the next epoch boundary
process_justification_and_finalization(state)
store.unrealized_justifications[block_root] = state.current_justified_checkpoint
update_unrealized_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
# If the block is from a prior epoch, apply the realized values
block_epoch = compute_epoch_at_slot(store.blocks[block_root].slot)
current_epoch = compute_epoch_at_slot(get_current_slot(store))
if block_epoch < current_epoch:
update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
```
#### `on_tick` helpers
##### `on_tick_per_slot`
```python
def on_tick_per_slot(store: Store, time: uint64) -> None:
previous_slot = get_current_slot(store)
# Update store time
store.time = time
current_slot = get_current_slot(store)
# If this is a new slot, reset store.proposer_boost_root
if current_slot > previous_slot:
store.proposer_boost_root = Root()
# If a new epoch, pull-up justification and finalization from previous epoch
if current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0:
update_checkpoints(store, store.unrealized_justified_checkpoint, store.unrealized_finalized_checkpoint)
```
#### `on_attestation` helpers
@@ -324,7 +433,7 @@ def validate_on_attestation(store: Store, attestation: Attestation, is_from_bloc
# Check that the epoch number and slot number are matching
assert target.epoch == compute_epoch_at_slot(attestation.data.slot)
# Attestations target be for a known block. If target block is unknown, delay consideration until the block is found
# Attestation target must be for a known block. If target block is unknown, delay consideration until block is found
assert target.root in store.blocks
# Attestations must be for a known block. If block is unknown, delay consideration until the block is found
@@ -372,27 +481,13 @@ def update_latest_messages(store: Store, attesting_indices: Sequence[ValidatorIn
```python
def on_tick(store: Store, time: uint64) -> None:
previous_slot = get_current_slot(store)
# update store time
store.time = time
current_slot = get_current_slot(store)
# Reset store.proposer_boost_root if this is a new slot
if current_slot > previous_slot:
store.proposer_boost_root = Root()
# Not a new epoch, return
if not (current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0):
return
# Update store.justified_checkpoint if a better checkpoint on the store.finalized_checkpoint chain
if store.best_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
ancestor_at_finalized_slot = get_ancestor(store, store.best_justified_checkpoint.root, finalized_slot)
if ancestor_at_finalized_slot == store.finalized_checkpoint.root:
store.justified_checkpoint = store.best_justified_checkpoint
# If the ``store.time`` falls behind, while loop catches up slot by slot
# to ensure that every previous slot is processed with ``on_tick_per_slot``
tick_slot = (time - store.genesis_time) // SECONDS_PER_SLOT
while get_current_slot(store) < tick_slot:
previous_time = store.genesis_time + (get_current_slot(store) + 1) * SECONDS_PER_SLOT
on_tick_per_slot(store, previous_time)
on_tick_per_slot(store, time)
```
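A non-normative example of the catch-up behavior:

```python
# Suppose store.time last corresponded to slot 10 and the next tick arrives in slot 13.
time_in_slot_13 = store.genesis_time + 13 * SECONDS_PER_SLOT + 2
on_tick(store, time_in_slot_13)
# on_tick_per_slot ran with the exact start times of slots 11, 12 and 13,
# and then once more with time_in_slot_13 itself, so no slot boundary
# (proposer-boost reset, epoch-boundary pull-up) was skipped.
```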
#### `on_block`
@@ -415,11 +510,12 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
# Check the block is valid and compute the post-state
state = pre_state.copy()
block_root = hash_tree_root(block)
state_transition(state, signed_block, True)
# Add new block to the store
store.blocks[hash_tree_root(block)] = block
store.blocks[block_root] = block
# Add new state for this block to the store
store.block_states[hash_tree_root(block)] = state
store.block_states[block_root] = state
# Add proposer score boost if the block is timely
time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
@@ -427,17 +523,11 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
if get_current_slot(store) == block.slot and is_before_attesting_interval:
store.proposer_boost_root = hash_tree_root(block)
# Update justified checkpoint
if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
store.best_justified_checkpoint = state.current_justified_checkpoint
if should_update_justified_checkpoint(store, state.current_justified_checkpoint):
store.justified_checkpoint = state.current_justified_checkpoint
# Update checkpoints in store if necessary
update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
# Update finalized checkpoint
if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
store.finalized_checkpoint = state.finalized_checkpoint
store.justified_checkpoint = state.current_justified_checkpoint
# Eagerly compute unrealized justification and finality
compute_pulled_up_tip(store, block_root)
```
#### `on_attestation`

@@ -1 +1 @@
1.3.0-rc.2
1.3.0-rc.5

@@ -668,10 +668,9 @@ def run_test_single_fork(spec, phases, state, fork):
# Upgrade to post-fork spec, attested block is still before the fork
attested_block = block.copy()
attested_state = state.copy()
state, _ = do_fork(state, spec, phases[fork], fork_epoch, with_block=False)
sync_aggregate, _ = get_sync_aggregate(phases[fork], state)
state, block = do_fork(state, spec, phases[fork], fork_epoch, sync_aggregate=sync_aggregate)
spec = phases[fork]
sync_aggregate, _ = get_sync_aggregate(spec, state)
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases)
assert test.store.finalized_header.beacon.slot == finalized_state.slot
assert test.store.next_sync_committee == finalized_state.next_sync_committee
@@ -755,18 +754,16 @@ def run_test_multi_fork(spec, phases, state, fork_1, fork_2):
# ..., attested is from `fork_1`, ...
fork_1_epoch = getattr(phases[fork_1].config, fork_1.upper() + '_FORK_EPOCH')
transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_1_epoch) - 1)
state, _ = do_fork(state, spec, phases[fork_1], fork_1_epoch, with_block=False)
state, attested_block = do_fork(state, spec, phases[fork_1], fork_1_epoch)
spec = phases[fork_1]
attested_block = state_transition_with_full_block(spec, state, True, True)
attested_state = state.copy()
# ..., and signature is from `fork_2`
fork_2_epoch = getattr(phases[fork_2].config, fork_2.upper() + '_FORK_EPOCH')
transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_2_epoch) - 1)
state, _ = do_fork(state, spec, phases[fork_2], fork_2_epoch, with_block=False)
sync_aggregate, _ = get_sync_aggregate(phases[fork_2], state)
state, block = do_fork(state, spec, phases[fork_2], fork_2_epoch, sync_aggregate=sync_aggregate)
spec = phases[fork_2]
sync_aggregate, _ = get_sync_aggregate(spec, state)
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
# Check that update applies
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases)

@@ -37,7 +37,7 @@ from eth2spec.test.helpers.voluntary_exits import prepare_signed_exits
@with_capella_and_later
@spec_state_test
def test_success_bls_change(spec, state):
def test_bls_change(spec, state):
index = 0
signed_address_change = get_signed_address_change(spec, state, validator_index=index)
pre_credentials = state.validators[index].withdrawal_credentials
@@ -60,7 +60,46 @@ def test_success_bls_change(spec, state):
@with_capella_and_later
@spec_state_test
def test_success_exit_and_bls_change(spec, state):
def test_deposit_and_bls_change(spec, state):
initial_registry_len = len(state.validators)
initial_balances_len = len(state.balances)
validator_index = len(state.validators)
amount = spec.MAX_EFFECTIVE_BALANCE
deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True)
signed_address_change = get_signed_address_change(
spec, state,
validator_index=validator_index,
withdrawal_pubkey=deposit.data.pubkey, # Deposit helper defaults to use pubkey as withdrawal credential
)
deposit_credentials = deposit.data.withdrawal_credentials
assert deposit_credentials[:1] == spec.BLS_WITHDRAWAL_PREFIX
yield 'pre', state
block = build_empty_block_for_next_slot(spec, state)
block.body.deposits.append(deposit)
block.body.bls_to_execution_changes.append(signed_address_change)
signed_block = state_transition_and_sign_block(spec, state, block)
yield 'blocks', [signed_block]
yield 'post', state
assert len(state.validators) == initial_registry_len + 1
assert len(state.balances) == initial_balances_len + 1
validator_credentials = state.validators[validator_index].withdrawal_credentials
assert deposit_credentials != validator_credentials
assert validator_credentials[:1] == spec.ETH1_ADDRESS_WITHDRAWAL_PREFIX
assert validator_credentials[1:12] == b'\x00' * 11
assert validator_credentials[12:] == signed_address_change.message.to_execution_address
@with_capella_and_later
@spec_state_test
def test_exit_and_bls_change(spec, state):
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH

@@ -44,8 +44,11 @@ def pytest_addoption(parser):
help="bls-default: make tests that are not dependent on BLS run without BLS"
)
parser.addoption(
"--bls-type", action="store", type=str, default="py_ecc", choices=["py_ecc", "milagro"],
help="bls-type: use 'pyecc' or 'milagro' implementation for BLS"
"--bls-type", action="store", type=str, default="py_ecc", choices=["py_ecc", "milagro", "arkworks", "fastest"],
help=(
"bls-type: use specified BLS implementation;"
"fastest: use milagro for signatures and arkworks for everything else (e.g. KZG)"
)
)
@@ -88,5 +91,9 @@ def bls_type(request):
bls_utils.use_py_ecc()
elif bls_type == "milagro":
bls_utils.use_milagro()
elif bls_type == "arkworks":
bls_utils.use_arkworks()
elif bls_type == "fastest":
bls_utils.use_fastest()
else:
raise Exception(f"unrecognized bls type: {bls_type}")

@@ -22,7 +22,7 @@ def test_one_blob(spec, state):
yield 'pre', state
block = build_empty_block_for_next_slot(spec, state)
opaque_tx, _, blob_kzg_commitments = get_sample_opaque_tx(spec)
opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec)
block.body.blob_kzg_commitments = blob_kzg_commitments
block.body.execution_payload.transactions = [opaque_tx]
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
@@ -38,7 +38,7 @@ def test_max_blobs(spec, state):
yield 'pre', state
block = build_empty_block_for_next_slot(spec, state)
opaque_tx, _, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=spec.MAX_BLOBS_PER_BLOCK)
opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec, blob_count=spec.MAX_BLOBS_PER_BLOCK)
block.body.blob_kzg_commitments = blob_kzg_commitments
block.body.execution_payload.transactions = [opaque_tx]
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)

@@ -0,0 +1,54 @@
from eth2spec.test.helpers.state import (
state_transition_and_sign_block,
)
from eth2spec.test.helpers.block import (
build_empty_block_for_next_slot
)
from eth2spec.test.context import (
spec_state_test,
with_deneb_and_later,
)
from eth2spec.test.helpers.execution_payload import (
compute_el_block_hash,
)
from eth2spec.test.helpers.sharding import (
get_sample_opaque_tx,
)
def _run_validate_blobs(spec, state, blob_count):
block = build_empty_block_for_next_slot(spec, state)
opaque_tx, blobs, blob_kzg_commitments, kzg_proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
block.body.blob_kzg_commitments = blob_kzg_commitments
block.body.execution_payload.transactions = [opaque_tx]
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
state_transition_and_sign_block(spec, state, block)
blob_sidecars = spec.get_blob_sidecars(block, blobs, kzg_proofs)
blobs = [sidecar.blob for sidecar in blob_sidecars]
kzg_proofs = [sidecar.kzg_proof for sidecar in blob_sidecars]
spec.validate_blobs(blob_kzg_commitments, blobs, kzg_proofs)
@with_deneb_and_later
@spec_state_test
def test_validate_blobs_zero_blobs(spec, state):
_run_validate_blobs(spec, state, blob_count=0)
@with_deneb_and_later
@spec_state_test
def test_validate_blobs_one_blob(spec, state):
_run_validate_blobs(spec, state, blob_count=1)
@with_deneb_and_later
@spec_state_test
def test_validate_blobs_two_blobs(spec, state):
_run_validate_blobs(spec, state, blob_count=2)
@with_deneb_and_later
@spec_state_test
def test_validate_blobs_max_blobs(spec, state):
_run_validate_blobs(spec, state, blob_count=spec.MAX_BLOBS_PER_BLOCK)

@@ -1,53 +0,0 @@
from eth2spec.test.helpers.state import (
state_transition_and_sign_block,
)
from eth2spec.test.helpers.block import (
build_empty_block_for_next_slot
)
from eth2spec.test.context import (
spec_state_test,
with_deneb_and_later,
)
from eth2spec.test.helpers.execution_payload import (
compute_el_block_hash,
)
from eth2spec.test.helpers.sharding import (
get_sample_opaque_tx,
)
def _run_validate_blobs_sidecar_test(spec, state, blob_count):
block = build_empty_block_for_next_slot(spec, state)
opaque_tx, blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count)
block.body.blob_kzg_commitments = blob_kzg_commitments
block.body.execution_payload.transactions = [opaque_tx]
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
state_transition_and_sign_block(spec, state, block)
blobs_sidecar = spec.get_blobs_sidecar(block, blobs)
expected_commitments = [spec.blob_to_kzg_commitment(blobs[i]) for i in range(blob_count)]
spec.validate_blobs_sidecar(block.slot, block.hash_tree_root(), expected_commitments, blobs_sidecar)
@with_deneb_and_later
@spec_state_test
def test_validate_blobs_sidecar_zero_blobs(spec, state):
_run_validate_blobs_sidecar_test(spec, state, blob_count=0)
@with_deneb_and_later
@spec_state_test
def test_validate_blobs_sidecar_one_blob(spec, state):
_run_validate_blobs_sidecar_test(spec, state, blob_count=1)
@with_deneb_and_later
@spec_state_test
def test_validate_blobs_sidecar_two_blobs(spec, state):
_run_validate_blobs_sidecar_test(spec, state, blob_count=2)
@with_deneb_and_later
@spec_state_test
def test_validate_blobs_sidecar_max_blobs(spec, state):
_run_validate_blobs_sidecar_test(spec, state, blob_count=spec.MAX_BLOBS_PER_BLOCK)

@@ -1,32 +1,108 @@
import random
from eth2spec.test.context import (
spec_state_test,
spec_test,
single_phase,
with_deneb_and_later,
expect_assertion_error
)
from eth2spec.test.helpers.sharding import (
get_sample_blob,
get_poly_in_both_forms,
eval_poly_in_coeff_form,
)
from eth2spec.utils import bls
from eth2spec.utils.bls import BLS_MODULUS
G1 = bls.G1_to_bytes48(bls.G1())
P1_NOT_IN_G1 = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" +
"0123456789abcdef0123456789abcdef0123456789abcdef")
P1_NOT_ON_CURVE = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" +
"0123456789abcdef0123456789abcdef0123456789abcde0")
def bls_add_one(x):
"""
Adds "one" (actually bls.G1()) to a compressed group element.
Useful to compute definitely incorrect proofs.
"""
return bls.G1_to_bytes48(
bls.add(bls.bytes48_to_G1(x), bls.G1())
)
def field_element_bytes(x):
return int.to_bytes(x % BLS_MODULUS, 32, "little")
@with_deneb_and_later
@spec_state_test
def test_verify_kzg_proof(spec, state):
x = 3
@spec_test
@single_phase
def test_verify_kzg_proof(spec):
"""
Test the wrapper functions (taking bytes arguments) for computing and verifying KZG proofs.
"""
x = field_element_bytes(3)
blob = get_sample_blob(spec)
commitment = spec.blob_to_kzg_commitment(blob)
proof, y = spec.compute_kzg_proof(blob, x)
assert spec.verify_kzg_proof(commitment, x, y, proof)
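For orientation, these wrapper tests assume a bytes-level layer that deserializes its arguments and delegates to the field-element `_impl` functions. A minimal sketch (the deserializer names are assumptions, not quoted from this diff):

def verify_kzg_proof_sketch(spec, commitment_bytes, z_bytes, y_bytes, proof_bytes):
    # Deserialize the 48-byte G1 points and 32-byte field elements, then delegate.
    return spec.verify_kzg_proof_impl(
        spec.bytes_to_kzg_commitment(commitment_bytes),
        spec.bytes_to_bls_field(z_bytes),
        spec.bytes_to_bls_field(y_bytes),
        spec.bytes_to_kzg_proof(proof_bytes),
    )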
@with_deneb_and_later
@spec_test
@single_phase
def test_verify_kzg_proof_incorrect_proof(spec):
"""
Test the wrapper function `verify_kzg_proof` fails on an incorrect proof.
"""
x = field_element_bytes(3465)
blob = get_sample_blob(spec)
commitment = spec.blob_to_kzg_commitment(blob)
proof, y = spec.compute_kzg_proof(blob, x)
proof = bls_add_one(proof)
assert not spec.verify_kzg_proof(commitment, x, y, proof)
@with_deneb_and_later
@spec_test
@single_phase
def test_verify_kzg_proof_impl(spec):
"""
Test the implementation functions (taking field element arguments) for computing and verifying KZG proofs.
"""
x = BLS_MODULUS - 1
blob = get_sample_blob(spec)
commitment = spec.blob_to_kzg_commitment(blob)
polynomial = spec.blob_to_polynomial(blob)
proof = spec.compute_kzg_proof_impl(polynomial, x)
proof, y = spec.compute_kzg_proof_impl(polynomial, x)
y = spec.evaluate_polynomial_in_evaluation_form(polynomial, x)
assert spec.verify_kzg_proof_impl(commitment, x, y, proof)
@with_deneb_and_later
@spec_state_test
def test_barycentric_outside_domain(spec, state):
@spec_test
@single_phase
def test_verify_kzg_proof_impl_incorrect_proof(spec):
"""
Test the implementation function `verify_kzg_proof_impl` fails on an incorrect proof
"""
x = 324561
blob = get_sample_blob(spec)
commitment = spec.blob_to_kzg_commitment(blob)
polynomial = spec.blob_to_polynomial(blob)
proof, y = spec.compute_kzg_proof_impl(polynomial, x)
proof = bls_add_one(proof)
assert not spec.verify_kzg_proof_impl(commitment, x, y, proof)
@with_deneb_and_later
@spec_test
@single_phase
def test_barycentric_outside_domain(spec):
"""
Test barycentric formula correctness by using it to evaluate a polynomial at a bunch of points outside its domain
(the roots of unity).
@@ -43,9 +119,9 @@ def test_barycentric_outside_domain(spec, state):
for _ in range(n_samples):
# Get a random evaluation point and make sure it's not a root of unity
z = rng.randint(0, spec.BLS_MODULUS - 1)
z = rng.randint(0, BLS_MODULUS - 1)
while z in roots_of_unity_brp:
z = rng.randint(0, spec.BLS_MODULUS - 1)
z = rng.randint(0, BLS_MODULUS - 1)
# Get p(z) by evaluating poly in coefficient form
p_z_coeff = eval_poly_in_coeff_form(spec, poly_coeff, z)
@@ -58,8 +134,9 @@ def test_barycentric_outside_domain(spec, state):
@with_deneb_and_later
@spec_state_test
def test_barycentric_within_domain(spec, state):
@spec_test
@single_phase
def test_barycentric_within_domain(spec):
"""
Test barycentric formula correctness by using it to evaluate a polynomial at all the points of its domain
(the roots of unity).
@@ -90,8 +167,9 @@ def test_barycentric_within_domain(spec, state):
@with_deneb_and_later
@spec_state_test
def test_compute_kzg_proof_within_domain(spec, state):
@spec_test
@single_phase
def test_compute_kzg_proof_within_domain(spec):
"""
Create and verify KZG proof that p(z) == y
where z is in the domain of our KZG scheme (i.e. a relevant root of unity).
@@ -103,7 +181,148 @@ def test_compute_kzg_proof_within_domain(spec, state):
roots_of_unity_brp = spec.bit_reversal_permutation(spec.ROOTS_OF_UNITY)
for i, z in enumerate(roots_of_unity_brp):
proof = spec.compute_kzg_proof_impl(polynomial, z)
proof, y = spec.compute_kzg_proof_impl(polynomial, z)
y = spec.evaluate_polynomial_in_evaluation_form(polynomial, z)
assert spec.verify_kzg_proof_impl(commitment, z, y, proof)
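For reference, the barycentric evaluation exercised by the three domain tests above is the standard formula over the N-th roots of unity \(\omega_i\) (a sketch; it applies for z outside the domain):

p(z) = \frac{z^N - 1}{N} \sum_{i=0}^{N-1} p(\omega_i) \cdot \frac{\omega_i}{z - \omega_i}

On the domain itself, \(p(\omega_i)\) is read off directly from the evaluation form, which is what test_barycentric_within_domain checks.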
@with_deneb_and_later
@spec_test
@single_phase
def test_verify_blob_kzg_proof(spec):
"""
Test the functions to compute and verify a blob KZG proof
"""
blob = get_sample_blob(spec)
commitment = spec.blob_to_kzg_commitment(blob)
proof = spec.compute_blob_kzg_proof(blob, commitment)
assert spec.verify_blob_kzg_proof(blob, commitment, proof)
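The blob proof tested here is a single KZG opening at a Fiat-Shamir challenge derived from the blob and its commitment; roughly (a sketch, assuming `compute_challenge` hashes both inputs):

def compute_blob_kzg_proof_sketch(spec, blob, commitment):
    challenge = spec.compute_challenge(blob, commitment)  # Fiat-Shamir evaluation point
    polynomial = spec.blob_to_polynomial(blob)
    proof, _y = spec.compute_kzg_proof_impl(polynomial, challenge)
    return proof  # the verifier recomputes the challenge and the claimed value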
@with_deneb_and_later
@spec_test
@single_phase
def test_verify_blob_kzg_proof_incorrect_proof(spec):
"""
Check that `verify_blob_kzg_proof` fails on an incorrect proof
"""
blob = get_sample_blob(spec)
commitment = spec.blob_to_kzg_commitment(blob)
proof = spec.compute_blob_kzg_proof(blob, commitment)
proof = bls_add_one(proof)
assert not spec.verify_blob_kzg_proof(blob, commitment, proof)
@with_deneb_and_later
@spec_test
@single_phase
def test_bls_modular_inverse(spec):
"""
Verify computation of multiplicative inverse
"""
rng = random.Random(5566)
# Should fail for x == 0
expect_assertion_error(lambda: spec.bls_modular_inverse(0))
expect_assertion_error(lambda: spec.bls_modular_inverse(spec.BLS_MODULUS))
expect_assertion_error(lambda: spec.bls_modular_inverse(2 * spec.BLS_MODULUS))
# Test a trivial inversion
assert 1 == int(spec.bls_modular_inverse(1))
# Test a random inversion
r = rng.randint(0, spec.BLS_MODULUS - 1)
r_inv = int(spec.bls_modular_inverse(r))
assert r * r_inv % BLS_MODULUS == 1
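Taken together, the assertions above pin `bls_modular_inverse` down to something like the following (a sketch, assuming Python 3.8+'s three-argument pow):

def bls_modular_inverse_sketch(x: int) -> int:
    # Reject anything congruent to zero, matching the expect_assertion_error cases.
    assert x % BLS_MODULUS != 0
    return pow(x, -1, BLS_MODULUS)  # multiplicative inverse modulo BLS_MODULUS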
@with_deneb_and_later
@spec_test
@single_phase
def test_validate_kzg_g1_generator(spec):
"""
Verify that `validate_kzg_g1` allows the generator G1
"""
spec.validate_kzg_g1(bls.G1_to_bytes48(bls.G1()))
@with_deneb_and_later
@spec_test
@single_phase
def test_validate_kzg_g1_neutral_element(spec):
"""
Verify that `validate_kzg_g1` allows the neutral element in G1
"""
spec.validate_kzg_g1(bls.G1_to_bytes48(bls.Z1()))
@with_deneb_and_later
@spec_test
@single_phase
def test_validate_kzg_g1_not_in_g1(spec):
"""
Verify that `validate_kzg_g1` fails on point not in G1
"""
expect_assertion_error(lambda: spec.validate_kzg_g1(P1_NOT_IN_G1))
@with_deneb_and_later
@spec_test
@single_phase
def test_validate_kzg_g1_not_on_curve(spec):
"""
Verify that `validate_kzg_g1` fails on point not on the curve
"""
expect_assertion_error(lambda: spec.validate_kzg_g1(P1_NOT_ON_CURVE))
@with_deneb_and_later
@spec_test
@single_phase
def test_bytes_to_bls_field_zero(spec):
"""
Verify that `bytes_to_bls_field` handles zero
"""
spec.bytes_to_bls_field(b"\0" * 32)
@with_deneb_and_later
@spec_test
@single_phase
def test_bytes_to_bls_field_modulus_minus_one(spec):
"""
Verify that `bytes_to_bls_field` handles modulus minus one
"""
spec.bytes_to_bls_field((BLS_MODULUS - 1).to_bytes(spec.BYTES_PER_FIELD_ELEMENT, spec.ENDIANNESS))
@with_deneb_and_later
@spec_test
@single_phase
def test_bytes_to_bls_field_modulus(spec):
"""
Verify that `bytes_to_bls_field` fails on BLS modulus
"""
expect_assertion_error(lambda: spec.bytes_to_bls_field(
BLS_MODULUS.to_bytes(spec.BYTES_PER_FIELD_ELEMENT, spec.ENDIANNESS)
))
@with_deneb_and_later
@spec_test
@single_phase
def test_bytes_to_bls_field_max(spec):
"""
Verify that `bytes_to_bls_field` fails on 2**256 - 1
"""
expect_assertion_error(lambda: spec.bytes_to_bls_field(b"\xFF" * 32))
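The three boundary cases above constrain `bytes_to_bls_field` to a shape like this (a sketch, assuming `spec.ENDIANNESS` is little-endian, consistent with `field_element_bytes`):

def bytes_to_bls_field_sketch(b: bytes) -> int:
    field_element = int.from_bytes(b, "little")
    assert field_element < BLS_MODULUS  # rejects BLS_MODULUS itself and 2**256 - 1
    return field_element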

View File

@@ -1,21 +0,0 @@
from eth2spec.test.helpers.constants import (
DENEB,
MINIMAL,
)
from eth2spec.test.helpers.sharding import (
get_sample_blob,
)
from eth2spec.test.context import (
with_phases,
spec_state_test,
with_presets,
)
@with_phases([DENEB])
@spec_state_test
@with_presets([MINIMAL])
def test_blob_to_kzg_commitment(spec, state):
blob = get_sample_blob(spec)
spec.blob_to_kzg_commitment(blob)

View File

@@ -17,7 +17,7 @@ from eth2spec.test.context import (
@spec_state_test
@with_presets([MINIMAL])
def test_tx_peek_blob_versioned_hashes(spec, state):
otx, blobs, commitments = get_sample_opaque_tx(spec)
otx, _, commitments, _ = get_sample_opaque_tx(spec)
data_hashes = spec.tx_peek_blob_versioned_hashes(otx)
expected = [spec.kzg_commitment_to_versioned_hash(blob_commitment) for blob_commitment in commitments]
assert expected == data_hashes
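For context, the versioned hash checked here is conventionally a version byte prepended to a truncated SHA-256 of the commitment. A self-contained sketch (the version byte value is an assumption):

import hashlib

VERSIONED_HASH_VERSION_KZG = b"\x01"  # assumed version byte

def kzg_commitment_to_versioned_hash_sketch(commitment: bytes) -> bytes:
    return VERSIONED_HASH_VERSION_KZG + hashlib.sha256(commitment).digest()[1:]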

View File

@@ -0,0 +1,158 @@
from eth2spec.test.context import (
always_bls,
spec_state_test,
with_deneb_and_later,
expect_assertion_error
)
from eth2spec.test.helpers.execution_payload import (
compute_el_block_hash,
)
from eth2spec.test.helpers.sharding import (
get_sample_opaque_tx,
)
from eth2spec.test.helpers.block import (
build_empty_block_for_next_slot
)
from eth2spec.test.helpers.keys import (
pubkey_to_privkey
)
@with_deneb_and_later
@spec_state_test
def test_validate_blobs_and_kzg_commitments(spec, state):
"""
Test `validate_blobs_and_kzg_commitments` with valid blobs, commitments, and proofs
"""
blob_count = 4
block = build_empty_block_for_next_slot(spec, state)
opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
block.body.blob_kzg_commitments = blob_kzg_commitments
block.body.execution_payload.transactions = [opaque_tx]
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
spec.validate_blobs_and_kzg_commitments(block.body.execution_payload,
blobs,
blob_kzg_commitments,
proofs)
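The happy path above and the three failure cases below all target a check shaped roughly like this (a sketch, under the assumption that batch proof verification and transaction cross-checking are used):

def validate_blobs_and_kzg_commitments_sketch(spec, execution_payload, blobs, commitments, proofs):
    # Commitments must match the versioned hashes in the payload's blob transactions.
    assert spec.verify_kzg_commitments_against_transactions(execution_payload.transactions, commitments)
    assert len(commitments) == len(blobs) == len(proofs)  # truncating blobs or proofs fails here
    assert spec.verify_blob_kzg_proof_batch(blobs, commitments, proofs)  # a corrupted blob fails here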
@with_deneb_and_later
@spec_state_test
def test_validate_blobs_and_kzg_commitments_missing_blob(spec, state):
"""
Test that `validate_blobs_and_kzg_commitments` fails when one blob is missing
"""
blob_count = 4
block = build_empty_block_for_next_slot(spec, state)
opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
block.body.blob_kzg_commitments = blob_kzg_commitments
block.body.execution_payload.transactions = [opaque_tx]
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
expect_assertion_error(
lambda: spec.validate_blobs_and_kzg_commitments(
block.body.execution_payload,
blobs[:-1],
blob_kzg_commitments,
proofs
)
)
@with_deneb_and_later
@spec_state_test
def test_validate_blobs_and_kzg_commitments_missing_proof(spec, state):
"""
Test that `validate_blobs_and_kzg_commitments` fails when one proof is missing
"""
blob_count = 4
block = build_empty_block_for_next_slot(spec, state)
opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
block.body.blob_kzg_commitments = blob_kzg_commitments
block.body.execution_payload.transactions = [opaque_tx]
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
expect_assertion_error(
lambda: spec.validate_blobs_and_kzg_commitments(
block.body.execution_payload,
blobs,
blob_kzg_commitments,
proofs[:-1]
)
)
@with_deneb_and_later
@spec_state_test
def test_validate_blobs_and_kzg_commitments_incorrect_blob(spec, state):
"""
Test that `validate_blobs_and_kzg_commitments` fails when a blob is corrupted
"""
blob_count = 4
block = build_empty_block_for_next_slot(spec, state)
opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
block.body.blob_kzg_commitments = blob_kzg_commitments
block.body.execution_payload.transactions = [opaque_tx]
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
# Corrupt one byte of the second blob so it no longer matches its commitment
blobs[1] = spec.Blob(blobs[1][:13] + bytes([(blobs[1][13] + 1) % 256]) + blobs[1][14:])
expect_assertion_error(
lambda: spec.validate_blobs_and_kzg_commitments(
block.body.execution_payload,
blobs,
blob_kzg_commitments,
proofs
)
)
@with_deneb_and_later
@spec_state_test
def test_blob_sidecar_signature(spec, state):
"""
Test `get_blob_sidecar_signature` with the proposer's private key
"""
blob_count = 4
block = build_empty_block_for_next_slot(spec, state)
opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
block.body.blob_kzg_commitments = blob_kzg_commitments
block.body.execution_payload.transactions = [opaque_tx]
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
blob_sidecars = spec.get_blob_sidecars(block, blobs, proofs)
proposer = state.validators[blob_sidecars[1].proposer_index]
privkey = pubkey_to_privkey[proposer.pubkey]
sidecar_signature = spec.get_blob_sidecar_signature(state,
blob_sidecars[1],
privkey)
signed_blob_sidecar = spec.SignedBlobSidecar(message=blob_sidecars[1], signature=sidecar_signature)
assert spec.verify_blob_sidecar_signature(state, signed_blob_sidecar)
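Both signature tests assume the usual domain/signing-root flow; a sketch (the domain constant name is an assumption, and `bls` is assumed imported from eth2spec.utils):

def get_blob_sidecar_signature_sketch(spec, state, sidecar, privkey):
    domain = spec.get_domain(state, spec.DOMAIN_BLOB_SIDECAR, spec.compute_epoch_at_slot(sidecar.slot))
    signing_root = spec.compute_signing_root(sidecar, domain)
    return bls.Sign(privkey, signing_root)  # a non-proposer key yields a signature that fails verification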
@with_deneb_and_later
@spec_state_test
@always_bls
def test_blob_sidecar_signature_incorrect(spec, state):
"""
Test that a blob sidecar signed with an incorrect private key fails signature verification
"""
blob_count = 4
block = build_empty_block_for_next_slot(spec, state)
opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
block.body.blob_kzg_commitments = blob_kzg_commitments
block.body.execution_payload.transactions = [opaque_tx]
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
blob_sidecars = spec.get_blob_sidecars(block, blobs, proofs)
sidecar_signature = spec.get_blob_sidecar_signature(state,
blob_sidecars[1],
123)  # deliberately incorrect private key
signed_blob_sidecar = spec.SignedBlobSidecar(message=blob_sidecars[1], signature=sidecar_signature)
assert not spec.verify_blob_sidecar_signature(state, signed_blob_sidecar)

View File

@@ -187,7 +187,7 @@ def add_attestations_to_state(spec, state, attestations, slot):
spec.process_attestation(state, attestation)
def _get_valid_attestation_at_slot(state, spec, slot_to_attest, participation_fn=None):
def get_valid_attestation_at_slot(state, spec, slot_to_attest, participation_fn=None):
committees_per_slot = spec.get_committee_count_per_slot(state, spec.compute_epoch_at_slot(slot_to_attest))
for index in range(committees_per_slot):
def participants_filter(comm):
@@ -262,7 +262,7 @@ def state_transition_with_full_block(spec,
if fill_cur_epoch and state.slot >= spec.MIN_ATTESTATION_INCLUSION_DELAY:
slot_to_attest = state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1
if slot_to_attest >= spec.compute_start_slot_at_epoch(spec.get_current_epoch(state)):
attestations = _get_valid_attestation_at_slot(
attestations = get_valid_attestation_at_slot(
state,
spec,
slot_to_attest,
@@ -272,7 +272,7 @@ def state_transition_with_full_block(spec,
block.body.attestations.append(attestation)
if fill_prev_epoch:
slot_to_attest = state.slot - spec.SLOTS_PER_EPOCH + 1
attestations = _get_valid_attestation_at_slot(
attestations = get_valid_attestation_at_slot(
state,
spec,
slot_to_attest,
@@ -300,7 +300,7 @@ def state_transition_with_full_attestations_block(spec, state, fill_cur_epoch, f
slots = state.slot % spec.SLOTS_PER_EPOCH
for slot_offset in range(slots):
target_slot = state.slot - slot_offset
attestations += _get_valid_attestation_at_slot(
attestations += get_valid_attestation_at_slot(
state,
spec,
target_slot,
@@ -311,7 +311,7 @@ def state_transition_with_full_attestations_block(spec, state, fill_cur_epoch, f
slots = spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH
for slot_offset in range(1, slots):
target_slot = state.slot - (state.slot % spec.SLOTS_PER_EPOCH) - slot_offset
attestations += _get_valid_attestation_at_slot(
attestations += get_valid_attestation_at_slot(
state,
spec,
target_slot,

View File

@@ -3,6 +3,7 @@ from eth2spec.test.exceptions import BlockNotFoundException
from eth2spec.test.helpers.attestations import (
next_epoch_with_attestations,
next_slots_with_attestations,
state_transition_with_full_block,
)
@@ -16,12 +17,13 @@ def get_anchor_root(spec, state):
def tick_and_add_block(spec, store, signed_block, test_steps, valid=True,
merge_block=False, block_not_found=False, is_optimistic=False):
pre_state = store.block_states[signed_block.message.parent_root]
block_time = pre_state.genesis_time + signed_block.message.slot * spec.config.SECONDS_PER_SLOT
if merge_block:
assert spec.is_merge_transition_block(pre_state, signed_block.message.body)
if store.time < block_time:
on_tick_and_append_step(spec, store, block_time, test_steps)
block_time = pre_state.genesis_time + signed_block.message.slot * spec.config.SECONDS_PER_SLOT
while store.time < block_time:
time = pre_state.genesis_time + (spec.get_current_slot(store) + 1) * spec.config.SECONDS_PER_SLOT
on_tick_and_append_step(spec, store, time, test_steps)
post_state = yield from add_block(
spec, store, signed_block, test_steps,
@@ -39,6 +41,11 @@ def add_attestation(spec, store, attestation, test_steps, is_from_block=False):
test_steps.append({'attestation': get_attestation_file_name(attestation)})
def add_attestations(spec, store, attestations, test_steps, is_from_block=False):
for attestation in attestations:
yield from add_attestation(spec, store, attestation, test_steps, is_from_block=is_from_block)
def tick_and_run_on_attestation(spec, store, attestation, test_steps, is_from_block=False):
parent_block = store.blocks[attestation.data.beacon_block_root]
pre_state = store.block_states[spec.hash_tree_root(parent_block)]
@@ -90,6 +97,7 @@ def get_attester_slashing_file_name(attester_slashing):
def on_tick_and_append_step(spec, store, time, test_steps):
spec.on_tick(store, time)
test_steps.append({'tick': int(time)})
output_store_checks(spec, store, test_steps)
def run_on_block(spec, store, signed_block, valid=True):
@@ -153,25 +161,7 @@ def add_block(spec,
assert store.blocks[block_root] == signed_block.message
assert store.block_states[block_root].hash_tree_root() == signed_block.message.state_root
if not is_optimistic:
test_steps.append({
'checks': {
'time': int(store.time),
'head': get_formatted_head_output(spec, store),
'justified_checkpoint': {
'epoch': int(store.justified_checkpoint.epoch),
'root': encode_hex(store.justified_checkpoint.root),
},
'finalized_checkpoint': {
'epoch': int(store.finalized_checkpoint.epoch),
'root': encode_hex(store.finalized_checkpoint.root),
},
'best_justified_checkpoint': {
'epoch': int(store.best_justified_checkpoint.epoch),
'root': encode_hex(store.best_justified_checkpoint.root),
},
'proposer_boost_root': encode_hex(store.proposer_boost_root),
}
})
output_store_checks(spec, store, test_steps)
return store.block_states[signed_block.message.hash_tree_root()]
@@ -217,6 +207,32 @@ def get_formatted_head_output(spec, store):
}
def output_head_check(spec, store, test_steps):
test_steps.append({
'checks': {
'head': get_formatted_head_output(spec, store),
}
})
def output_store_checks(spec, store, test_steps):
test_steps.append({
'checks': {
'time': int(store.time),
'head': get_formatted_head_output(spec, store),
'justified_checkpoint': {
'epoch': int(store.justified_checkpoint.epoch),
'root': encode_hex(store.justified_checkpoint.root),
},
'finalized_checkpoint': {
'epoch': int(store.finalized_checkpoint.epoch),
'root': encode_hex(store.finalized_checkpoint.root),
},
'proposer_boost_root': encode_hex(store.proposer_boost_root),
}
})
def apply_next_epoch_with_attestations(spec,
state,
store,
@@ -263,6 +279,39 @@ def apply_next_slots_with_attestations(spec,
return post_state, store, last_signed_block
def is_ready_to_justify(spec, state):
"""
Check if the given ``state`` will trigger justification updates at epoch boundary.
"""
temp_state = state.copy()
spec.process_justification_and_finalization(temp_state)
return temp_state.current_justified_checkpoint.epoch > state.current_justified_checkpoint.epoch
def find_next_justifying_slot(spec,
state,
fill_cur_epoch,
fill_prev_epoch,
participation_fn=None):
temp_state = state.copy()
signed_blocks = []
justifying_slot = None
while justifying_slot is None:
signed_block = state_transition_with_full_block(
spec,
temp_state,
fill_cur_epoch,
fill_prev_epoch,
participation_fn,
)
signed_blocks.append(signed_block)
if is_ready_to_justify(spec, temp_state):
justifying_slot = temp_state.slot
return signed_blocks, justifying_slot
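Typical usage replays the returned blocks into a store (mirroring the reorg tests further below):

# signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, True)
# for signed_block in signed_blocks:
#     yield from tick_and_add_block(spec, store, signed_block, test_steps)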
def get_pow_block_file_name(pow_block):
return f"pow_block_{encode_hex(pow_block.block_hash)}"

View File

@@ -47,6 +47,7 @@ def _set_operations_by_dict(block, operation_dict):
def _state_transition_and_sign_block_at_slot(spec,
state,
sync_aggregate=None,
operation_dict=None):
"""
Cribbed from ``transition_unsigned_block`` helper
@@ -61,6 +62,8 @@ def _state_transition_and_sign_block_at_slot(spec,
Thus use dict to pass operations.
"""
block = build_empty_block(spec, state)
if sync_aggregate is not None:
block.body.sync_aggregate = sync_aggregate
if operation_dict:
_set_operations_by_dict(block, operation_dict)
@@ -141,7 +144,7 @@ def state_transition_across_slots_with_ignoring_proposers(spec,
next_slot(spec, state)
def do_fork(state, spec, post_spec, fork_epoch, with_block=True, operation_dict=None):
def do_fork(state, spec, post_spec, fork_epoch, with_block=True, sync_aggregate=None, operation_dict=None):
spec.process_slots(state, state.slot + 1)
assert state.slot % spec.SLOTS_PER_EPOCH == 0
@@ -172,7 +175,12 @@ def do_fork(state, spec, post_spec, fork_epoch, with_block=True, operation_dict=
assert state.fork.current_version == post_spec.config.DENEB_FORK_VERSION
if with_block:
return state, _state_transition_and_sign_block_at_slot(post_spec, state, operation_dict=operation_dict)
return state, _state_transition_and_sign_block_at_slot(
post_spec,
state,
sync_aggregate=sync_aggregate,
operation_dict=operation_dict,
)
else:
return state, None
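With the new parameter, callers can thread a sync aggregate into the first post-fork block; hypothetical usage (with `sync_aggregate` built beforehand, e.g. via a helper such as `get_sync_aggregate`):

# state, signed_block = do_fork(
#     state, spec, post_spec, fork_epoch,
#     sync_aggregate=sync_aggregate,
# )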

View File

@@ -31,7 +31,7 @@ def get_sync_aggregate(spec, state, num_participants=None, signature_slot=None):
sync_committee_signature = compute_aggregate_sync_committee_signature(
spec,
signature_state,
signature_slot,
max(signature_slot, 1) - 1,
committee_indices[:num_participants],
)
sync_aggregate = spec.SyncAggregate(

View File

@@ -177,7 +177,7 @@ def get_opt_head_block_root(spec, mega_store):
return head
# Sort by latest attesting balance with ties broken lexicographically
# Ties broken by favoring block with lexicographically higher root
head = max(children, key=lambda root: (spec.get_latest_attesting_balance(store, root), root))
head = max(children, key=lambda root: (spec.get_weight(store, root), root))
def is_invalidated(mega_store, block_root):

View File

@@ -12,7 +12,7 @@ from eth2spec.utils.ssz.ssz_impl import serialize
#
# Containers from Deneb
# Containers from EIP-4844
#
MAX_CALLDATA_SIZE = 2**24
MAX_VERSIONED_HASHES_LIST_SIZE = 2**24
@@ -101,13 +101,16 @@ def get_poly_in_both_forms(spec, rng=None):
def get_sample_opaque_tx(spec, blob_count=1, rng=None):
blobs = []
blob_kzg_commitments = []
blob_kzg_proofs = []
blob_versioned_hashes = []
for _ in range(blob_count):
blob = get_sample_blob(spec, rng)
blob_commitment = spec.KZGCommitment(spec.blob_to_kzg_commitment(blob))
blob_kzg_proof = spec.compute_blob_kzg_proof(blob, blob_commitment)
blob_versioned_hash = spec.kzg_commitment_to_versioned_hash(blob_commitment)
blobs.append(blob)
blob_kzg_commitments.append(blob_commitment)
blob_kzg_proofs.append(blob_kzg_proof)
blob_versioned_hashes.append(blob_versioned_hash)
signed_blob_tx = SignedBlobTransaction(
@@ -117,4 +120,4 @@ def get_sample_opaque_tx(spec, blob_count=1, rng=None):
)
serialized_tx = serialize(signed_blob_tx)
opaque_tx = spec.uint_to_bytes(spec.BLOB_TX_TYPE) + serialized_tx
return opaque_tx, blobs, blob_kzg_commitments
return opaque_tx, blobs, blob_kzg_commitments, blob_kzg_proofs

View File

@@ -1,7 +1,7 @@
from eth2spec.test.context import (
MAINNET,
spec_state_test,
with_all_phases,
with_altair_and_later,
with_presets,
)
from eth2spec.test.helpers.attestations import (
@@ -31,7 +31,7 @@ def _apply_base_block_a(spec, state, store, test_steps):
assert spec.get_head(store) == signed_block_a.message.hash_tree_root()
@with_all_phases
@with_altair_and_later
@spec_state_test
def test_ex_ante_vanilla(spec, state):
"""
@@ -118,7 +118,7 @@ def _get_greater_than_proposer_boost_score(spec, store, state, proposer_boost_ro
return proposer_score // base_effective_balance + 1
@with_all_phases
@with_altair_and_later
@with_presets([MAINNET], reason="to create non-duplicate committee")
@spec_state_test
def test_ex_ante_attestations_is_greater_than_proposer_boost_with_boost(spec, state):
@@ -191,7 +191,7 @@ def test_ex_ante_attestations_is_greater_than_proposer_boost_with_boost(spec, st
yield 'steps', test_steps
@with_all_phases
@with_altair_and_later
@spec_state_test
def test_ex_ante_sandwich_without_attestations(spec, state):
"""
@@ -254,7 +254,7 @@ def test_ex_ante_sandwich_without_attestations(spec, state):
yield 'steps', test_steps
@with_all_phases
@with_altair_and_later
@spec_state_test
def test_ex_ante_sandwich_with_honest_attestation(spec, state):
"""
@@ -335,7 +335,7 @@ def test_ex_ante_sandwich_with_honest_attestation(spec, state):
yield 'steps', test_steps
@with_all_phases
@with_altair_and_later
@with_presets([MAINNET], reason="to create non-duplicate committee")
@spec_state_test
def test_ex_ante_sandwich_with_boost_not_sufficient(spec, state):

View File

@@ -1,9 +1,8 @@
import random
from eth_utils import encode_hex
from eth2spec.test.context import (
spec_state_test,
with_all_phases,
with_altair_and_later,
with_presets,
)
from eth2spec.test.helpers.attestations import get_valid_attestation, next_epoch_with_attestations
@@ -22,6 +21,8 @@ from eth2spec.test.helpers.fork_choice import (
add_attestation,
tick_and_run_on_attestation,
tick_and_add_block,
output_head_check,
apply_next_epoch_with_attestations,
)
from eth2spec.test.helpers.forks import (
is_post_altair,
@@ -36,7 +37,7 @@ from eth2spec.test.helpers.state import (
rng = random.Random(1001)
@with_all_phases
@with_altair_and_later
@spec_state_test
def test_genesis(spec, state):
test_steps = []
@@ -60,7 +61,7 @@ def test_genesis(spec, state):
yield 'description', 'meta', f"Although it's not phase 0, we may use {spec.fork} spec to start testnets."
@with_all_phases
@with_altair_and_later
@spec_state_test
def test_chain_no_attestations(spec, state):
test_steps = []
@@ -71,11 +72,7 @@ def test_chain_no_attestations(spec, state):
anchor_root = get_anchor_root(spec, state)
assert spec.get_head(store) == anchor_root
test_steps.append({
'checks': {
'head': get_formatted_head_output(spec, store),
}
})
output_head_check(spec, store, test_steps)
# On receiving a block of `GENESIS_SLOT + 1` slot
block_1 = build_empty_block_for_next_slot(spec, state)
@@ -88,16 +85,12 @@ def test_chain_no_attestations(spec, state):
yield from tick_and_add_block(spec, store, signed_block_2, test_steps)
assert spec.get_head(store) == spec.hash_tree_root(block_2)
test_steps.append({
'checks': {
'head': get_formatted_head_output(spec, store),
}
})
output_head_check(spec, store, test_steps)
yield 'steps', test_steps
@with_all_phases
@with_altair_and_later
@spec_state_test
def test_split_tie_breaker_no_attestations(spec, state):
test_steps = []
@@ -109,11 +102,7 @@ def test_split_tie_breaker_no_attestations(spec, state):
yield 'anchor_block', anchor_block
anchor_root = get_anchor_root(spec, state)
assert spec.get_head(store) == anchor_root
test_steps.append({
'checks': {
'head': get_formatted_head_output(spec, store),
}
})
output_head_check(spec, store, test_steps)
# Create block at slot 1
block_1_state = genesis_state.copy()
@@ -135,16 +124,12 @@ def test_split_tie_breaker_no_attestations(spec, state):
highest_root = max(spec.hash_tree_root(block_1), spec.hash_tree_root(block_2))
assert spec.get_head(store) == highest_root
test_steps.append({
'checks': {
'head': get_formatted_head_output(spec, store),
}
})
output_head_check(spec, store, test_steps)
yield 'steps', test_steps
@with_all_phases
@with_altair_and_later
@spec_state_test
def test_shorter_chain_but_heavier_weight(spec, state):
test_steps = []
@@ -156,11 +141,7 @@ def test_shorter_chain_but_heavier_weight(spec, state):
yield 'anchor_block', anchor_block
anchor_root = get_anchor_root(spec, state)
assert spec.get_head(store) == anchor_root
test_steps.append({
'checks': {
'head': get_formatted_head_output(spec, store),
}
})
output_head_check(spec, store, test_steps)
# build longer tree
long_state = genesis_state.copy()
@@ -183,16 +164,12 @@ def test_shorter_chain_but_heavier_weight(spec, state):
yield from tick_and_run_on_attestation(spec, store, short_attestation, test_steps)
assert spec.get_head(store) == spec.hash_tree_root(short_block)
test_steps.append({
'checks': {
'head': get_formatted_head_output(spec, store),
}
})
output_head_check(spec, store, test_steps)
yield 'steps', test_steps
@with_all_phases
@with_altair_and_later
@spec_state_test
@with_presets([MINIMAL], reason="too slow")
def test_filtered_block_tree(spec, state):
@@ -203,11 +180,7 @@ def test_filtered_block_tree(spec, state):
yield 'anchor_block', anchor_block
anchor_root = get_anchor_root(spec, state)
assert spec.get_head(store) == anchor_root
test_steps.append({
'checks': {
'head': get_formatted_head_output(spec, store),
}
})
output_head_check(spec, store, test_steps)
# transition state past initial couple of epochs
next_epoch(spec, state)
@@ -227,13 +200,7 @@ def test_filtered_block_tree(spec, state):
# the last block in the branch should be the head
expected_head_root = spec.hash_tree_root(signed_blocks[-1].message)
assert spec.get_head(store) == expected_head_root
test_steps.append({
'checks': {
'head': get_formatted_head_output(spec, store),
'justified_checkpoint_root': encode_hex(store.justified_checkpoint.root),
}
})
output_head_check(spec, store, test_steps)
#
# create branch containing the justified block but not containing enough on
@@ -274,16 +241,12 @@ def test_filtered_block_tree(spec, state):
# ensure that get_head still returns the head from the previous branch
assert spec.get_head(store) == expected_head_root
test_steps.append({
'checks': {
'head': get_formatted_head_output(spec, store)
}
})
output_head_check(spec, store, test_steps)
yield 'steps', test_steps
@with_all_phases
@with_altair_and_later
@spec_state_test
def test_proposer_boost_correct_head(spec, state):
test_steps = []
@@ -295,11 +258,7 @@ def test_proposer_boost_correct_head(spec, state):
yield 'anchor_block', anchor_block
anchor_root = get_anchor_root(spec, state)
assert spec.get_head(store) == anchor_root
test_steps.append({
'checks': {
'head': get_formatted_head_output(spec, store),
}
})
output_head_check(spec, store, test_steps)
# Build block that serves as head ONLY on timely arrival, and ONLY in that slot
state_1 = genesis_state.copy()
@@ -337,19 +296,14 @@ def test_proposer_boost_correct_head(spec, state):
on_tick_and_append_step(spec, store, time, test_steps)
assert store.proposer_boost_root == spec.Root()
assert spec.get_head(store) == spec.hash_tree_root(block_2)
test_steps.append({
'checks': {
'head': get_formatted_head_output(spec, store),
}
})
output_head_check(spec, store, test_steps)
yield 'steps', test_steps
@with_all_phases
@with_altair_and_later
@spec_state_test
def test_discard_equivocations(spec, state):
def test_discard_equivocations_on_attester_slashing(spec, state):
test_steps = []
genesis_state = state.copy()
@@ -359,11 +313,7 @@ def test_discard_equivocations(spec, state):
yield 'anchor_block', anchor_block
anchor_root = get_anchor_root(spec, state)
assert spec.get_head(store) == anchor_root
test_steps.append({
'checks': {
'head': get_formatted_head_output(spec, store),
}
})
output_head_check(spec, store, test_steps)
# Build block that serves as head before discarding equivocations
state_1 = genesis_state.copy()
@@ -418,11 +368,359 @@ def test_discard_equivocations(spec, state):
# The head should revert to block_2
yield from add_attester_slashing(spec, store, attester_slashing, test_steps)
assert spec.get_head(store) == spec.hash_tree_root(block_2)
test_steps.append({
'checks': {
'head': get_formatted_head_output(spec, store),
}
})
output_head_check(spec, store, test_steps)
yield 'steps', test_steps
@with_altair_and_later
@spec_state_test
@with_presets([MINIMAL], reason="too slow")
def test_discard_equivocations_slashed_validator_censoring(spec, state):
# Check that the store does not count LMD votes from validators that are slashed in the justified state
test_steps = []
# Initialization
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 0
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 0
assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0
# We will slash all validators voting at the 2nd slot of epoch 0
current_slot = spec.get_current_slot(store)
eqv_slot = current_slot + 1
eqv_epoch = spec.compute_epoch_at_slot(eqv_slot)
assert eqv_slot % spec.SLOTS_PER_EPOCH == 1
assert eqv_epoch == 0
slashed_validators = []
comm_count = spec.get_committee_count_per_slot(state, eqv_epoch)
for comm_index in range(comm_count):
comm = spec.get_beacon_committee(state, eqv_slot, comm_index)
slashed_validators += comm
assert len(slashed_validators) > 0
# Slash those validators in the state
for val_index in slashed_validators:
state.validators[val_index].slashed = True
# Store this state as the anchor state
anchor_state = state.copy()
# Generate an anchor block with correct state root
anchor_block = spec.BeaconBlock(state_root=anchor_state.hash_tree_root())
yield 'anchor_state', anchor_state
yield 'anchor_block', anchor_block
# Get a new store with the anchor state & anchor block
store = spec.get_forkchoice_store(anchor_state, anchor_block)
# Now generate the store checks
current_time = anchor_state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
on_tick_and_append_step(spec, store, current_time, test_steps)
assert store.time == current_time
# Create two competing blocks at eqv_slot
next_slots(spec, state, eqv_slot - state.slot - 1)
assert state.slot == eqv_slot - 1
state_1 = state.copy()
block_1 = build_empty_block_for_next_slot(spec, state_1)
signed_block_1 = state_transition_and_sign_block(spec, state_1, block_1)
state_2 = state.copy()
block_2 = build_empty_block_for_next_slot(spec, state_2)
block_2.body.graffiti = b'\x42' * 32  # differentiate the two equivocating blocks
signed_block_2 = state_transition_and_sign_block(spec, state_2, block_2)
assert block_1.slot == block_2.slot == eqv_slot
# Add both blocks to the store
yield from tick_and_add_block(spec, store, signed_block_1, test_steps)
yield from tick_and_add_block(spec, store, signed_block_2, test_steps)
# Find out which block will win in tie breaking
if spec.hash_tree_root(block_1) < spec.hash_tree_root(block_2):
block_low_root = block_1.hash_tree_root()
block_low_root_post_state = state_1
block_high_root = block_2.hash_tree_root()
else:
block_low_root = block_2.hash_tree_root()
block_low_root_post_state = state_2
block_high_root = block_1.hash_tree_root()
assert block_low_root < block_high_root
# Tick to next slot so proposer boost does not apply
current_time = store.genesis_time + (block_1.slot + 1) * spec.config.SECONDS_PER_SLOT
on_tick_and_append_step(spec, store, current_time, test_steps)
# Check that block with higher root wins
assert spec.get_head(store) == block_high_root
# Create attestation for block with lower root
attestation = get_valid_attestation(spec, block_low_root_post_state, slot=eqv_slot, index=0, signed=True)
# Check that all attesting validators were slashed in the anchor state
att_comm = spec.get_beacon_committee(block_low_root_post_state, eqv_slot, 0)
for i in att_comm:
assert anchor_state.validators[i].slashed
# Add attestation to the store
yield from add_attestation(spec, store, attestation, test_steps)
# Check that block with higher root still wins
assert spec.get_head(store) == block_high_root
output_head_check(spec, store, test_steps)
yield 'steps', test_steps
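The censoring asserted by this test falls out of the LMD weight computation excluding slashed (and equivocating) validators; a sketch of the assumed filter (proposer boost omitted):

def get_weight_sketch(spec, store, root):
    state = store.checkpoint_states[store.justified_checkpoint]
    active_unslashed = [
        i for i in spec.get_active_validator_indices(state, spec.get_current_epoch(state))
        if not state.validators[i].slashed  # votes from validators slashed in the justified state are ignored
    ]
    return sum(
        state.validators[i].effective_balance
        for i in active_unslashed
        if (i in store.latest_messages
            and i not in store.equivocating_indices
            and spec.get_ancestor(store, store.latest_messages[i].root, store.blocks[root].slot) == root)
    )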
@with_altair_and_later
@spec_state_test
@with_presets([MINIMAL], reason="too slow")
def test_voting_source_within_two_epoch(spec, state):
"""
Check that the store allows for a head block that has:
- store.voting_source[block_root].epoch != store.justified_checkpoint.epoch, and
- store.unrealized_justifications[block_root].epoch >= store.justified_checkpoint.epoch, and
- store.voting_source[block_root].epoch + 2 >= current_epoch, and
- store.finalized_checkpoint.root == get_ancestor(store, block_root, finalized_slot)
"""
test_steps = []
# Initialization
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
yield 'anchor_state', state
yield 'anchor_block', anchor_block
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
on_tick_and_append_step(spec, store, current_time, test_steps)
assert store.time == current_time
next_epoch(spec, state)
on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
# Fill epoch 1 to 3
for _ in range(3):
state, store, _ = yield from apply_next_epoch_with_attestations(
spec, state, store, True, True, test_steps=test_steps)
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
assert store.finalized_checkpoint.epoch == 2
# Copy the state to use later
fork_state = state.copy()
# Fill epoch 4
state, store, _ = yield from apply_next_epoch_with_attestations(
spec, state, store, True, True, test_steps=test_steps)
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4
assert store.finalized_checkpoint.epoch == 3
# Create a fork from the earlier saved state
next_epoch(spec, fork_state)
assert spec.compute_epoch_at_slot(fork_state.slot) == 5
_, signed_blocks, fork_state = next_epoch_with_attestations(spec, fork_state, True, True)
# Only keep the blocks from epoch 5, so discard the last generated block
signed_blocks = signed_blocks[:-1]
last_fork_block = signed_blocks[-1].message
assert spec.compute_epoch_at_slot(last_fork_block.slot) == 5
# Now add the fork to the store
for signed_block in signed_blocks:
yield from tick_and_add_block(spec, store, signed_block, test_steps)
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4
assert store.finalized_checkpoint.epoch == 3
# Check that the last block from the fork is the head
# LMD votes for the competing branch are overwritten so this fork should win
last_fork_block_root = last_fork_block.hash_tree_root()
# assert store.voting_source[last_fork_block_root].epoch != store.justified_checkpoint.epoch
assert store.unrealized_justifications[last_fork_block_root].epoch >= store.justified_checkpoint.epoch
# assert store.voting_source[last_fork_block_root].epoch + 2 >= \
# spec.compute_epoch_at_slot(spec.get_current_slot(store))
finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
assert store.finalized_checkpoint.root == spec.get_ancestor(store, last_fork_block_root, finalized_slot)
assert spec.get_head(store) == last_fork_block_root
yield 'steps', test_steps
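This test and the next bracket the head-viability condition from opposite sides; a paraphrase of the assumed `filter_block_tree` check (`voting_source`, `block_root`, and `current_epoch` as evaluated in the store):

correct_justified = (
    store.justified_checkpoint.epoch == spec.GENESIS_EPOCH
    or voting_source.epoch == store.justified_checkpoint.epoch
    # Unrealized justification may stand in, but only while the voting source
    # is at most two epochs behind the current epoch:
    or (store.unrealized_justifications[block_root].epoch >= store.justified_checkpoint.epoch
        and voting_source.epoch + 2 >= current_epoch)
)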
@with_altair_and_later
@spec_state_test
@with_presets([MINIMAL], reason="too slow")
def test_voting_source_beyond_two_epoch(spec, state):
"""
Check that the store doesn't allow for a head block that has:
- store.voting_source[block_root].epoch != store.justified_checkpoint.epoch, and
- store.unrealized_justifications[block_root].epoch >= store.justified_checkpoint.epoch, and
- store.voting_source[block_root].epoch + 2 < current_epoch, and
- store.finalized_checkpoint.root == get_ancestor(store, block_root, finalized_slot)
"""
test_steps = []
# Initialization
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
yield 'anchor_state', state
yield 'anchor_block', anchor_block
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
on_tick_and_append_step(spec, store, current_time, test_steps)
assert store.time == current_time
next_epoch(spec, state)
on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
# Fill epoch 1 to 3
for _ in range(3):
state, store, _ = yield from apply_next_epoch_with_attestations(
spec, state, store, True, True, test_steps=test_steps)
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
assert store.finalized_checkpoint.epoch == 2
# Copy the state to use later
fork_state = state.copy()
# Fill epoch 4 and 5
for _ in range(2):
state, store, _ = yield from apply_next_epoch_with_attestations(
spec, state, store, True, True, test_steps=test_steps)
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5
assert store.finalized_checkpoint.epoch == 4
# Create a fork from the earlier saved state
for _ in range(2):
next_epoch(spec, fork_state)
assert spec.compute_epoch_at_slot(fork_state.slot) == 6
assert fork_state.current_justified_checkpoint.epoch == 3
_, signed_blocks, fork_state = next_epoch_with_attestations(spec, fork_state, True, True)
# Only keep the blocks from epoch 6, so discard the last generated block
signed_blocks = signed_blocks[:-1]
last_fork_block = signed_blocks[-1].message
assert spec.compute_epoch_at_slot(last_fork_block.slot) == 6
# Store the head before adding the fork to the store
correct_head = spec.get_head(store)
# Now add the fork to the store
for signed_block in signed_blocks:
yield from tick_and_add_block(spec, store, signed_block, test_steps)
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5
assert store.finalized_checkpoint.epoch == 4
last_fork_block_root = last_fork_block.hash_tree_root()
last_fork_block_state = store.block_states[last_fork_block_root]
assert last_fork_block_state.current_justified_checkpoint.epoch == 3
# Check that the head is unchanged
# assert store.voting_source[last_fork_block_root].epoch != store.justified_checkpoint.epoch
assert store.unrealized_justifications[last_fork_block_root].epoch >= store.justified_checkpoint.epoch
# assert store.voting_source[last_fork_block_root].epoch + 2 < \
# spec.compute_epoch_at_slot(spec.get_current_slot(store))
finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
assert store.finalized_checkpoint.root == spec.get_ancestor(store, last_fork_block_root, finalized_slot)
assert spec.get_head(store) == correct_head
yield 'steps', test_steps
"""
Note:
We are unable to generate test vectors that check failure of the correct_finalized condition.
We cannot generate a block that:
- has !correct_finalized, and
- has correct_justified, and
- is a descendant of store.justified_checkpoint.root
The block being a descendant of store.justified_checkpoint.root is necessary because
filter_block_tree descends the tree starting at store.justified_checkpoint.root
@with_altair_and_later
@spec_state_test
def test_incorrect_finalized(spec, state):
# Check that the store doesn't allow for a head block that has:
# - store.voting_source[block_root].epoch == store.justified_checkpoint.epoch, and
# - store.finalized_checkpoint.epoch != GENESIS_EPOCH, and
# - store.finalized_checkpoint.root != get_ancestor(store, block_root, finalized_slot)
test_steps = []
# Initialization
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
yield 'anchor_state', state
yield 'anchor_block', anchor_block
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
on_tick_and_append_step(spec, store, current_time, test_steps)
assert store.time == current_time
next_epoch(spec, state)
on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
# Fill epoch 1 to 4
for _ in range(4):
state, store, _ = yield from apply_next_epoch_with_attestations(
spec, state, store, True, True, test_steps=test_steps)
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4
assert store.finalized_checkpoint.epoch == 3
# Identify the fork block as the last block in epoch 4
fork_block_root = state.latest_block_header.parent_root
fork_block = store.blocks[fork_block_root]
assert spec.compute_epoch_at_slot(fork_block.slot) == 4
# Copy the state to use later
fork_state = store.block_states[fork_block_root].copy()
assert spec.compute_epoch_at_slot(fork_state.slot) == 4
assert fork_state.current_justified_checkpoint.epoch == 3
assert fork_state.finalized_checkpoint.epoch == 2
# Create a fork from the earlier saved state
for _ in range(2):
next_epoch(spec, fork_state)
assert spec.compute_epoch_at_slot(fork_state.slot) == 6
assert fork_state.current_justified_checkpoint.epoch == 4
assert fork_state.finalized_checkpoint.epoch == 3
# Fill epoch 6
signed_blocks = []
_, signed_blocks_1, fork_state = next_epoch_with_attestations(spec, fork_state, True, False)
signed_blocks += signed_blocks_1
assert spec.compute_epoch_at_slot(fork_state.slot) == 7
# Check that epoch 6 is justified in this fork - it will be used as voting source for the tip of this fork
assert fork_state.current_justified_checkpoint.epoch == 6
assert fork_state.finalized_checkpoint.epoch == 3
# Create a chain in epoch 7 that has new justification for epoch 7
_, signed_blocks_2, fork_state = next_epoch_with_attestations(spec, fork_state, True, False)
# Only keep the blocks from epoch 7, so discard the last generated block
signed_blocks_2 = signed_blocks_2[:-1]
signed_blocks += signed_blocks_2
last_fork_block = signed_blocks[-1].message
assert spec.compute_epoch_at_slot(last_fork_block.slot) == 7
# Now add the fork to the store
for signed_block in signed_blocks:
yield from tick_and_add_block(spec, store, signed_block, test_steps)
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7
assert store.justified_checkpoint.epoch == 6
assert store.finalized_checkpoint.epoch == 3
# Fill epoch 5 and 6 in the original chain
for _ in range(2):
state, store, signed_head_block = yield from apply_next_epoch_with_attestations(
spec, state, store, True, False, test_steps=test_steps)
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 6
assert store.finalized_checkpoint.epoch == 5
# Store the expected head
head_root = signed_head_block.message.hash_tree_root()
# Check that the head is unchanged
last_fork_block_root = last_fork_block.hash_tree_root()
assert store.voting_source[last_fork_block_root].epoch == store.justified_checkpoint.epoch
assert store.finalized_checkpoint.epoch != spec.GENESIS_EPOCH
finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
assert store.finalized_checkpoint.root != spec.get_ancestor(store, last_fork_block_root, finalized_slot)
assert spec.get_head(store) != last_fork_block_root
assert spec.get_head(store) == head_root
yield 'steps', test_steps
"""

View File

@@ -0,0 +1,498 @@
from eth2spec.test.context import (
spec_state_test,
with_altair_and_later,
with_presets,
)
from eth2spec.test.helpers.constants import (
MINIMAL,
)
from eth2spec.test.helpers.attestations import (
state_transition_with_full_block,
get_valid_attestation,
get_valid_attestation_at_slot,
)
from eth2spec.test.helpers.block import (
build_empty_block,
build_empty_block_for_next_slot,
)
from eth2spec.test.helpers.fork_choice import (
get_genesis_forkchoice_store_and_block,
on_tick_and_append_step,
add_attestations,
tick_and_add_block,
apply_next_epoch_with_attestations,
find_next_justifying_slot,
is_ready_to_justify,
)
from eth2spec.test.helpers.state import (
state_transition_and_sign_block,
next_epoch,
next_slot,
transition_to,
)
TESTING_PRESETS = [MINIMAL]
@with_altair_and_later
@spec_state_test
@with_presets(TESTING_PRESETS, reason="too slow")
def test_simple_attempted_reorg_without_enough_ffg_votes(spec, state):
"""
[Case 1]
{ epoch 4 }{ epoch 5 }
[c4]<--[a]<--[-]<--[y]
↑____[-]<--[z]
At c4, c3 is the latest justified checkpoint (or something earlier)
The block y doesn't have enough votes to justify c4.
The block z also doesn't have enough votes to justify c4.
"""
test_steps = []
# Initialization
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
yield 'anchor_state', state
yield 'anchor_block', anchor_block
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
on_tick_and_append_step(spec, store, current_time, test_steps)
assert store.time == current_time
next_epoch(spec, state)
on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
# Fill epoch 1 to 3
for _ in range(3):
state, store, _ = yield from apply_next_epoch_with_attestations(
spec, state, store, True, True, test_steps=test_steps)
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
# create block_a; 2 more full blocks are needed to justify epoch 4
signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, True)
assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state)
for signed_block in signed_blocks[:-2]:
yield from tick_and_add_block(spec, store, signed_block, test_steps)
assert spec.get_head(store) == signed_block.message.hash_tree_root()
state = store.block_states[spec.get_head(store)].copy()
assert state.current_justified_checkpoint.epoch == 3
next_slot(spec, state)
state_a = state.copy()
# to test the "no withholding" situation, temporarily store the blocks in lists
signed_blocks_of_y = []
signed_blocks_of_z = []
# add an empty block on chain y
block_y = build_empty_block_for_next_slot(spec, state)
signed_block_y = state_transition_and_sign_block(spec, state, block_y)
signed_blocks_of_y.append(signed_block_y)
# chain y has some on-chain attestations, but not enough to justify c4
signed_block_y = state_transition_with_full_block(spec, state, True, True)
assert not is_ready_to_justify(spec, state)
signed_blocks_of_y.append(signed_block_y)
assert store.justified_checkpoint.epoch == 3
state = state_a.copy()
signed_block_z = None
# add one block on chain z, which is not enough to justify c4
attestation = get_valid_attestation(spec, state, slot=state.slot, signed=True)
block_z = build_empty_block_for_next_slot(spec, state)
block_z.body.attestations = [attestation]
signed_block_z = state_transition_and_sign_block(spec, state, block_z)
signed_blocks_of_z.append(signed_block_z)
# add an empty block on chain z
block_z = build_empty_block_for_next_slot(spec, state)
signed_block_z = state_transition_and_sign_block(spec, state, block_z)
signed_blocks_of_z.append(signed_block_z)
# ensure z couldn't justify c4
assert not is_ready_to_justify(spec, state)
# apply blocks to store
# (i) slot block_a.slot + 1
signed_block_y = signed_blocks_of_y.pop(0)
yield from tick_and_add_block(spec, store, signed_block_y, test_steps)
# apply block of chain `z`
signed_block_z = signed_blocks_of_z.pop(0)
yield from tick_and_add_block(spec, store, signed_block_z, test_steps)
# (ii) slot block_a.slot + 2
# apply block of chain `z`
signed_block_z = signed_blocks_of_z.pop(0)
yield from tick_and_add_block(spec, store, signed_block_z, test_steps)
# apply block of chain `y`
signed_block_y = signed_blocks_of_y.pop(0)
yield from tick_and_add_block(spec, store, signed_block_y, test_steps)
# chain `y` remains the winner since it arrives earlier than `z`
assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
assert len(signed_blocks_of_y) == len(signed_blocks_of_z) == 0
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
# tick to the slot prior to the epoch boundary
slot = state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) - 1
current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
on_tick_and_append_step(spec, store, current_time, test_steps)
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
# chain `y` remains the winner
assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
# to the next epoch
next_epoch(spec, state)
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
on_tick_and_append_step(spec, store, current_time, test_steps)
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
yield 'steps', test_steps
def _run_delayed_justification(spec, state, attemped_reorg, is_justifying_previous_epoch):
"""
"""
test_steps = []
# Initialization
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
yield 'anchor_state', state
yield 'anchor_block', anchor_block
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
on_tick_and_append_step(spec, store, current_time, test_steps)
assert store.time == current_time
next_epoch(spec, state)
on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
# Fill epoch 1 to 2
for _ in range(2):
state, store, _ = yield from apply_next_epoch_with_attestations(
spec, state, store, True, True, test_steps=test_steps)
if is_justifying_previous_epoch:
state, store, _ = yield from apply_next_epoch_with_attestations(
spec, state, store, False, False, test_steps=test_steps)
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 2
else:
state, store, _ = yield from apply_next_epoch_with_attestations(
spec, state, store, True, True, test_steps=test_steps)
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
if is_justifying_previous_epoch:
# try to find the block that can justify epoch 3
signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, False, True)
else:
# try to find the block that can justify epoch 4
signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, True)
assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state)
for signed_block in signed_blocks:
yield from tick_and_add_block(spec, store, signed_block, test_steps)
assert spec.get_head(store) == signed_block.message.hash_tree_root()
state = store.block_states[spec.get_head(store)].copy()
if is_justifying_previous_epoch:
assert state.current_justified_checkpoint.epoch == 2
else:
assert state.current_justified_checkpoint.epoch == 3
assert is_ready_to_justify(spec, state)
state_b = state.copy()
# add chain y
if is_justifying_previous_epoch:
signed_block_y = state_transition_with_full_block(spec, state, False, True)
else:
signed_block_y = state_transition_with_full_block(spec, state, True, True)
yield from tick_and_add_block(spec, store, signed_block_y, test_steps)
assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
if is_justifying_previous_epoch:
assert store.justified_checkpoint.epoch == 2
else:
assert store.justified_checkpoint.epoch == 3
# add attestations of y
temp_state = state.copy()
next_slot(spec, temp_state)
attestations_for_y = list(get_valid_attestation_at_slot(temp_state, spec, signed_block_y.message.slot))
current_time = temp_state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
on_tick_and_append_step(spec, store, current_time, test_steps)
yield from add_attestations(spec, store, attestations_for_y, test_steps)
assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
if attemped_reorg:
# add chain z
state = state_b.copy()
slot = state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) - 1
transition_to(spec, state, slot)
block_z = build_empty_block_for_next_slot(spec, state)
assert spec.compute_epoch_at_slot(block_z.slot) == 5
signed_block_z = state_transition_and_sign_block(spec, state, block_z)
yield from tick_and_add_block(spec, store, signed_block_z, test_steps)
else:
# next epoch
state = state_b.copy()
next_epoch(spec, state)
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
on_tick_and_append_step(spec, store, current_time, test_steps)
# no reorg
assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
if is_justifying_previous_epoch:
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
else:
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4
yield 'steps', test_steps
@with_altair_and_later
@spec_state_test
@with_presets(TESTING_PRESETS, reason="too slow")
def test_simple_attempted_reorg_delayed_justification_current_epoch(spec, state):
"""
[Case 2]
{ epoch 4 }{ epoch 5 }
[c4]<--[b]<--[y]
↑______________[z]
At c4, c3 is the latest justified checkpoint (or something earlier)
block_b: the block that can justify c4.
z: the child of block_b at the first slot of epoch 5.
block z can reorg the chain from block y.
"""
yield from _run_delayed_justification(spec, state, attemped_reorg=True, is_justifying_previous_epoch=False)
def _run_include_votes_of_another_empty_chain(spec, state, enough_ffg, is_justifying_previous_epoch):
test_steps = []
# Initialization
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
yield 'anchor_state', state
yield 'anchor_block', anchor_block
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
on_tick_and_append_step(spec, store, current_time, test_steps)
assert store.time == current_time
next_epoch(spec, state)
on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
# Fill epoch 1 to 2
for _ in range(2):
state, store, _ = yield from apply_next_epoch_with_attestations(
spec, state, store, True, True, test_steps=test_steps)
if is_justifying_previous_epoch:
block_a = build_empty_block_for_next_slot(spec, state)
signed_block_a = state_transition_and_sign_block(spec, state, block_a)
yield from tick_and_add_block(spec, store, signed_block_a, test_steps)
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 2
else:
# fill one more epoch
state, store, _ = yield from apply_next_epoch_with_attestations(
spec, state, store, True, True, test_steps=test_steps)
signed_block_a = state_transition_with_full_block(spec, state, True, True)
yield from tick_and_add_block(spec, store, signed_block_a, test_steps)
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
assert spec.get_head(store) == signed_block_a.message.hash_tree_root()
state = store.block_states[spec.get_head(store)].copy()
if is_justifying_previous_epoch:
assert state.current_justified_checkpoint.epoch == 2
else:
assert state.current_justified_checkpoint.epoch == 3
state_a = state.copy()
if is_justifying_previous_epoch:
# try to find the block that can justify epoch 3
_, justifying_slot = find_next_justifying_slot(spec, state, False, True)
else:
# try to find the block that can justify epoch 4
_, justifying_slot = find_next_justifying_slot(spec, state, True, True)
last_slot_of_z = justifying_slot if enough_ffg else justifying_slot - 1
last_slot_of_y = justifying_slot if is_justifying_previous_epoch else last_slot_of_z - 1
# to test the "no withholding" situation, temporarily store the blocks in lists
signed_blocks_of_y = []
# build an empty chain to the slot prior to the epoch boundary
signed_blocks_of_empty_chain = []
states_of_empty_chain = []
for slot in range(state.slot + 1, last_slot_of_y + 1):
block = build_empty_block(spec, state, slot=slot)
signed_block = state_transition_and_sign_block(spec, state, block)
signed_blocks_of_empty_chain.append(signed_block)
states_of_empty_chain.append(state.copy())
signed_blocks_of_y.append(signed_block)
signed_block_y = signed_blocks_of_empty_chain[-1]
# create 2/3 votes for the empty chain
attestations_for_y = []
# target_is_current = not is_justifying_previous_epoch
attestations = list(get_valid_attestation_at_slot(state, spec, state_a.slot))
attestations_for_y.append(attestations)
for state in states_of_empty_chain:
attestations = list(get_valid_attestation_at_slot(state, spec, state.slot))
attestations_for_y.append(attestations)
state = state_a.copy()
signed_block_z = None
for slot in range(state_a.slot + 1, last_slot_of_z + 1):
# apply chain y, the empty chain
if slot <= last_slot_of_y and len(signed_blocks_of_y) > 0:
signed_block_y = signed_blocks_of_y.pop(0)
assert signed_block_y.message.slot == slot
yield from tick_and_add_block(spec, store, signed_block_y, test_steps)
# apply chain z, a fork chain that includes these attestations_for_y
block = build_empty_block(spec, state, slot=slot)
if (
len(attestations_for_y) > 0 and (
(not is_justifying_previous_epoch)
or (is_justifying_previous_epoch and attestations_for_y[0][0].data.slot == slot - 5)
)
):
block.body.attestations = attestations_for_y.pop(0)
signed_block_z = state_transition_and_sign_block(spec, state, block)
if signed_block_y != signed_block_z:
yield from tick_and_add_block(spec, store, signed_block_z, test_steps)
if is_ready_to_justify(spec, state):
break
assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
if is_justifying_previous_epoch:
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 2
else:
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
if enough_ffg:
assert is_ready_to_justify(spec, state)
else:
assert not is_ready_to_justify(spec, state)
# to next epoch
next_epoch(spec, state)
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
on_tick_and_append_step(spec, store, current_time, test_steps)
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
if enough_ffg:
# reorg
assert spec.get_head(store) == signed_block_z.message.hash_tree_root()
if is_justifying_previous_epoch:
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
else:
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4
else:
# no reorg
assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
yield 'steps', test_steps
@with_altair_and_later
@spec_state_test
@with_presets(TESTING_PRESETS, reason="too slow")
def test_include_votes_another_empty_chain_with_enough_ffg_votes_current_epoch(spec, state):
"""
[Case 3]
"""
yield from _run_include_votes_of_another_empty_chain(
spec, state, enough_ffg=True, is_justifying_previous_epoch=False)
@with_altair_and_later
@spec_state_test
@with_presets(TESTING_PRESETS, reason="too slow")
def test_include_votes_another_empty_chain_without_enough_ffg_votes_current_epoch(spec, state):
"""
[Case 4]
"""
yield from _run_include_votes_of_another_empty_chain(
spec, state, enough_ffg=False, is_justifying_previous_epoch=False)
@with_altair_and_later
@spec_state_test
@with_presets(TESTING_PRESETS, reason="too slow")
def test_delayed_justification_current_epoch(spec, state):
"""
[Case 5]
To compare with ``test_simple_attempted_reorg_delayed_justification_current_epoch``,
this is the base case where there is no chain z
{ epoch 4 }{ epoch 5 }
[c4]<--[b]<--[y]
At c4, c3 is the latest justified checkpoint.
block_b: the block that can justify c4.
"""
yield from _run_delayed_justification(spec, state, attemped_reorg=False, is_justifying_previous_epoch=False)
@with_altair_and_later
@spec_state_test
@with_presets(TESTING_PRESETS, reason="too slow")
def test_delayed_justification_previous_epoch(spec, state):
"""
[Case 6]
Similar to ``test_delayed_justification_current_epoch``,
but includes attestations during epoch N to justify checkpoint N-1.
{ epoch 3 }{ epoch 4 }{ epoch 5 }
[c3]<---------------[c4]---[b]<---------------------------------[y]
"""
yield from _run_delayed_justification(spec, state, attemped_reorg=False, is_justifying_previous_epoch=True)
@with_altair_and_later
@spec_state_test
@with_presets(TESTING_PRESETS, reason="too slow")
def test_simple_attempted_reorg_delayed_justification_previous_epoch(spec, state):
"""
[Case 7]
Similar to ``test_simple_attempted_reorg_delayed_justification_current_epoch``,
but includes attestations during epoch N to justify checkpoint N-1.
{ epoch 3 }{ epoch 4 }{ epoch 5 }
[c3]<---------------[c4]<--[b]<--[y]
↑______________[z]
At c4, c2 is the latest justified checkpoint.
block_b: the block that can justify c3.
z: the child of block b at the first slot of epoch 5.
block z can reorg the chain from block y.
"""
yield from _run_delayed_justification(spec, state, attemped_reorg=True, is_justifying_previous_epoch=True)
@with_altair_and_later
@spec_state_test
@with_presets(TESTING_PRESETS, reason="too slow")
def test_include_votes_another_empty_chain_with_enough_ffg_votes_previous_epoch(spec, state):
"""
[Case 8]
Similar to ``test_include_votes_another_empty_chain_with_enough_ffg_votes_current_epoch``,
but includes attestations during epoch N to justify checkpoint N-1.
"""
yield from _run_include_votes_of_another_empty_chain(
spec, state, enough_ffg=True, is_justifying_previous_epoch=True)

View File

@@ -0,0 +1,205 @@
from eth2spec.test.context import (
spec_state_test,
with_altair_and_later,
with_presets,
)
from eth2spec.test.helpers.constants import (
MINIMAL,
)
from eth2spec.test.helpers.attestations import (
state_transition_with_full_block,
)
from eth2spec.test.helpers.block import (
build_empty_block_for_next_slot,
)
from eth2spec.test.helpers.fork_choice import (
get_genesis_forkchoice_store_and_block,
on_tick_and_append_step,
tick_and_add_block,
apply_next_epoch_with_attestations,
find_next_justifying_slot,
)
from eth2spec.test.helpers.state import (
state_transition_and_sign_block,
next_epoch,
)
TESTING_PRESETS = [MINIMAL]
@with_altair_and_later
@spec_state_test
@with_presets(TESTING_PRESETS, reason="too slow")
def test_withholding_attack(spec, state):
"""
"""
test_steps = []
# Initialization
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
yield 'anchor_state', state
yield 'anchor_block', anchor_block
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
on_tick_and_append_step(spec, store, current_time, test_steps)
assert store.time == current_time
next_epoch(spec, state)
on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
# Fill epoch 1 to 3
for _ in range(3):
state, store, _ = yield from apply_next_epoch_with_attestations(
spec, state, store, True, True, test_steps=test_steps)
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
# Create the attack block that includes justifying attestations for epoch 4
# This block is withheld & revealed only in epoch 5
signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, False)
assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state)
assert len(signed_blocks) > 1
signed_attack_block = signed_blocks[-1]
for signed_block in signed_blocks[:-1]:
yield from tick_and_add_block(spec, store, signed_block, test_steps)
assert spec.get_head(store) == signed_block.message.hash_tree_root()
assert spec.get_head(store) == signed_blocks[-2].message.hash_tree_root()
state = store.block_states[spec.get_head(store)].copy()
assert spec.compute_epoch_at_slot(state.slot) == 4
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
# Create an honest chain in epoch 5 that includes the justifying attestations from the attack block
next_epoch(spec, state)
assert spec.compute_epoch_at_slot(state.slot) == 5
assert state.current_justified_checkpoint.epoch == 3
# Create two blocks in the honest chain with full attestations, and add them to the store
for _ in range(2):
signed_block = state_transition_with_full_block(spec, state, True, False)
yield from tick_and_add_block(spec, store, signed_block, test_steps)
# Create final block in the honest chain that includes the justifying attestations from the attack block
honest_block = build_empty_block_for_next_slot(spec, state)
honest_block.body.attestations = signed_attack_block.message.body.attestations
signed_honest_block = state_transition_and_sign_block(spec, state, honest_block)
# Add the honest block to the store
yield from tick_and_add_block(spec, store, signed_honest_block, test_steps)
assert spec.get_head(store) == signed_honest_block.message.hash_tree_root()
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
# Tick to the next slot so proposer boost is not a factor in choosing the head
current_time = (honest_block.slot + 1) * spec.config.SECONDS_PER_SLOT + store.genesis_time
on_tick_and_append_step(spec, store, current_time, test_steps)
assert spec.get_head(store) == signed_honest_block.message.hash_tree_root()
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
# Upon revealing the withheld attack block, the honest block should still be the head
yield from tick_and_add_block(spec, store, signed_attack_block, test_steps)
assert spec.get_head(store) == signed_honest_block.message.hash_tree_root()
# As a side effect of the pull-up logic, the attack block is pulled up and store.justified_checkpoint is updated
assert store.justified_checkpoint.epoch == 4
# Even after going to the next epoch, the honest block should remain the head
slot = spec.get_current_slot(store) + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH)
current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
on_tick_and_append_step(spec, store, current_time, test_steps)
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6
assert spec.get_head(store) == signed_honest_block.message.hash_tree_root()
yield 'steps', test_steps
@with_altair_and_later
@spec_state_test
@with_presets(TESTING_PRESETS, reason="too slow")
def test_withholding_attack_unviable_honest_chain(spec, state):
"""
Checks that the withholding attack succeeds for one epoch when the honest chain's
voting source is more than two epochs old.
"""
test_steps = []
# Initialization
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
yield 'anchor_state', state
yield 'anchor_block', anchor_block
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
on_tick_and_append_step(spec, store, current_time, test_steps)
assert store.time == current_time
next_epoch(spec, state)
on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
# Fill epoch 1 to 3
for _ in range(3):
state, store, _ = yield from apply_next_epoch_with_attestations(
spec, state, store, True, True, test_steps=test_steps)
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
next_epoch(spec, state)
assert spec.compute_epoch_at_slot(state.slot) == 5
# Create the attack block that includes justifying attestations for epoch 5
# This block is withheld & revealed only in epoch 6
signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, False)
assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state)
assert len(signed_blocks) > 1
signed_attack_block = signed_blocks[-1]
for signed_block in signed_blocks[:-1]:
yield from tick_and_add_block(spec, store, signed_block, test_steps)
assert spec.get_head(store) == signed_block.message.hash_tree_root()
assert spec.get_head(store) == signed_blocks[-2].message.hash_tree_root()
state = store.block_states[spec.get_head(store)].copy()
assert spec.compute_epoch_at_slot(state.slot) == 5
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
# Create an honest chain in epoch 6 that includes the justifying attestations from the attack block
next_epoch(spec, state)
assert spec.compute_epoch_at_slot(state.slot) == 6
assert state.current_justified_checkpoint.epoch == 3
# Create two blocks in the honest chain with full attestations, and add them to the store
for _ in range(2):
signed_block = state_transition_with_full_block(spec, state, True, False)
assert state.current_justified_checkpoint.epoch == 3
yield from tick_and_add_block(spec, store, signed_block, test_steps)
# Create final block in the honest chain that includes the justifying attestations from the attack block
honest_block = build_empty_block_for_next_slot(spec, state)
honest_block.body.attestations = signed_attack_block.message.body.attestations
signed_honest_block = state_transition_and_sign_block(spec, state, honest_block)
honest_block_root = signed_honest_block.message.hash_tree_root()
assert state.current_justified_checkpoint.epoch == 3
# Add the honest block to the store
yield from tick_and_add_block(spec, store, signed_honest_block, test_steps)
current_epoch = spec.compute_epoch_at_slot(spec.get_current_slot(store))
assert current_epoch == 6
# assert store.voting_source[honest_block_root].epoch == 3
assert spec.get_head(store) == honest_block_root
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
# Tick to the next slot so proposer boost is not a factor in choosing the head
current_time = (honest_block.slot + 1) * spec.config.SECONDS_PER_SLOT + store.genesis_time
on_tick_and_append_step(spec, store, current_time, test_steps)
assert spec.get_head(store) == honest_block_root
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
# Upon revealing the withheld attack block, it should become the head
yield from tick_and_add_block(spec, store, signed_attack_block, test_steps)
# The attack block is pulled up and store.justified_checkpoint is updated
assert store.justified_checkpoint.epoch == 5
attack_block_root = signed_attack_block.message.hash_tree_root()
assert spec.get_head(store) == attack_block_root
# After going to the next epoch, the honest block should become the head
slot = spec.get_current_slot(store) + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH)
current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
on_tick_and_append_step(spec, store, current_time, test_steps)
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7
# assert store.voting_source[honest_block_root].epoch == 5
assert spec.get_head(store) == honest_block_root
yield 'steps', test_steps

View File

@@ -1,87 +0,0 @@
from copy import deepcopy
from eth2spec.utils.ssz.ssz_impl import hash_tree_root
from eth2spec.test.context import (
spec_state_test,
with_all_phases,
)
from eth2spec.test.helpers.block import (
build_empty_block_for_next_slot,
)
from eth2spec.test.helpers.fork_choice import (
get_genesis_forkchoice_store,
run_on_block,
apply_next_epoch_with_attestations,
)
from eth2spec.test.helpers.state import (
next_epoch,
state_transition_and_sign_block,
)
@with_all_phases
@spec_state_test
def test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state):
"""
NOTE: test_new_justified_is_later_than_store_justified also tests best_justified_checkpoint
"""
# Initialization
store = get_genesis_forkchoice_store(spec, state)
next_epoch(spec, state)
spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT)
state, store, last_signed_block = yield from apply_next_epoch_with_attestations(
spec, state, store, True, False)
last_block_root = hash_tree_root(last_signed_block.message)
# NOTE: Mock fictitious justified checkpoint in store
store.justified_checkpoint = spec.Checkpoint(
epoch=spec.compute_epoch_at_slot(last_signed_block.message.slot),
root=spec.Root("0x4a55535449464945440000000000000000000000000000000000000000000000")
)
next_epoch(spec, state)
spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT)
# Create new higher justified checkpoint not in branch of store's justified checkpoint
just_block = build_empty_block_for_next_slot(spec, state)
store.blocks[just_block.hash_tree_root()] = just_block
# Step time past safe slots
spec.on_tick(store, store.time + spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED * spec.config.SECONDS_PER_SLOT)
assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED
previously_finalized = store.finalized_checkpoint
previously_justified = store.justified_checkpoint
# Add a series of new blocks with "better" justifications
best_justified_checkpoint = spec.Checkpoint(epoch=0)
for i in range(3, 0, -1):
# Mutate store
just_state = store.block_states[last_block_root]
new_justified = spec.Checkpoint(
epoch=previously_justified.epoch + i,
root=just_block.hash_tree_root(),
)
if new_justified.epoch > best_justified_checkpoint.epoch:
best_justified_checkpoint = new_justified
just_state.current_justified_checkpoint = new_justified
block = build_empty_block_for_next_slot(spec, just_state)
signed_block = state_transition_and_sign_block(spec, deepcopy(just_state), block)
# NOTE: Mock store so that the modified state could be accessed
parent_block = store.blocks[last_block_root].copy()
parent_block.state_root = just_state.hash_tree_root()
store.blocks[block.parent_root] = parent_block
store.block_states[block.parent_root] = just_state.copy()
assert block.parent_root in store.blocks.keys()
assert block.parent_root in store.block_states.keys()
run_on_block(spec, store, signed_block)
assert store.finalized_checkpoint == previously_finalized
assert store.justified_checkpoint == previously_justified
# ensure the best from the series was stored
assert store.best_justified_checkpoint == best_justified_checkpoint

View File

@@ -18,7 +18,6 @@ def run_on_tick(spec, store, time, new_justified_checkpoint=False):
assert store.time == time
if new_justified_checkpoint:
assert store.justified_checkpoint == store.best_justified_checkpoint
assert store.justified_checkpoint.epoch > previous_justified_checkpoint.epoch
assert store.justified_checkpoint.root != previous_justified_checkpoint.root
else:
@@ -32,12 +31,12 @@ def test_basic(spec, state):
run_on_tick(spec, store, store.time + 1)
"""
@with_all_phases
@spec_state_test
def test_update_justified_single_on_store_finalized_chain(spec, state):
store = get_genesis_forkchoice_store(spec, state)
# [Mock store.best_justified_checkpoint]
# Create a block at epoch 1
next_epoch(spec, state)
block = build_empty_block_for_next_slot(spec, state)
@@ -58,8 +57,6 @@ def test_update_justified_single_on_store_finalized_chain(spec, state):
state_transition_and_sign_block(spec, state, block)
store.blocks[block.hash_tree_root()] = block
store.block_states[block.hash_tree_root()] = state
# Mock store.best_justified_checkpoint
store.best_justified_checkpoint = state.current_justified_checkpoint.copy()
run_on_tick(
spec,
@@ -67,6 +64,7 @@ def test_update_justified_single_on_store_finalized_chain(spec, state):
store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT,
new_justified_checkpoint=True
)
"""
@with_all_phases
@@ -89,7 +87,6 @@ def test_update_justified_single_not_on_store_finalized_chain(spec, state):
root=block.hash_tree_root(),
)
# [Mock store.best_justified_checkpoint]
# Create a block at epoch 1
state = init_state.copy()
next_epoch(spec, state)
@@ -112,79 +109,9 @@ def test_update_justified_single_not_on_store_finalized_chain(spec, state):
state_transition_and_sign_block(spec, state, block)
store.blocks[block.hash_tree_root()] = block.copy()
store.block_states[block.hash_tree_root()] = state.copy()
# Mock store.best_justified_checkpoint
store.best_justified_checkpoint = state.current_justified_checkpoint.copy()
run_on_tick(
spec,
store,
store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT,
)
@with_all_phases
@spec_state_test
def test_no_update_same_slot_at_epoch_boundary(spec, state):
store = get_genesis_forkchoice_store(spec, state)
seconds_per_epoch = spec.config.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH
store.best_justified_checkpoint = spec.Checkpoint(
epoch=store.justified_checkpoint.epoch + 1,
root=b'\x55' * 32,
)
# set store time to already be at epoch boundary
store.time = seconds_per_epoch
run_on_tick(spec, store, store.time + 1)
@with_all_phases
@spec_state_test
def test_no_update_not_epoch_boundary(spec, state):
store = get_genesis_forkchoice_store(spec, state)
store.best_justified_checkpoint = spec.Checkpoint(
epoch=store.justified_checkpoint.epoch + 1,
root=b'\x55' * 32,
)
run_on_tick(spec, store, store.time + spec.config.SECONDS_PER_SLOT)
@with_all_phases
@spec_state_test
def test_no_update_new_justified_equal_epoch(spec, state):
store = get_genesis_forkchoice_store(spec, state)
seconds_per_epoch = spec.config.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH
store.best_justified_checkpoint = spec.Checkpoint(
epoch=store.justified_checkpoint.epoch + 1,
root=b'\x55' * 32,
)
store.justified_checkpoint = spec.Checkpoint(
epoch=store.best_justified_checkpoint.epoch,
root=b'\x44' * 32,
)
run_on_tick(spec, store, store.time + seconds_per_epoch)
@with_all_phases
@spec_state_test
def test_no_update_new_justified_later_epoch(spec, state):
store = get_genesis_forkchoice_store(spec, state)
seconds_per_epoch = spec.config.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH
store.best_justified_checkpoint = spec.Checkpoint(
epoch=store.justified_checkpoint.epoch + 1,
root=b'\x55' * 32,
)
store.justified_checkpoint = spec.Checkpoint(
epoch=store.best_justified_checkpoint.epoch + 1,
root=b'\x44' * 32,
)
run_on_tick(spec, store, store.time + seconds_per_epoch)

View File

@@ -235,7 +235,7 @@ def random_block_capella(spec, state, signed_blocks, scenario_state, rng=Random(
def random_block_deneb(spec, state, signed_blocks, scenario_state, rng=Random(3456)):
block = random_block_capella(spec, state, signed_blocks, scenario_state)
# TODO: more commitments. blob_kzg_commitments: List[KZGCommitment, MAX_BLOBS_PER_BLOCK]
opaque_tx, _, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=1)
opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec, blob_count=1)
block.body.execution_payload.transactions = [opaque_tx]
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
block.body.blob_kzg_commitments = blob_kzg_commitments

View File

@@ -1,28 +1,50 @@
from py_ecc.bls import G2ProofOfPossession as py_ecc_bls
from py_ecc.bls.g2_primatives import signature_to_G2 as _signature_to_G2
from py_ecc.optimized_bls12_381 import ( # noqa: F401
G1,
G2,
Z1,
Z2,
FQ,
add,
multiply,
neg,
pairing,
final_exponentiate,
FQ12
G1 as py_ecc_G1,
G2 as py_ecc_G2,
Z1 as py_ecc_Z1,
add as py_ecc_add,
multiply as py_ecc_mul,
neg as py_ecc_neg,
pairing as py_ecc_pairing,
final_exponentiate as py_ecc_final_exponentiate,
FQ12 as py_ecc_GT,
)
from py_ecc.bls.g2_primitives import ( # noqa: F401
G1_to_pubkey as G1_to_bytes48,
pubkey_to_G1 as bytes48_to_G1,
G2_to_signature as G2_to_bytes96,
signature_to_G2 as bytes96_to_G2,
curve_order as BLS_MODULUS,
G1_to_pubkey as py_ecc_G1_to_bytes48,
pubkey_to_G1 as py_ecc_bytes48_to_G1,
G2_to_signature as py_ecc_G2_to_bytes96,
signature_to_G2 as py_ecc_bytes96_to_G2,
)
from py_arkworks_bls12381 import (
G1Point as arkworks_G1,
G2Point as arkworks_G2,
Scalar as arkworks_Scalar,
GT as arkworks_GT,
)
import milagro_bls_binding as milagro_bls # noqa: F401 for BLS switching option
import py_arkworks_bls12381 as arkworks_bls # noqa: F401 for BLS switching option
class fastest_bls:
G1 = arkworks_G1
G2 = arkworks_G2
Scalar = arkworks_Scalar
GT = arkworks_GT
_AggregatePKs = milagro_bls._AggregatePKs
Sign = milagro_bls.Sign
Verify = milagro_bls.Verify
Aggregate = milagro_bls.Aggregate
AggregateVerify = milagro_bls.AggregateVerify
FastAggregateVerify = milagro_bls.FastAggregateVerify
SkToPk = milagro_bls.SkToPk
# Flag to make BLS active or not. Used for testing; do not disable BLS in production unless you know what you are doing.
bls_active = True
@@ -43,6 +65,14 @@ def use_milagro():
bls = milagro_bls
def use_arkworks():
"""
Shortcut to use Arkworks as BLS library
"""
global bls
bls = arkworks_bls
def use_py_ecc():
"""
Shortcut to use Py-ecc as BLS library
@@ -51,6 +81,14 @@ def use_py_ecc():
bls = py_ecc_bls
def use_fastest():
"""
Shortcut to use Milagro for signatures and Arkworks for other BLS operations
"""
global bls
bls = fastest_bls
def only_with_bls(alt_return=None):
"""
Decorator factory to make a function only run when BLS is active. Otherwise return the default.
@@ -68,7 +106,10 @@ def only_with_bls(alt_return=None):
@only_with_bls(alt_return=True)
def Verify(PK, message, signature):
try:
result = bls.Verify(PK, message, signature)
if bls == arkworks_bls: # no signature API in arkworks
result = py_ecc_bls.Verify(PK, message, signature)
else:
result = bls.Verify(PK, message, signature)
except Exception:
result = False
finally:
@@ -78,7 +119,10 @@ def Verify(PK, message, signature):
@only_with_bls(alt_return=True)
def AggregateVerify(pubkeys, messages, signature):
try:
result = bls.AggregateVerify(list(pubkeys), list(messages), signature)
if bls == arkworks_bls: # no signature API in arkworks
result = py_ecc_bls.AggregateVerify(list(pubkeys), list(messages), signature)
else:
result = bls.AggregateVerify(list(pubkeys), list(messages), signature)
except Exception:
result = False
finally:
@@ -88,7 +132,10 @@ def AggregateVerify(pubkeys, messages, signature):
@only_with_bls(alt_return=True)
def FastAggregateVerify(pubkeys, message, signature):
try:
result = bls.FastAggregateVerify(list(pubkeys), message, signature)
if bls == arkworks_bls: # no signature API in arkworks
result = py_ecc_bls.FastAggregateVerify(list(pubkeys), message, signature)
else:
result = bls.FastAggregateVerify(list(pubkeys), message, signature)
except Exception:
result = False
finally:
@@ -97,12 +144,16 @@ def FastAggregateVerify(pubkeys, message, signature):
@only_with_bls(alt_return=STUB_SIGNATURE)
def Aggregate(signatures):
if bls == arkworks_bls: # no signature API in arkworks
return py_ecc_bls.Aggregate(signatures)
return bls.Aggregate(signatures)
@only_with_bls(alt_return=STUB_SIGNATURE)
def Sign(SK, message):
if bls == py_ecc_bls:
if bls == arkworks_bls: # no signature API in arkworks
return py_ecc_bls.Sign(SK, message)
elif bls == py_ecc_bls:
return bls.Sign(SK, message)
else:
return bls.Sign(SK.to_bytes(32, 'big'), message)
@@ -121,24 +172,143 @@ def AggregatePKs(pubkeys):
# milagro_bls._AggregatePKs checks KeyValidate internally
pass
if bls == arkworks_bls: # no signature API in arkworks
return py_ecc_bls._AggregatePKs(list(pubkeys))
return bls._AggregatePKs(list(pubkeys))
@only_with_bls(alt_return=STUB_SIGNATURE)
def SkToPk(SK):
if bls == py_ecc_bls:
return bls.SkToPk(SK)
if bls == py_ecc_bls or bls == arkworks_bls: # no signature API in arkworks
return py_ecc_bls.SkToPk(SK)
else:
return bls.SkToPk(SK.to_bytes(32, 'big'))
def pairing_check(values):
p_q_1, p_q_2 = values
final_exponentiation = final_exponentiate(
pairing(p_q_1[1], p_q_1[0], final_exponentiate=False)
* pairing(p_q_2[1], p_q_2[0], final_exponentiate=False)
)
return final_exponentiation == FQ12.one()
if bls == arkworks_bls or bls == fastest_bls:
p_q_1, p_q_2 = values
g1s = [p_q_1[0], p_q_2[0]]
g2s = [p_q_1[1], p_q_2[1]]
return arkworks_GT.multi_pairing(g1s, g2s) == arkworks_GT.one()
else:
p_q_1, p_q_2 = values
final_exponentiation = py_ecc_final_exponentiate(
py_ecc_pairing(p_q_1[1], p_q_1[0], final_exponentiate=False)
* py_ecc_pairing(p_q_2[1], p_q_2[0], final_exponentiate=False)
)
return final_exponentiation == py_ecc_GT.one()
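# Illustrative usage (an assumption for clarity, not part of this module):
# pairing_check([[a1, a2], [b1, b2]]) verifies e(a1, a2) * e(b1, b2) == 1 in GT.
# By bilinearity e(G1, G2) * e(-G1, G2) == 1, so under any backend:
#
#     assert pairing_check([[G1(), G2()], [neg(G1()), G2()]])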
def add(lhs, rhs):
"""
Performs point addition of `lhs` and `rhs`.
The points can either be in G1 or G2.
"""
if bls == arkworks_bls or bls == fastest_bls:
return lhs + rhs
return py_ecc_add(lhs, rhs)
def multiply(point, scalar):
"""
Performs Scalar multiplication between
`point` and `scalar`.
`point` can either be in G1 or G2
"""
if bls == arkworks_bls or bls == fastest_bls:
int_as_bytes = scalar.to_bytes(32, 'little')
scalar = arkworks_Scalar.from_le_bytes(int_as_bytes)
return point * scalar
return py_ecc_mul(point, scalar)
def neg(point):
"""
Returns the point negation of `point`
`point` can either be in G1 or G2
"""
if bls == arkworks_bls or bls == fastest_bls:
return -point
return py_ecc_neg(point)
def Z1():
"""
Returns the identity point in G1
"""
if bls == arkworks_bls or bls == fastest_bls:
return arkworks_G1.identity()
return py_ecc_Z1
def G1():
"""
Returns the chosen generator point in G1
"""
if bls == arkworks_bls or bls == fastest_bls:
return arkworks_G1()
return py_ecc_G1
def G2():
"""
Returns the chosen generator point in G2
"""
if bls == arkworks_bls or bls == fastest_bls:
return arkworks_G2()
return py_ecc_G2
def G1_to_bytes48(point):
"""
Serializes a point in G1.
Returns a bytearray of size 48 as
we use the compressed format
"""
if bls == arkworks_bls or bls == fastest_bls:
return bytes(point.to_compressed_bytes())
return py_ecc_G1_to_bytes48(point)
def G2_to_bytes96(point):
"""
Serializes a point in G2.
Returns a bytearray of size 96 as
we use the compressed format
"""
if bls == arkworks_bls or bls == fastest_bls:
return bytes(point.to_compressed_bytes())
return py_ecc_G2_to_bytes96(point)
def bytes48_to_G1(bytes48):
"""
Deserializes a purported compressed serialized
point in G1.
- No subgroup checks are performed
- If the bytearray is not a valid serialization
of a point in G1, then this method will raise
an exception
"""
if bls == arkworks_bls or bls == fastest_bls:
return arkworks_G1.from_compressed_bytes_unchecked(bytes48)
return py_ecc_bytes48_to_G1(bytes48)
def bytes96_to_G2(bytes96):
"""
Deserializes a purported compressed serialized
point in G2.
- No subgroup checks are performed
- If the bytearray is not a valid serialization
of a point in G2, then this method will raise
an exception
"""
if bls == arkworks_bls or bls == fastest_bls:
return arkworks_G2.from_compressed_bytes_unchecked(bytes96)
return py_ecc_bytes96_to_G2(bytes96)
@only_with_bls(alt_return=True)

View File

@@ -146,10 +146,6 @@ finalized_checkpoint: {
epoch: int, -- Integer value from store.finalized_checkpoint.epoch
root: string, -- Encoded 32-byte value from store.finalized_checkpoint.root
}
best_justified_checkpoint: {
epoch: int, -- Integer value from store.best_justified_checkpoint.epoch
root: string, -- Encoded 32-byte value from store.best_justified_checkpoint.root
}
proposer_boost_root: string -- Encoded 32-byte value from store.proposer_boost_root
```
@@ -160,7 +156,6 @@ For example:
head: {slot: 32, root: '0xdaa1d49d57594ced0c35688a6da133abb086d191a2ebdfd736fad95299325aeb'}
justified_checkpoint: {epoch: 3, root: '0xc25faab4acab38d3560864ca01e4d5cc4dc2cd473da053fbc03c2669143a2de4'}
finalized_checkpoint: {epoch: 2, root: '0x40d32d6283ec11c53317a46808bc88f55657d93b95a1af920403187accf48f4f'}
best_justified_checkpoint: {epoch: 3, root: '0xc25faab4acab38d3560864ca01e4d5cc4dc2cd473da053fbc03c2669143a2de4'}
proposer_boost_root: '0xdaa1d49d57594ced0c35688a6da133abb086d191a2ebdfd736fad95299325aeb'
```

View File

@@ -0,0 +1,15 @@
# KZG tests
A test type for KZG libraries. Tests all the public interfaces that a KZG library needs to provide in order to implement EIP-4844, as defined in `polynomial-commitments.md`.
We do not recommend rolling your own crypto or using an untested KZG library.
The KZG test suite runner has the following handlers:
- [`blob_to_kzg_commitment`](./blob_to_kzg_commitment.md)
- [`compute_kzg_proof`](./compute_kzg_proof.md)
- [`verify_kzg_proof`](./verify_kzg_proof.md)
- [`compute_blob_kzg_proof`](./compute_blob_kzg_proof.md)
- [`verify_blob_kzg_proof`](./verify_blob_kzg_proof.md)
- [`verify_blob_kzg_proof_batch`](./verify_blob_kzg_proof_batch.md)
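A minimal sketch of how a consumer might drive one of these handlers, assuming the per-case `data.yaml` layout described in the format pages below and a hypothetical `my_kzg` library under test:

```python
import glob

import yaml  # PyYAML, assumed to be available

import my_kzg  # hypothetical KZG library under test


def run_blob_to_kzg_commitment_vectors(root):
    # Assumed layout: one directory per test case, each holding a data.yaml.
    for path in glob.glob(f"{root}/blob_to_kzg_commitment/*/data.yaml"):
        with open(path) as f:
            case = yaml.safe_load(f)
        try:
            blob = bytes.fromhex(case["input"]["blob"][2:])  # strip "0x"
            result = "0x" + my_kzg.blob_to_kzg_commitment(blob).hex()
        except Exception:
            result = None  # an invalid blob is expected to error
        # `output: null` means the library must reject the input.
        assert result == case["output"], path
```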

View File

@@ -0,0 +1,21 @@
# Test format: Blob to KZG commitment
Compute the KZG commitment for a given `blob`.
## Test case format
The test data is declared in a `data.yaml` file:
```yaml
input:
blob: Blob -- the data blob
output: KZGCommitment -- The KZG commitment
```
- `blob` here is encoded as a string: hexadecimal encoding of `4096 * 32 = 131072` bytes, prefixed with `0x`.
All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
## Condition
The `blob_to_kzg_commitment` handler should compute the KZG commitment for `blob`, and the result should match the expected `output`. If the blob is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element) it should error, i.e. the output should be `null`.

View File

@@ -0,0 +1,23 @@
# Test format: Compute blob KZG proof
Compute the blob KZG proof for a given `blob`, which helps with quickly verifying that the KZG commitment for the blob is correct.
## Test case format
The test data is declared in a `data.yaml` file:
```yaml
input:
blob: Blob -- the data blob
commitment: Bytes48 -- the commitment to the blob
output: KZGProof -- The blob KZG proof
```
- `blob` here is encoded as a string: hexadecimal encoding of `4096 * 32 = 131072` bytes, prefixed with `0x`.
- `commitment` here is encoded as a string: hexadecimal encoding of `48` bytes, prefixed with `0x`.
All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
## Condition
The `compute_blob_kzg_proof` handler should compute the blob KZG proof for `blob`, and the result should match the expected `output`. If the blob is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element) it should error, i.e. the output should be `null`.

View File

@@ -0,0 +1,24 @@
# Test format: Compute KZG proof
Compute the KZG proof for a given `blob` and an evaluation point `z`.
## Test case format
The test data is declared in a `data.yaml` file:
```yaml
input:
blob: Blob -- the data blob representing a polynomial
z: Bytes32 -- bytes encoding the BLS field element at which the polynomial should be evaluated
output: Tuple[KZGProof, Bytes32] -- The KZG proof and the value y = f(z)
```
- `blob` here is encoded as a string: hexadecimal encoding of `4096 * 32 = 131072` bytes, prefixed with `0x`.
- `z` here is encoded as a string: hexadecimal encoding of `32` bytes representing a little endian encoded field element, prefixed with `0x`.
- `y` here is encoded as a string: hexadecimal encoding of `32` bytes representing a little endian encoded field element, prefixed with `0x`.
All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
## Condition
The `compute_kzg_proof` handler should compute the KZG proof as well as the value `y` for evaluating the polynomial represented by `blob` at `z`, and the result should match the expected `output`. If the blob is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element) or `z` is not a valid BLS field element, it should error, i.e. the output should be `null`.
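As a sketch, a checker for one parsed case of this handler might look as follows, assuming a hypothetical `my_kzg.compute_kzg_proof(blob, z) -> (proof, y)` binding; note that the tuple output appears in YAML as a two-element sequence:

```python
def check_compute_kzg_proof_case(case, my_kzg):
    # `case` is a parsed data.yaml mapping with "input" and "output" keys.
    blob = bytes.fromhex(case["input"]["blob"][2:])
    z = bytes.fromhex(case["input"]["z"][2:])
    try:
        proof, y = my_kzg.compute_kzg_proof(blob, z)
        result = ["0x" + proof.hex(), "0x" + y.hex()]
    except Exception:
        result = None  # an invalid blob or z must error
    # `output: null` means the handler must reject the input; otherwise it
    # is the (proof, y) pair, which YAML parses as a list.
    assert result == case["output"]
```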

View File

@@ -0,0 +1,23 @@
# Test format: Verify blob KZG proof
Use the blob KZG proof to verify that the KZG commitment for a given `blob` is correct.
## Test case format
The test data is declared in a `data.yaml` file:
```yaml
input:
blob: Blob -- the data blob
commitment: KZGCommitment -- the KZG commitment to the data blob
proof: KZGProof -- The KZG proof
output: bool -- true (valid proof) or false (incorrect proof)
```
- `blob` here is encoded as a string: hexadecimal encoding of `4096 * 32 = 131072` bytes, prefixed with `0x`.
All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
## Condition
The `verify_blob_kzg_proof` handler should verify that `commitment` is a correct KZG commitment to `blob` by using the blob KZG proof `proof`, and the result should match the expected `output`. If the commitment or proof is invalid (e.g. not on the curve or not in the G1 subgroup of the BLS curve) or `blob` is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element), it should error, i.e. the output should be `null`.

View File

@@ -0,0 +1,23 @@
# Test format: Verify blob KZG proof batch
Use the blob KZG proofs to verify that the KZG commitments for the given `blobs` are correct.
## Test case format
The test data is declared in a `data.yaml` file:
```yaml
input:
blobs: List[Blob] -- the data blobs
commitments: List[KZGCommitment] -- the KZG commitments to the data blobs
proofs: List[KZGProof] -- the blob KZG proofs
output: bool -- true (all proofs are valid) or false (some proofs incorrect)
```
- Each `blob` is encoded as a string: hexadecimal encoding of `4096 * 32 = 131072` bytes, prefixed with `0x`.
All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
## Condition
The `verify_blob_kzg_proof_batch` handler should verify that `commitments` are correct KZG commitments to `blobs` by using the blob KZG proofs `proofs`, and the result should match the expected `output`. If any of the commitments or proofs are invalid (e.g. not on the curve or not in the G1 subgroup of the BLS curve) or any blob is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element), it should error, i.e. the output should be `null`.
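A checker for one parsed case of this handler, under the same assumptions as above (hypothetical `my_kzg` binding, parsed `data.yaml` mapping):

```python
def check_verify_blob_kzg_proof_batch_case(case, my_kzg):
    def unhex(s):
        return bytes.fromhex(s[2:])  # strip the "0x" prefix

    blobs = [unhex(b) for b in case["input"]["blobs"]]
    commitments = [unhex(c) for c in case["input"]["commitments"]]
    proofs = [unhex(p) for p in case["input"]["proofs"]]
    # The three lists are expected to have equal length.
    try:
        result = my_kzg.verify_blob_kzg_proof_batch(blobs, commitments, proofs)
    except Exception:
        result = None  # invalid blobs, commitments, or proofs must error
    assert result == case["output"]
```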

View File

@@ -0,0 +1,25 @@
# Test format: Verify KZG proof
Verify a KZG proof claiming that the committed polynomial evaluates to `y` at the evaluation point `z`.
## Test case format
The test data is declared in a `data.yaml` file:
```yaml
input:
commitment: KZGCommitment -- the KZG commitment to the data blob
z: Bytes32 -- bytes encoding the BLS field element at which the polynomial should be evaluated
y: Bytes32 -- the claimed result of the evaluation
proof: KZGProof -- The KZG proof
output: bool -- true (valid proof) or false (incorrect proof)
```
- `z` here is encoded as a string: hexadecimal encoding of `32` bytes representing a little endian encoded field element, prefixed with `0x`.
- `y` here is encoded as a string: hexadecimal encoding of `32` bytes representing a little endian encoded field element, prefixed with `0x`.
All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
## Condition
The `verify_kzg_proof` handler should verify that `proof` is a valid KZG proof that the polynomial committed to by `commitment` evaluates to `y` at the point `z`, and the result should match the expected `output`. If the commitment or proof is invalid (e.g. not on the curve or not in the G1 subgroup of the BLS curve) or `z` or `y` is not a valid BLS field element, it should error, i.e. the output should be `null`.
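For background (standard KZG, not specific to this test format): `proof` is the commitment to the quotient polynomial `(f(X) - y) / (X - z)`, so verification reduces to a single pairing check over the trusted-setup secret `s`:

```
e(commitment - [y]_1, [1]_2) == e(proof, [s - z]_2)
```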

View File

@@ -1,15 +1,16 @@
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, combine_mods
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB
from eth2spec.test.helpers.constants import ALTAIR, BELLATRIX, CAPELLA, DENEB
if __name__ == "__main__":
phase_0_mods = {key: 'eth2spec.test.phase0.fork_choice.test_' + key for key in [
# Note: Fork choice tests start from Altair - there are no fork choice tests for phase 0 anymore
altair_mods = {key: 'eth2spec.test.phase0.fork_choice.test_' + key for key in [
'get_head',
'on_block',
'ex_ante',
'reorg',
'withholding',
]}
# No additional Altair specific finality tests, yet.
altair_mods = phase_0_mods
# For merge `on_merge_block` test kind added with `pow_block_N.ssz` files with several
# PowBlock's which should be resolved by `get_pow_block(hash: Hash32) -> PowBlock` function
@@ -21,7 +22,6 @@ if __name__ == "__main__":
deneb_mods = capella_mods # No additional Capella specific fork choice tests
all_mods = {
PHASE0: phase_0_mods,
ALTAIR: altair_mods,
BELLATRIX: bellatrix_mods,
CAPELLA: capella_mods,

View File

@@ -0,0 +1,3 @@
# KZG 4844 Test Generator
These tests are specific to the KZG API required for implementing EIP-4844

View File

@@ -0,0 +1,801 @@
"""
KZG 4844 test vectors generator
"""
from hashlib import sha256
from typing import Tuple, Iterable, Any, Callable, Dict
from eth_utils import (
encode_hex,
int_to_big_endian,
)
from eth2spec.utils import bls
from eth2spec.test.helpers.constants import DENEB
from eth2spec.test.helpers.typing import SpecForkName
from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing
from eth2spec.deneb import spec
def expect_exception(func, *args):
try:
func(*args)
except Exception:
pass
else:
raise Exception("should have raised exception")
def field_element_bytes(x):
return int.to_bytes(x % spec.BLS_MODULUS, 32, spec.ENDIANNESS)
def field_element_bytes_unchecked(x):
return int.to_bytes(x, 32, spec.ENDIANNESS)
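# Note: field_element_bytes reduces modulo BLS_MODULUS and so always yields a
# canonical field element encoding, while the _unchecked variant preserves
# out-of-range values so that deliberately invalid inputs can be constructed.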
def encode_hex_list(a):
return [encode_hex(x) for x in a]
def bls_add_one(x):
"""
Adds "one" (actually bls.G1()) to a compressed group element.
Useful to compute definitely incorrect proofs.
"""
return bls.G1_to_bytes48(
bls.add(bls.bytes48_to_G1(x), bls.G1())
)
def evaluate_blob_at(blob, z):
return field_element_bytes(
spec.evaluate_polynomial_in_evaluation_form(spec.blob_to_polynomial(blob), spec.bytes_to_bls_field(z))
)
G1 = bls.G1_to_bytes48(bls.G1())
P1_NOT_IN_G1 = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" +
"0123456789abcdef0123456789abcdef0123456789abcdef")
P1_NOT_ON_CURVE = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" +
"0123456789abcdef0123456789abcdef0123456789abcde0")
BLS_MODULUS_BYTES = spec.BLS_MODULUS.to_bytes(32, spec.ENDIANNESS)
BLOB_ALL_ZEROS = spec.Blob()
BLOB_RANDOM_VALID1 = spec.Blob(b''.join([field_element_bytes(pow(2, n + 256, spec.BLS_MODULUS)) for n in range(4096)]))
BLOB_RANDOM_VALID2 = spec.Blob(b''.join([field_element_bytes(pow(3, n + 256, spec.BLS_MODULUS)) for n in range(4096)]))
BLOB_RANDOM_VALID3 = spec.Blob(b''.join([field_element_bytes(pow(5, n + 256, spec.BLS_MODULUS)) for n in range(4096)]))
BLOB_ALL_MODULUS_MINUS_ONE = spec.Blob(b''.join([field_element_bytes(spec.BLS_MODULUS - 1) for n in range(4096)]))
BLOB_ALMOST_ZERO = spec.Blob(b''.join([field_element_bytes(1 if n == 3211 else 0) for n in range(4096)]))
BLOB_INVALID = spec.Blob(b'\xFF' * 4096 * 32)
BLOB_INVALID_CLOSE = spec.Blob(b''.join(
[BLS_MODULUS_BYTES if n == 2111 else field_element_bytes(0) for n in range(4096)]
))
BLOB_INVALID_LENGTH_PLUS_ONE = BLOB_RANDOM_VALID1 + b"\x00"
BLOB_INVALID_LENGTH_MINUS_ONE = BLOB_RANDOM_VALID1[:-1]
VALID_BLOBS = [BLOB_ALL_ZEROS, BLOB_RANDOM_VALID1, BLOB_RANDOM_VALID2,
BLOB_RANDOM_VALID3, BLOB_ALL_MODULUS_MINUS_ONE, BLOB_ALMOST_ZERO]
INVALID_BLOBS = [BLOB_INVALID, BLOB_INVALID_CLOSE, BLOB_INVALID_LENGTH_PLUS_ONE, BLOB_INVALID_LENGTH_MINUS_ONE]
FE_VALID1 = field_element_bytes(0)
FE_VALID2 = field_element_bytes(1)
FE_VALID3 = field_element_bytes(2)
FE_VALID4 = field_element_bytes(pow(5, 1235, spec.BLS_MODULUS))
FE_VALID5 = field_element_bytes(spec.BLS_MODULUS - 1)
FE_VALID6 = field_element_bytes(spec.ROOTS_OF_UNITY[1])
VALID_FIELD_ELEMENTS = [FE_VALID1, FE_VALID2, FE_VALID3, FE_VALID4, FE_VALID5, FE_VALID6]
FE_INVALID_EQUAL_TO_MODULUS = field_element_bytes_unchecked(spec.BLS_MODULUS)
FE_INVALID_MODULUS_PLUS_ONE = field_element_bytes_unchecked(spec.BLS_MODULUS + 1)
FE_INVALID_UINT256_MAX = field_element_bytes_unchecked(2**256 - 1)
FE_INVALID_UINT256_MID = field_element_bytes_unchecked(2**256 - 2**128)
FE_INVALID_LENGTH_PLUS_ONE = VALID_FIELD_ELEMENTS[0] + b"\x00"
FE_INVALID_LENGTH_MINUS_ONE = VALID_FIELD_ELEMENTS[0][:-1]
INVALID_FIELD_ELEMENTS = [FE_INVALID_EQUAL_TO_MODULUS, FE_INVALID_MODULUS_PLUS_ONE,
FE_INVALID_UINT256_MAX, FE_INVALID_UINT256_MID,
FE_INVALID_LENGTH_PLUS_ONE, FE_INVALID_LENGTH_MINUS_ONE]
def hash(x):
return sha256(x).digest()
def int_to_hex(n: int, byte_length: int = None) -> str:
byte_value = int_to_big_endian(n)
if byte_length:
byte_value = byte_value.rjust(byte_length, b'\x00')
return encode_hex(byte_value)
def case01_blob_to_kzg_commitment():
# Valid cases
for blob in VALID_BLOBS:
commitment = spec.blob_to_kzg_commitment(blob)
identifier = f'{encode_hex(hash(blob))}'
yield f'blob_to_kzg_commitment_case_valid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
'input': {
'blob': encode_hex(blob),
},
'output': encode_hex(commitment)
}
# Edge case: Invalid blobs
for blob in INVALID_BLOBS:
identifier = f'{encode_hex(hash(blob))}'
expect_exception(spec.blob_to_kzg_commitment, blob)
yield f'blob_to_kzg_commitment_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
'input': {
'blob': encode_hex(blob)
},
'output': None
}
def case02_compute_kzg_proof():
# Valid cases
for blob in VALID_BLOBS:
for z in VALID_FIELD_ELEMENTS:
proof, y = spec.compute_kzg_proof(blob, z)
identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}'
yield f'compute_kzg_proof_case_valid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
'input': {
'blob': encode_hex(blob),
'z': encode_hex(z),
},
'output': (encode_hex(proof), encode_hex(y))
}
# Edge case: Invalid blobs
for blob in INVALID_BLOBS:
z = VALID_FIELD_ELEMENTS[0]
expect_exception(spec.compute_kzg_proof, blob, z)
identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}'
yield f'compute_kzg_proof_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
'input': {
'blob': encode_hex(blob),
'z': encode_hex(z),
},
'output': None
}
# Edge case: Invalid z
for z in INVALID_FIELD_ELEMENTS:
blob = VALID_BLOBS[4]
expect_exception(spec.compute_kzg_proof, blob, z)
identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}'
yield f'compute_kzg_proof_case_invalid_z_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
'input': {
'blob': encode_hex(blob),
'z': encode_hex(z),
},
'output': None
}
def case03_verify_kzg_proof():
# Valid cases
for blob in VALID_BLOBS:
for z in VALID_FIELD_ELEMENTS:
proof, y = spec.compute_kzg_proof(blob, z)
commitment = spec.blob_to_kzg_commitment(blob)
assert spec.verify_kzg_proof(commitment, z, y, proof)
identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}'
yield f'verify_kzg_proof_case_correct_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
'input': {
'commitment': encode_hex(commitment),
'z': encode_hex(z),
'y': encode_hex(y),
'proof': encode_hex(proof),
},
'output': True
}
# Incorrect proofs
for blob in VALID_BLOBS:
for z in VALID_FIELD_ELEMENTS:
proof_orig, y = spec.compute_kzg_proof(blob, z)
proof = bls_add_one(proof_orig)
commitment = spec.blob_to_kzg_commitment(blob)
assert not spec.verify_kzg_proof(commitment, z, y, proof)
identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}'
yield f'verify_kzg_proof_case_incorrect_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
'input': {
'commitment': encode_hex(commitment),
'z': encode_hex(z),
'y': encode_hex(y),
'proof': encode_hex(proof),
},
'output': False
}
# Edge case: Invalid z
for z in INVALID_FIELD_ELEMENTS:
blob, validz = VALID_BLOBS[4], VALID_FIELD_ELEMENTS[1]
proof, y = spec.compute_kzg_proof(blob, validz)
commitment = spec.blob_to_kzg_commitment(blob)
expect_exception(spec.verify_kzg_proof, commitment, z, y, proof)
identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}'
yield f'verify_kzg_proof_case_invalid_z_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
'input': {
'commitment': encode_hex(commitment),
'z': encode_hex(z),
'y': encode_hex(y),
'proof': encode_hex(proof),
},
'output': None
}
# Edge case: Invalid y
for y in INVALID_FIELD_ELEMENTS:
blob, z = VALID_BLOBS[4], VALID_FIELD_ELEMENTS[1]
proof, _ = spec.compute_kzg_proof(blob, z)
commitment = spec.blob_to_kzg_commitment(blob)
expect_exception(spec.verify_kzg_proof, commitment, z, y, proof)
identifier = f'{encode_hex(hash(blob))}_{encode_hex(y)}'
yield f'verify_kzg_proof_case_invalid_y_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
'input': {
'commitment': encode_hex(commitment),
'z': encode_hex(z),
'y': encode_hex(y),
'proof': encode_hex(proof),
},
'output': None
}
# Edge case: Invalid proof, not in G1
blob, z = VALID_BLOBS[2], VALID_FIELD_ELEMENTS[0]
proof = P1_NOT_IN_G1
commitment = spec.blob_to_kzg_commitment(blob)
y = VALID_FIELD_ELEMENTS[1]
expect_exception(spec.verify_kzg_proof, commitment, z, y, proof)
yield 'verify_kzg_proof_case_proof_not_in_G1', {
'input': {
'commitment': encode_hex(commitment),
'z': encode_hex(z),
'y': encode_hex(y),
'proof': encode_hex(proof),
},
'output': None
}
# Edge case: Invalid proof, not on curve
blob, z = VALID_BLOBS[3], VALID_FIELD_ELEMENTS[1]
proof = P1_NOT_ON_CURVE
commitment = spec.blob_to_kzg_commitment(blob)
y = VALID_FIELD_ELEMENTS[1]
expect_exception(spec.verify_kzg_proof, commitment, z, y, proof)
yield 'verify_kzg_proof_case_proof_not_on_curve', {
'input': {
'commitment': encode_hex(commitment),
'z': encode_hex(z),
'y': encode_hex(y),
'proof': encode_hex(proof),
},
'output': None
}
# Edge case: Invalid proof, too few bytes
blob = VALID_BLOBS[1]
commitment = spec.blob_to_kzg_commitment(blob)
z = VALID_FIELD_ELEMENTS[4]
proof, y = spec.compute_kzg_proof(blob, z)
proof = proof[:-1]
expect_exception(spec.verify_kzg_proof, commitment, z, y, proof)
yield 'verify_kzg_proof_case_proof_too_few_bytes', {
'input': {
'commitment': encode_hex(commitment),
'z': encode_hex(z),
'y': encode_hex(y),
'proof': encode_hex(proof),
},
'output': None
}
# Edge case: Invalid proof, too many bytes
blob = VALID_BLOBS[1]
commitment = spec.blob_to_kzg_commitment(blob)
z = VALID_FIELD_ELEMENTS[4]
proof, y = spec.compute_kzg_proof(blob, z)
proof = proof + b"\x00"
expect_exception(spec.verify_kzg_proof, commitment, z, y, proof)
yield 'verify_kzg_proof_case_proof_too_many_bytes', {
'input': {
'commitment': encode_hex(commitment),
'z': encode_hex(z),
'y': encode_hex(y),
'proof': encode_hex(proof),
},
'output': None
}
# Edge case: Invalid commitment, not in G1
blob, z = VALID_BLOBS[4], VALID_FIELD_ELEMENTS[3]
proof, y = spec.compute_kzg_proof(blob, z)
commitment = P1_NOT_IN_G1
expect_exception(spec.verify_kzg_proof, commitment, z, y, proof)
yield 'verify_kzg_proof_case_commitment_not_in_G1', {
'input': {
'commitment': encode_hex(commitment),
'z': encode_hex(z),
'y': encode_hex(y),
'proof': encode_hex(proof),
},
'output': None
}
# Edge case: Invalid commitment, not on curve
blob, z = VALID_BLOBS[1], VALID_FIELD_ELEMENTS[4]
proof, y = spec.compute_kzg_proof(blob, z)
commitment = P1_NOT_ON_CURVE
expect_exception(spec.verify_kzg_proof, commitment, z, y, proof)
yield 'verify_kzg_proof_case_commitment_not_on_curve', {
'input': {
'commitment': encode_hex(commitment),
'z': encode_hex(z),
'y': encode_hex(y),
'proof': encode_hex(proof),
},
'output': None
}
# Edge case: Invalid commitment, too few bytes
blob = VALID_BLOBS[1]
commitment = spec.blob_to_kzg_commitment(blob)[:-1]
z = VALID_FIELD_ELEMENTS[4]
proof, y = spec.compute_kzg_proof(blob, z)
expect_exception(spec.verify_kzg_proof, commitment, z, y, proof)
yield 'verify_kzg_proof_case_commitment_too_few_bytes', {
'input': {
'commitment': encode_hex(commitment),
'z': encode_hex(z),
'y': encode_hex(y),
'proof': encode_hex(proof),
},
'output': None
}
# Edge case: Invalid commitment, too many bytes
blob = VALID_BLOBS[1]
commitment = spec.blob_to_kzg_commitment(blob) + b"\x00"
z = VALID_FIELD_ELEMENTS[4]
proof, y = spec.compute_kzg_proof(blob, z)
expect_exception(spec.verify_kzg_proof, commitment, z, y, proof)
yield 'verify_kzg_proof_case_commitment_too_many_bytes', {
'input': {
'commitment': encode_hex(commitment),
'z': encode_hex(z),
'y': encode_hex(y),
'proof': encode_hex(proof),
},
'output': None
}
def case04_compute_blob_kzg_proof():
# Valid cases
for blob in VALID_BLOBS:
commitment = spec.blob_to_kzg_commitment(blob)
proof = spec.compute_blob_kzg_proof(blob, commitment)
identifier = f'{encode_hex(hash(blob))}'
yield f'compute_blob_kzg_proof_case_valid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
'input': {
'blob': encode_hex(blob),
'commitment': encode_hex(commitment),
},
'output': encode_hex(proof)
}
# Edge case: Invalid blob
for blob in INVALID_BLOBS:
commitment = G1
expect_exception(spec.compute_blob_kzg_proof, blob, commitment)
identifier = f'{encode_hex(hash(blob))}'
yield f'compute_blob_kzg_proof_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
'input': {
'blob': encode_hex(blob),
'commitment': encode_hex(commitment),
},
'output': None
}
# Edge case: Invalid commitment, not in G1
commitment = P1_NOT_IN_G1
blob = VALID_BLOBS[1]
expect_exception(spec.compute_blob_kzg_proof, blob, commitment)
identifier = f'{encode_hex(hash(blob))}'
yield 'compute_blob_kzg_proof_case_invalid_commitment_not_in_G1', {
'input': {
'blob': encode_hex(blob),
'commitment': encode_hex(commitment),
},
'output': None
}
# Edge case: Invalid commitment, not on curve
commitment = P1_NOT_ON_CURVE
blob = VALID_BLOBS[1]
expect_exception(spec.compute_blob_kzg_proof, blob, commitment)
identifier = f'{encode_hex(hash(blob))}'
yield 'compute_blob_kzg_proof_case_invalid_commitment_not_on_curve', {
'input': {
'blob': encode_hex(blob),
'commitment': encode_hex(commitment),
},
'output': None
}
def case05_verify_blob_kzg_proof():
# Valid cases
for blob in VALID_BLOBS:
commitment = spec.blob_to_kzg_commitment(blob)
proof = spec.compute_blob_kzg_proof(blob, commitment)
assert spec.verify_blob_kzg_proof(blob, commitment, proof)
identifier = f'{encode_hex(hash(blob))}'
yield f'verify_blob_kzg_proof_case_correct_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
'input': {
'blob': encode_hex(blob),
'commitment': encode_hex(commitment),
'proof': encode_hex(proof),
},
'output': True
}
# Incorrect proofs
for blob in VALID_BLOBS:
commitment = spec.blob_to_kzg_commitment(blob)
proof = bls_add_one(spec.compute_blob_kzg_proof(blob, commitment))
assert not spec.verify_blob_kzg_proof(blob, commitment, proof)
identifier = f'{encode_hex(hash(blob))}'
yield f'verify_blob_kzg_proof_case_incorrect_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
'input': {
'blob': encode_hex(blob),
'commitment': encode_hex(commitment),
'proof': encode_hex(proof),
},
'output': False
}
# Edge case: Invalid proof, not in G1
blob = VALID_BLOBS[2]
proof = P1_NOT_IN_G1
commitment = G1
expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof)
yield 'verify_blob_kzg_proof_case_proof_not_in_G1', {
'input': {
'blob': encode_hex(blob),
'commitment': encode_hex(commitment),
'proof': encode_hex(proof),
},
'output': None
}
# Edge case: Invalid proof, not on curve
blob = VALID_BLOBS[1]
proof = P1_NOT_ON_CURVE
commitment = G1
expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof)
yield 'verify_blob_kzg_proof_case_proof_not_on_curve', {
'input': {
'blob': encode_hex(blob),
'commitment': encode_hex(commitment),
'proof': encode_hex(proof),
},
'output': None
}
# Edge case: Invalid proof, too few bytes
blob = VALID_BLOBS[1]
commitment = spec.blob_to_kzg_commitment(blob)
proof = spec.compute_blob_kzg_proof(blob, commitment)[:-1]
expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof)
yield 'verify_blob_kzg_proof_case_proof_too_few_bytes', {
'input': {
'blob': encode_hex(blob),
'commitment': encode_hex(commitment),
'proof': encode_hex(proof),
},
'output': None
}
# Edge case: Invalid proof, too many bytes
blob = VALID_BLOBS[1]
commitment = spec.blob_to_kzg_commitment(blob)
proof = spec.compute_blob_kzg_proof(blob, commitment) + b"\x00"
expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof)
yield 'verify_blob_kzg_proof_case_proof_too_many_bytes', {
'input': {
'blob': encode_hex(blob),
'commitment': encode_hex(commitment),
'proof': encode_hex(proof),
},
'output': None
}
# Edge case: Invalid commitment, not in G1
blob = VALID_BLOBS[0]
proof = G1
commitment = P1_NOT_IN_G1
expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof)
yield 'verify_blob_kzg_proof_case_commitment_not_in_G1', {
'input': {
'blob': encode_hex(blob),
'commitment': encode_hex(commitment),
'proof': encode_hex(proof),
},
'output': None
}
# Edge case: Invalid commitment, not on curve
blob = VALID_BLOBS[2]
proof = G1
commitment = P1_NOT_ON_CURVE
expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof)
yield 'verify_blob_kzg_proof_case_commitment_not_on_curve', {
'input': {
'blob': encode_hex(blob),
'commitment': encode_hex(commitment),
'proof': encode_hex(proof),
},
'output': None
}
# Edge case: Invalid commitment, too few bytes
blob = VALID_BLOBS[1]
commitment = spec.blob_to_kzg_commitment(blob)
proof = spec.compute_blob_kzg_proof(blob, commitment)
commitment = commitment[:-1]
expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof)
yield 'verify_blob_kzg_proof_case_commitment_too_few_bytes', {
'input': {
'blob': encode_hex(blob),
'commitment': encode_hex(commitment),
'proof': encode_hex(proof),
},
'output': None
}
# Edge case: Invalid commitment, too many bytes
blob = VALID_BLOBS[1]
commitment = spec.blob_to_kzg_commitment(blob)
proof = spec.compute_blob_kzg_proof(blob, commitment)
commitment = commitment + b"\x00"
expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof)
yield 'verify_blob_kzg_proof_case_commitment_too_many_bytes', {
'input': {
'blob': encode_hex(blob),
'commitment': encode_hex(commitment),
'proof': encode_hex(proof),
},
'output': None
}
# Edge case: Invalid blob
for blob in INVALID_BLOBS:
proof = G1
commitment = G1
expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof)
identifier = f'{encode_hex(hash(blob))}'
yield f'verify_blob_kzg_proof_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
'input': {
'blob': encode_hex(blob),
'commitment': encode_hex(commitment),
'proof': encode_hex(proof),
},
'output': None
}
def case06_verify_blob_kzg_proof_batch():
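    """
    Generate test vectors for `verify_blob_kzg_proof_batch`: valid batches
    of increasing size, a batch containing one wrong proof
    (`output: False`), malformed elements at various positions, and
    mismatched input lengths (all encoded as `output: None`).
    """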
# Valid cases
proofs = []
commitments = []
for blob in VALID_BLOBS:
commitments.append(spec.blob_to_kzg_commitment(blob))
proofs.append(spec.compute_blob_kzg_proof(blob, commitments[-1]))
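    # Every prefix of the valid (blob, commitment, proof) triples, from the
    # empty batch up to the full set, must verify.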
    for i in range(len(proofs) + 1):
assert spec.verify_blob_kzg_proof_batch(VALID_BLOBS[:i], commitments[:i], proofs[:i])
identifier = f'{encode_hex(hash(b"".join(VALID_BLOBS[:i])))}'
yield f'verify_blob_kzg_proof_batch_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
'input': {
'blobs': encode_hex_list(VALID_BLOBS[:i]),
'commitments': encode_hex_list(commitments[:i]),
'proofs': encode_hex_list(proofs[:i]),
},
'output': True
}
# Incorrect proof
proofs_incorrect = [bls_add_one(proofs[0])] + proofs[1:]
assert not spec.verify_blob_kzg_proof_batch(VALID_BLOBS, commitments, proofs_incorrect)
yield 'verify_blob_kzg_proof_batch_case_invalid_proof', {
'input': {
'blobs': encode_hex_list(VALID_BLOBS),
'commitments': encode_hex_list(commitments),
'proofs': encode_hex_list(proofs_incorrect),
},
'output': False
}
# Edge case: Invalid blobs
for blob in INVALID_BLOBS:
blobs_invalid = VALID_BLOBS[:4] + [blob] + VALID_BLOBS[5:]
expect_exception(spec.verify_blob_kzg_proof_batch, blobs_invalid, commitments, proofs)
identifier = f'{encode_hex(hash(blob))}'
yield f'verify_blob_kzg_proof_batch_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
'input': {
'blobs': encode_hex_list(blobs_invalid),
'commitments': encode_hex_list(commitments),
'proofs': encode_hex_list(proofs),
},
'output': None
}
# Edge case: Invalid proof, not in G1
proofs_invalid_notG1 = [P1_NOT_IN_G1] + proofs[1:]
expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, proofs_invalid_notG1)
yield 'verify_blob_kzg_proof_batch_case_proof_not_in_G1', {
'input': {
'blobs': encode_hex_list(VALID_BLOBS),
'commitments': encode_hex_list(commitments),
'proofs': encode_hex_list(proofs_invalid_notG1),
},
'output': None
}
# Edge case: Invalid proof, not on curve
proofs_invalid_notCurve = proofs[:1] + [P1_NOT_ON_CURVE] + proofs[2:]
expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, proofs_invalid_notCurve)
yield 'verify_blob_kzg_proof_batch_case_proof_not_on_curve', {
'input': {
'blobs': encode_hex_list(VALID_BLOBS),
'commitments': encode_hex_list(commitments),
'proofs': encode_hex_list(proofs_invalid_notCurve),
},
'output': None
}
# Edge case: Invalid proof, too few bytes
proofs_invalid_tooFewBytes = proofs[:1] + [proofs[1][:-1]] + proofs[2:]
expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, proofs_invalid_tooFewBytes)
yield 'verify_blob_kzg_proof_batch_case_proof_too_few_bytes', {
'input': {
'blobs': encode_hex_list(VALID_BLOBS),
'commitments': encode_hex_list(commitments),
'proofs': encode_hex_list(proofs_invalid_tooFewBytes),
},
'output': None
}
# Edge case: Invalid proof, too many bytes
proofs_invalid_tooManyBytes = proofs[:1] + [proofs[1] + b"\x00"] + proofs[2:]
expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, proofs_invalid_tooManyBytes)
yield 'verify_blob_kzg_proof_batch_case_proof_too_many_bytes', {
'input': {
'blobs': encode_hex_list(VALID_BLOBS),
'commitments': encode_hex_list(commitments),
'proofs': encode_hex_list(proofs_invalid_tooManyBytes),
},
'output': None
}
# Edge case: Invalid commitment, not in G1
commitments_invalid_notG1 = commitments[:2] + [P1_NOT_IN_G1] + commitments[3:]
    expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments_invalid_notG1, proofs)
yield 'verify_blob_kzg_proof_batch_case_commitment_not_in_G1', {
'input': {
'blobs': encode_hex_list(VALID_BLOBS),
'commitments': encode_hex_list(commitments_invalid_notG1),
'proofs': encode_hex_list(proofs),
},
'output': None
}
# Edge case: Invalid commitment, not on curve
commitments_invalid_notCurve = commitments[:3] + [P1_NOT_ON_CURVE] + commitments[4:]
    expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments_invalid_notCurve, proofs)
    yield 'verify_blob_kzg_proof_batch_case_commitment_not_on_curve', {
'input': {
'blobs': encode_hex_list(VALID_BLOBS),
'commitments': encode_hex_list(commitments_invalid_notCurve),
'proofs': encode_hex_list(proofs),
},
'output': None
}
# Edge case: Invalid commitment, too few bytes
commitments_invalid_tooFewBytes = commitments[:3] + [commitments[3][:-1]] + commitments[4:]
    expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments_invalid_tooFewBytes, proofs)
    yield 'verify_blob_kzg_proof_batch_case_commitment_too_few_bytes', {
'input': {
'blobs': encode_hex_list(VALID_BLOBS),
'commitments': encode_hex_list(commitments_invalid_tooFewBytes),
'proofs': encode_hex_list(proofs),
},
'output': None
}
# Edge case: Invalid commitment, too many bytes
commitments_invalid_tooManyBytes = commitments[:3] + [commitments[3] + b"\x00"] + commitments[4:]
    expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments_invalid_tooManyBytes, proofs)
    yield 'verify_blob_kzg_proof_batch_case_commitment_too_many_bytes', {
'input': {
'blobs': encode_hex_list(VALID_BLOBS),
'commitments': encode_hex_list(commitments_invalid_tooManyBytes),
'proofs': encode_hex_list(proofs),
},
'output': None
}
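    # The batch call must reject mismatched input lengths outright,
    # whichever of the three lists comes up short.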
# Edge case: Blob length different
expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS[:-1], commitments, proofs)
yield 'verify_blob_kzg_proof_batch_case_blob_length_different', {
'input': {
'blobs': encode_hex_list(VALID_BLOBS[:-1]),
'commitments': encode_hex_list(commitments),
'proofs': encode_hex_list(proofs),
},
'output': None
}
# Edge case: Commitment length different
expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments[:-1], proofs)
yield 'verify_blob_kzg_proof_batch_case_commitment_length_different', {
'input': {
'blobs': encode_hex_list(VALID_BLOBS),
'commitments': encode_hex_list(commitments[:-1]),
'proofs': encode_hex_list(proofs),
},
'output': None
}
# Edge case: Proof length different
expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, proofs[:-1])
yield 'verify_blob_kzg_proof_batch_case_proof_length_different', {
'input': {
'blobs': encode_hex_list(VALID_BLOBS),
'commitments': encode_hex_list(commitments),
'proofs': encode_hex_list(proofs[:-1]),
},
'output': None
}
def create_provider(fork_name: SpecForkName,
handler_name: str,
test_case_fn: Callable[[], Iterable[Tuple[str, Dict[str, Any]]]]) -> gen_typing.TestProvider:
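    """Bundle a test-case-generating function into a provider for the `kzg` runner."""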
def prepare_fn() -> None:
        # Nothing to load or change in the spec yet; this may change in future forks.
        # The tests go into the `general` config category, so they do not require any particular configuration.
return
def cases_fn() -> Iterable[gen_typing.TestCase]:
for data in test_case_fn():
(case_name, case_content) = data
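            # Bind the content through a default argument: a plain closure over
            # `case_content` would be late-binding, handing every test case the
            # content of the final loop iteration.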
yield gen_typing.TestCase(
fork_name=fork_name,
preset_name='general',
runner_name='kzg',
handler_name=handler_name,
suite_name='small',
case_name=case_name,
                case_fn=lambda content=case_content: [('data', 'data', content)]
)
return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
if __name__ == "__main__":
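    # Use the arkworks-backed BLS implementation for faster curve arithmetic
    # while generating the vectors.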
bls.use_arkworks()
gen_runner.run_generator("kzg", [
# DENEB
create_provider(DENEB, 'blob_to_kzg_commitment', case01_blob_to_kzg_commitment),
create_provider(DENEB, 'compute_kzg_proof', case02_compute_kzg_proof),
create_provider(DENEB, 'verify_kzg_proof', case03_verify_kzg_proof),
create_provider(DENEB, 'compute_blob_kzg_proof', case04_compute_blob_kzg_proof),
create_provider(DENEB, 'verify_blob_kzg_proof', case05_verify_blob_kzg_proof),
create_provider(DENEB, 'verify_blob_kzg_proof_batch', case06_verify_blob_kzg_proof_batch),
])

View File

@@ -0,0 +1,2 @@
pytest>=4.4
../../../[generator]