Mirror of https://github.com/ethereum/consensus-specs
Merge branch 'dev' into pr3311
.circleci/config.yml
@@ -157,7 +157,7 @@ jobs:
           path: tests/core/pyspec/test-reports
   test-eip6110:
     docker:
-      - image: circleci/python:3.8
+      - image: circleci/python:3.9
     working_directory: ~/specs-repo
     steps:
       - restore_cache:
@@ -176,7 +176,7 @@ jobs:
       - checkout
       - run:
          name: Check table of contents
-          command: sudo npm install -g doctoc@2 && make check_toc
+          command: sudo npm install -g doctoc@2.2.0 && make check_toc
  codespell:
    docker:
      - image: circleci/python:3.9

.github/workflows/docs.yml (vendored, 2 lines changed)
@@ -20,5 +20,5 @@ jobs:
       with:
         key: ${{ github.ref }}
         path: .cache
-      - run: pip install -e .[docs]
+      - run: pip install mkdocs==1.4.2 mkdocs-material==9.1.5 mdx-truly-sane-lists==1.3 mkdocs-awesome-pages-plugin==2.8.0
       - run: mkdocs gh-deploy --force

.github/workflows/run-tests.yml (vendored, 2 lines changed)
@@ -50,7 +50,7 @@ jobs:
       with:
         ref: ${{ github.event.inputs.commitRef || env.DEFAULT_BRANCH }}
     - name: Check table of contents
-      run: sudo npm install -g doctoc@2 && make check_toc
+      run: sudo npm install -g doctoc@2.2.0 && make check_toc

  codespell:
    runs-on: self-hosted

.gitignore (vendored, 1 line added)
@@ -21,6 +21,7 @@ tests/core/pyspec/eth2spec/bellatrix/
 tests/core/pyspec/eth2spec/capella/
 tests/core/pyspec/eth2spec/deneb/
 tests/core/pyspec/eth2spec/eip6110/
+tests/core/pyspec/eth2spec/whisk/

 # coverage reports
 .htmlcov

Makefile (6 lines changed)
@@ -34,7 +34,7 @@ MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/*/*.md) \
                  $(wildcard $(SPEC_DIR)/_features/*/*/*.md) \
                  $(wildcard $(SSZ_DIR)/*.md)

-ALL_EXECUTABLE_SPECS = phase0 altair bellatrix capella deneb eip6110
+ALL_EXECUTABLE_SPECS = phase0 altair bellatrix capella deneb eip6110 whisk
 # The parameters for commands. Use `foreach` to avoid listing specs again.
 COVERAGE_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPECS), --cov=eth2spec.$S.$(TEST_PRESET_TYPE))
 PYLINT_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPECS), ./eth2spec/$S)

@@ -146,8 +146,8 @@ codespell:
 lint: pyspec
 	. venv/bin/activate; cd $(PY_SPEC_DIR); \
 	flake8 --config $(LINTER_CONFIG_FILE) ./eth2spec \
-	&& pylint --rcfile $(LINTER_CONFIG_FILE) $(PYLINT_SCOPE) \
-	&& mypy --config-file $(LINTER_CONFIG_FILE) $(MYPY_SCOPE)
+	&& python -m pylint --rcfile $(LINTER_CONFIG_FILE) $(PYLINT_SCOPE) \
+	&& python -m mypy --config-file $(LINTER_CONFIG_FILE) $(MYPY_SCOPE)

 lint_generators: pyspec
 	. venv/bin/activate; cd $(TEST_GENERATORS_DIR); \

configs/mainnet.yaml
@@ -53,6 +53,9 @@ DENEB_FORK_EPOCH: 18446744073709551615
 # EIP6110
 EIP6110_FORK_VERSION: 0x05000000  # temporary stub
 EIP6110_FORK_EPOCH: 18446744073709551615
+# WHISK
+WHISK_FORK_VERSION: 0x06000000  # temporary stub
+WHISK_FORK_EPOCH: 18446744073709551615


 # Time parameters
@@ -94,3 +97,43 @@ PROPOSER_SCORE_BOOST: 40
 DEPOSIT_CHAIN_ID: 1
 DEPOSIT_NETWORK_ID: 1
 DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa
+
+
+# Networking
+# ---------------------------------------------------------------
+# `10 * 2**20` (= 10485760, 10 MiB)
+GOSSIP_MAX_SIZE: 10485760
+# `2**10` (= 1024)
+MAX_REQUEST_BLOCKS: 1024
+# `2**8` (= 256)
+EPOCHS_PER_SUBNET_SUBSCRIPTION: 256
+# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months)
+MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024
+# `10 * 2**20` (= 10485760, 10 MiB)
+MAX_CHUNK_SIZE: 10485760
+# 5s
+TTFB_TIMEOUT: 5
+# 10s
+RESP_TIMEOUT: 10
+ATTESTATION_PROPAGATION_SLOT_RANGE: 32
+# 500ms
+MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500
+MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000
+MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000
+# 2 subnets per node
+SUBNETS_PER_NODE: 2
+# `2**6` (= 64)
+ATTESTATION_SUBNET_COUNT: 64
+ATTESTATION_SUBNET_EXTRA_BITS: 0
+# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS
+ATTESTATION_SUBNET_PREFIX_BITS: 6
+
+# Deneb
+# `2**7` (= 128)
+MAX_REQUEST_BLOCKS_DENEB: 128
+# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK
+MAX_REQUEST_BLOB_SIDECARS: 768
+# `2**12` (= 4096 epochs, ~18 days)
+MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096
+# `6`
+BLOB_SIDECAR_SUBNET_COUNT: 6
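*Note*: Several of the values above are derived rather than independent parameters. A quick sanity check of the arithmetic in the comments (an illustrative sketch, not part of the diff; it assumes the mainnet preset values `MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256` and `CHURN_LIMIT_QUOTIENT = 65536` defined elsewhere in the specs):

```python
import math

# Mainnet preset inputs (assumed; defined elsewhere in the specs).
MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256
CHURN_LIMIT_QUOTIENT = 65536
MAX_BLOBS_PER_BLOCK = 6
MAX_REQUEST_BLOCKS_DENEB = 128
ATTESTATION_SUBNET_COUNT = 64
ATTESTATION_SUBNET_EXTRA_BITS = 0

# MIN_EPOCHS_FOR_BLOCK_REQUESTS = MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2
assert MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2 == 33024
# MAX_REQUEST_BLOB_SIDECARS = MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK
assert MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK == 768
# ATTESTATION_SUBNET_PREFIX_BITS = ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS
assert math.ceil(math.log2(ATTESTATION_SUBNET_COUNT)) + ATTESTATION_SUBNET_EXTRA_BITS == 6
```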

configs/minimal.yaml
@@ -52,6 +52,9 @@ DENEB_FORK_EPOCH: 18446744073709551615
 # EIP6110
 EIP6110_FORK_VERSION: 0x05000001
 EIP6110_FORK_EPOCH: 18446744073709551615
+# WHISK
+WHISK_FORK_VERSION: 0x06000001
+WHISK_FORK_EPOCH: 18446744073709551615


 # Time parameters
@@ -95,3 +98,43 @@ DEPOSIT_CHAIN_ID: 5
 DEPOSIT_NETWORK_ID: 5
 # Configured on a per testnet basis
 DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890
+
+
+# Networking
+# ---------------------------------------------------------------
+# `10 * 2**20` (= 10485760, 10 MiB)
+GOSSIP_MAX_SIZE: 10485760
+# `2**10` (= 1024)
+MAX_REQUEST_BLOCKS: 1024
+# `2**8` (= 256)
+EPOCHS_PER_SUBNET_SUBSCRIPTION: 256
+# [customized] `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 272)
+MIN_EPOCHS_FOR_BLOCK_REQUESTS: 272
+# `10 * 2**20` (= 10485760, 10 MiB)
+MAX_CHUNK_SIZE: 10485760
+# 5s
+TTFB_TIMEOUT: 5
+# 10s
+RESP_TIMEOUT: 10
+ATTESTATION_PROPAGATION_SLOT_RANGE: 32
+# 500ms
+MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500
+MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000
+MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000
+# 2 subnets per node
+SUBNETS_PER_NODE: 2
+# `2**6` (= 64)
+ATTESTATION_SUBNET_COUNT: 64
+ATTESTATION_SUBNET_EXTRA_BITS: 0
+# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS
+ATTESTATION_SUBNET_PREFIX_BITS: 6
+
+# Deneb
+# `2**7` (= 128)
+MAX_REQUEST_BLOCKS_DENEB: 128
+# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK
+MAX_REQUEST_BLOB_SIDECARS: 768
+# `2**12` (= 4096 epochs, ~18 days)
+MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096
+# `6`
+BLOB_SIDECAR_SUBNET_COUNT: 6
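*Note*: The `[customized]` value follows the same formula with the minimal preset's inputs (a sketch; assumes `MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256` and `CHURN_LIMIT_QUOTIENT = 32` from the minimal preset):

```python
# Minimal preset inputs (assumed; defined elsewhere in the specs).
MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256
CHURN_LIMIT_QUOTIENT = 32

assert MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2 == 272
```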

presets/mainnet/deneb.yaml
@@ -4,5 +4,7 @@
 # ---------------------------------------------------------------
 # `uint64(4096)`
 FIELD_ELEMENTS_PER_BLOB: 4096
-# `uint64(2**2)` (= 4)
-MAX_BLOBS_PER_BLOCK: 4
+# `uint64(2**12)` (= 4096)
+MAX_BLOB_COMMITMENTS_PER_BLOCK: 4096
+# `uint64(6)`
+MAX_BLOBS_PER_BLOCK: 6

presets/mainnet/phase0.yaml
@@ -85,4 +85,4 @@ MAX_ATTESTATIONS: 128
 # 2**4 (= 16)
 MAX_DEPOSITS: 16
 # 2**4 (= 16)
-MAX_VOLUNTARY_EXITS: 16
\ No newline at end of file
+MAX_VOLUNTARY_EXITS: 16

presets/minimal/deneb.yaml
@@ -4,5 +4,7 @@
 # ---------------------------------------------------------------
 # [customized]
 FIELD_ELEMENTS_PER_BLOB: 4
-# `uint64(2**2)` (= 4)
-MAX_BLOBS_PER_BLOCK: 4
+# [customized]
+MAX_BLOB_COMMITMENTS_PER_BLOCK: 16
+# `uint64(6)`
+MAX_BLOBS_PER_BLOCK: 6

presets/minimal/phase0.yaml
@@ -85,4 +85,4 @@ MAX_ATTESTATIONS: 128
 # 2**4 (= 16)
 MAX_DEPOSITS: 16
 # 2**4 (= 16)
-MAX_VOLUNTARY_EXITS: 16
\ No newline at end of file
+MAX_VOLUNTARY_EXITS: 16

setup.py (224 lines changed)
@@ -48,7 +48,27 @@ BELLATRIX = 'bellatrix'
 CAPELLA = 'capella'
 DENEB = 'deneb'
 EIP6110 = 'eip6110'
+WHISK = 'whisk'
+
+PREVIOUS_FORK_OF = {
+    PHASE0: None,
+    ALTAIR: PHASE0,
+    BELLATRIX: ALTAIR,
+    CAPELLA: BELLATRIX,
+    DENEB: CAPELLA,
+    EIP6110: DENEB,
+    WHISK: CAPELLA,
+}
+
+ALL_FORKS = list(PREVIOUS_FORK_OF.keys())
+
+IGNORE_SPEC_FILES = [
+    "specs/phase0/deposit-contract.md"
+]
+
+EXTRA_SPEC_FILES = {
+    BELLATRIX: "sync/optimistic.md"
+}

 # The helper functions that are used when defining constants
 CONSTANT_DEP_SUNDRY_CONSTANTS_FUNCTIONS = '''
@@ -95,6 +115,30 @@ class SpecObject(NamedTuple):
     dataclasses: Dict[str, str]


+def is_post_fork(a, b) -> bool:
+    """
+    Returns true if fork `a` is after `b`, or if `a == b`.
+    """
+    if a == b:
+        return True
+
+    prev_fork = PREVIOUS_FORK_OF[a]
+    if prev_fork == b:
+        return True
+    elif prev_fork is None:
+        return False
+    else:
+        return is_post_fork(prev_fork, b)
+
+
+def get_fork_directory(fork):
+    dir1 = f'specs/{fork}'
+    if os.path.exists(dir1):
+        return dir1
+    dir2 = f'specs/_features/{fork}'
+    if os.path.exists(dir2):
+        return dir2
+    raise FileNotFoundError(f"No directory found for fork: {fork}")
+
+
 def _get_name_from_heading(heading: Heading) -> Optional[str]:
     last_child = heading.children[-1]
     if isinstance(last_child, CodeSpan):
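*Note*: With `WHISK: CAPELLA` in `PREVIOUS_FORK_OF`, the fork graph is a tree rather than a chain, and `is_post_fork` answers "is `a` a descendant of `b`?" by walking parent edges. A few illustrative checks (a sketch, assuming the dict as defined above):

```python
assert is_post_fork('deneb', 'bellatrix')   # deneb -> capella -> bellatrix
assert is_post_fork('whisk', 'capella')     # whisk -> capella
assert not is_post_fork('whisk', 'deneb')   # whisk's lineage goes through capella, skipping deneb
assert not is_post_fork('altair', 'deneb')  # an ancestor is not post-fork of its descendant
```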
@@ -279,7 +323,7 @@ def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], pr
         elif name in config:
             config_vars[name] = VariableDefinition(value_def.type_name, config[name], value_def.comment, None)
         else:
-            if name == 'ENDIANNESS':
+            if name in ('ENDIANNESS', 'KZG_ENDIANNESS'):
                 # Deal with mypy Literal typing check
                 value_def = _parse_value(name, value, type_hint='Final')
             constant_vars[name] = value_def
@@ -336,6 +380,10 @@ class SpecBuilder(ABC):
         """
         raise NotImplementedError()

+    @classmethod
+    def execution_engine_cls(cls) -> str:
+        raise NotImplementedError()
+
     @classmethod
     @abstractmethod
     def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:
@@ -383,7 +431,7 @@ from typing import (

 from eth2spec.utils.ssz.ssz_impl import hash_tree_root, copy, uint_to_bytes
 from eth2spec.utils.ssz.ssz_typing import (
-    View, boolean, Container, List, Vector, uint8, uint32, uint64,
+    View, boolean, Container, List, Vector, uint8, uint32, uint64, uint256,
     Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist)
 from eth2spec.utils.ssz.ssz_typing import Bitvector  # noqa: F401
 from eth2spec.utils import bls
@@ -469,6 +517,12 @@ get_attesting_indices = cache_this(
     ),
     _get_attesting_indices, lru_size=SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT * 3)'''

+    @classmethod
+    def execution_engine_cls(cls) -> str:
+        return ""
+
     @classmethod
     def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:
         return {}
@@ -551,7 +605,7 @@ class BellatrixSpecBuilder(AltairSpecBuilder):
         return super().imports(preset_name) + f'''
 from typing import Protocol
 from eth2spec.altair import {preset_name} as altair
-from eth2spec.utils.ssz.ssz_typing import Bytes8, Bytes20, ByteList, ByteVector, uint256
+from eth2spec.utils.ssz.ssz_typing import Bytes8, Bytes20, ByteList, ByteVector
 '''

     @classmethod
@@ -573,9 +627,11 @@ def get_execution_state(_execution_state_root: Bytes32) -> ExecutionState:


 def get_pow_chain_head() -> PowBlock:
-    pass
+    pass"""

+    @classmethod
+    def execution_engine_cls(cls) -> str:
+        return "\n\n" + """
 class NoopExecutionEngine(ExecutionEngine):

     def notify_new_payload(self: ExecutionEngine, execution_payload: ExecutionPayload) -> bool:
@@ -588,10 +644,17 @@ class NoopExecutionEngine(ExecutionEngine):
                                   payload_attributes: Optional[PayloadAttributes]) -> Optional[PayloadId]:
         pass

-    def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> ExecutionPayload:
+    def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> GetPayloadResponse:
         # pylint: disable=unused-argument
         raise NotImplementedError("no default block production")

+    def is_valid_block_hash(self: ExecutionEngine, execution_payload: ExecutionPayload) -> bool:
+        return True
+
+    def verify_and_notify_new_payload(self: ExecutionEngine,
+                                      new_payload_request: NewPayloadRequest) -> bool:
+        return True
+

 EXECUTION_ENGINE = NoopExecutionEngine()"""
@@ -658,6 +721,39 @@ def retrieve_blobs_and_proofs(beacon_block_root: Root) -> PyUnion[Tuple[Blob, KZ
     # pylint: disable=unused-argument
     return ("TEST", "TEST")'''

+    @classmethod
+    def execution_engine_cls(cls) -> str:
+        return "\n\n" + """
+class NoopExecutionEngine(ExecutionEngine):
+
+    def notify_new_payload(self: ExecutionEngine, execution_payload: ExecutionPayload) -> bool:
+        return True
+
+    def notify_forkchoice_updated(self: ExecutionEngine,
+                                  head_block_hash: Hash32,
+                                  safe_block_hash: Hash32,
+                                  finalized_block_hash: Hash32,
+                                  payload_attributes: Optional[PayloadAttributes]) -> Optional[PayloadId]:
+        pass
+
+    def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> GetPayloadResponse:
+        # pylint: disable=unused-argument
+        raise NotImplementedError("no default block production")
+
+    def is_valid_block_hash(self: ExecutionEngine, execution_payload: ExecutionPayload) -> bool:
+        return True
+
+    def is_valid_versioned_hashes(self: ExecutionEngine, new_payload_request: NewPayloadRequest) -> bool:
+        return True
+
+    def verify_and_notify_new_payload(self: ExecutionEngine,
+                                      new_payload_request: NewPayloadRequest) -> bool:
+        return True
+
+
+EXECUTION_ENGINE = NoopExecutionEngine()"""
+

     @classmethod
     def hardcoded_custom_type_dep_constants(cls, spec_object) -> str:
         constants = {
@@ -681,9 +777,31 @@ from eth2spec.deneb import {preset_name} as deneb
 '''


+#
+# WhiskSpecBuilder
+#
+class WhiskSpecBuilder(CapellaSpecBuilder):
+    fork: str = WHISK
+
+    @classmethod
+    def imports(cls, preset_name: str):
+        return super().imports(preset_name) + f'''
+from eth2spec.capella import {preset_name} as capella
+'''
+
+    @classmethod
+    def hardcoded_custom_type_dep_constants(cls, spec_object) -> str:
+        # Necessary for custom types `WhiskShuffleProof` and `WhiskTrackerProof`
+        constants = {
+            'WHISK_MAX_SHUFFLE_PROOF_SIZE': spec_object.constant_vars['WHISK_MAX_SHUFFLE_PROOF_SIZE'].value,
+            'WHISK_MAX_OPENING_PROOF_SIZE': spec_object.constant_vars['WHISK_MAX_OPENING_PROOF_SIZE'].value,
+        }
+        return {**super().hardcoded_custom_type_dep_constants(spec_object), **constants}
+
+
 spec_builders = {
     builder.fork: builder
-    for builder in (Phase0SpecBuilder, AltairSpecBuilder, BellatrixSpecBuilder, CapellaSpecBuilder, DenebSpecBuilder, EIP6110SpecBuilder)
+    for builder in (Phase0SpecBuilder, AltairSpecBuilder, BellatrixSpecBuilder, CapellaSpecBuilder, DenebSpecBuilder, EIP6110SpecBuilder, WhiskSpecBuilder)
 }
@@ -691,6 +809,11 @@ def is_byte_vector(value: str) -> bool:
     return value.startswith(('ByteVector'))


+def make_function_abstract(protocol_def: ProtocolDefinition, key: str):
+    function = protocol_def.functions[key].split('"""')
+    protocol_def.functions[key] = function[0] + "..."
+
+
 def objects_to_spec(preset_name: str,
                     spec_object: SpecObject,
                     builder: SpecBuilder,
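*Note*: `make_function_abstract` keeps everything up to the first docstring delimiter and replaces the rest with `...`, turning a concrete method into a `Protocol` stub. Roughly (an illustrative sketch with a made-up input string):

```python
source = '''def verify_and_notify_new_payload(self, new_payload_request: NewPayloadRequest) -> bool:
    """
'''
# Splitting on the docstring delimiter and keeping the head yields an abstract stub:
stub = source.split('"""')[0] + "..."
assert stub == (
    'def verify_and_notify_new_payload(self, new_payload_request: NewPayloadRequest) -> bool:\n    ...'
)
```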
@@ -708,6 +831,11 @@ def objects_to_spec(preset_name: str,
     )

     def format_protocol(protocol_name: str, protocol_def: ProtocolDefinition) -> str:
+        abstract_functions = ["verify_and_notify_new_payload"]
+        for key in protocol_def.functions.keys():
+            if key in abstract_functions:
+                make_function_abstract(protocol_def, key)
+
         protocol = f"class {protocol_name}(Protocol):"
         for fn_source in protocol_def.functions.values():
             fn_source = fn_source.replace("self: " + protocol_name, "self")
@@ -783,6 +911,7 @@ def objects_to_spec(preset_name: str,
         + ('\n\n\n' + protocols_spec if protocols_spec != '' else '')
         + '\n\n\n' + functions_spec
         + '\n\n' + builder.sundry_functions()
+        + builder.execution_engine_cls()
         # Since some constants are hardcoded in setup.py, the following assertions verify that the hardcoded constants are
         # the same as the spec definition.
         + ('\n\n\n' + ssz_dep_constants_verification if ssz_dep_constants_verification != '' else '')
@@ -982,68 +1111,20 @@ class PySpecCommand(Command):
         if len(self.md_doc_paths) == 0:
             print("no paths were specified, using default markdown file paths for pyspec"
                   " build (spec fork: %s)" % self.spec_fork)
-            if self.spec_fork in (PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110):
-                self.md_doc_paths = """
-                    specs/phase0/beacon-chain.md
-                    specs/phase0/fork-choice.md
-                    specs/phase0/validator.md
-                    specs/phase0/weak-subjectivity.md
-                """
-            if self.spec_fork in (ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110):
-                self.md_doc_paths += """
-                    specs/altair/light-client/full-node.md
-                    specs/altair/light-client/light-client.md
-                    specs/altair/light-client/p2p-interface.md
-                    specs/altair/light-client/sync-protocol.md
-                    specs/altair/beacon-chain.md
-                    specs/altair/bls.md
-                    specs/altair/fork.md
-                    specs/altair/validator.md
-                    specs/altair/p2p-interface.md
-                """
-            if self.spec_fork in (BELLATRIX, CAPELLA, DENEB, EIP6110):
-                self.md_doc_paths += """
-                    specs/bellatrix/beacon-chain.md
-                    specs/bellatrix/fork.md
-                    specs/bellatrix/fork-choice.md
-                    specs/bellatrix/validator.md
-                    specs/bellatrix/p2p-interface.md
-                    sync/optimistic.md
-                """
-            if self.spec_fork in (CAPELLA, DENEB, EIP6110):
-                self.md_doc_paths += """
-                    specs/capella/light-client/fork.md
-                    specs/capella/light-client/full-node.md
-                    specs/capella/light-client/p2p-interface.md
-                    specs/capella/light-client/sync-protocol.md
-                    specs/capella/beacon-chain.md
-                    specs/capella/fork.md
-                    specs/capella/fork-choice.md
-                    specs/capella/validator.md
-                    specs/capella/p2p-interface.md
-                """
-            if self.spec_fork in (DENEB, EIP6110):
-                self.md_doc_paths += """
-                    specs/deneb/light-client/fork.md
-                    specs/deneb/light-client/full-node.md
-                    specs/deneb/light-client/p2p-interface.md
-                    specs/deneb/light-client/sync-protocol.md
-                    specs/deneb/beacon-chain.md
-                    specs/deneb/fork.md
-                    specs/deneb/fork-choice.md
-                    specs/deneb/polynomial-commitments.md
-                    specs/deneb/p2p-interface.md
-                    specs/deneb/validator.md
-                """
-            if self.spec_fork == EIP6110:
-                self.md_doc_paths += """
-                    specs/_features/eip6110/light-client/fork.md
-                    specs/_features/eip6110/light-client/full-node.md
-                    specs/_features/eip6110/light-client/p2p-interface.md
-                    specs/_features/eip6110/light-client/sync-protocol.md
-                    specs/_features/eip6110/beacon-chain.md
-                    specs/_features/eip6110/fork.md
-                """
+            self.md_doc_paths = ""
+
+            for fork in ALL_FORKS:
+                if is_post_fork(self.spec_fork, fork):
+                    # Append all files in fork directory recursively
+                    for root, dirs, files in os.walk(get_fork_directory(fork)):
+                        for filename in files:
+                            filepath = os.path.join(root, filename)
+                            if filepath.endswith('.md') and filepath not in IGNORE_SPEC_FILES:
+                                self.md_doc_paths += filepath + "\n"
+                    # Append extra files if any
+                    if fork in EXTRA_SPEC_FILES:
+                        self.md_doc_paths += EXTRA_SPEC_FILES[fork] + "\n"

         if len(self.md_doc_paths) == 0:
             raise Exception('no markdown files specified, and spec fork "%s" is unknown', self.spec_fork)
@@ -1184,7 +1265,7 @@ setup(
     extras_require={
         "test": ["pytest>=4.4", "pytest-cov", "pytest-xdist"],
         "lint": ["flake8==5.0.4", "mypy==0.981", "pylint==2.15.3"],
-        "generator": ["python-snappy==0.6.1", "filelock"],
+        "generator": ["python-snappy==0.6.1", "filelock", "pathos==0.3.0"],
         "docs": ["mkdocs==1.4.2", "mkdocs-material==9.1.5", "mdx-truly-sane-lists==1.3", "mkdocs-awesome-pages-plugin==2.8.0"]
     },
     install_requires=[
@@ -1196,8 +1277,9 @@ setup(
         "remerkleable==0.1.27",
         "trie==2.0.2",
         RUAMEL_YAML_VERSION,
-        "lru-dict==1.1.8",
+        "lru-dict==1.2.0",
         MARKO_VERSION,
         "py_arkworks_bls12381==0.3.4",
+        "curdleproofs @ git+https://github.com/nalinbhardwaj/curdleproofs.pie@805d06785b6ff35fde7148762277dd1ae678beeb#egg=curdleproofs&subdirectory=curdleproofs",
     ]
 )

@@ -13,7 +13,7 @@
   - [Helpers](#helpers)
   - [Protocols](#protocols)
     - [`ExecutionEngine`](#executionengine)
-      - [`get_payload`](#get_payload)
+      - [Modified `get_payload`](#modified-get_payload)
 - [Beacon chain responsibilities](#beacon-chain-responsibilities)
   - [Block proposal](#block-proposal)
     - [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
@@ -40,7 +40,7 @@ Please see related Beacon Chain doc before continuing and use them as a reference.

 ### `ExecutionEngine`

-#### `get_payload`
+#### Modified `get_payload`

 `get_payload` returns the upgraded EIP-4788 `ExecutionPayload` type.
@@ -64,27 +64,12 @@ parameter to the `PayloadAttributes`.

 ```python
 def prepare_execution_payload(state: BeaconState,
-                              pow_chain: Dict[Hash32, PowBlock],
                               safe_block_hash: Hash32,
                               finalized_block_hash: Hash32,
                               suggested_fee_recipient: ExecutionAddress,
                               execution_engine: ExecutionEngine) -> Optional[PayloadId]:
-    if not is_merge_transition_complete(state):
-        is_terminal_block_hash_set = TERMINAL_BLOCK_HASH != Hash32()
-        is_activation_epoch_reached = get_current_epoch(state) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
-        if is_terminal_block_hash_set and not is_activation_epoch_reached:
-            # Terminal block hash is set but activation epoch is not yet reached, no prepare payload call is needed
-            return None
-
-        terminal_pow_block = get_terminal_pow_block(pow_chain)
-        if terminal_pow_block is None:
-            # Pre-merge, no prepare payload call is needed
-            return None
-        # Signify merge via producing on top of the terminal PoW block
-        parent_hash = terminal_pow_block.block_hash
-    else:
-        # Post-merge, normal payload
-        parent_hash = state.latest_execution_payload_header.block_hash
+    # Verify consistency of the parent hash with respect to the previous execution payload header
+    parent_hash = state.latest_execution_payload_header.block_hash

     # Set the forkchoice head and initiate the payload build process
     payload_attributes = PayloadAttributes(

specs/_features/eip6110/beacon-chain.md
@@ -91,7 +91,8 @@ class ExecutionPayload(Container):
     block_hash: Hash32
     transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD]
     withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]
-    excess_data_gas: uint256
+    data_gas_used: uint64
+    excess_data_gas: uint64
     deposit_receipts: List[DepositReceipt, MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD]  # [New in EIP6110]
 ```

@@ -116,7 +117,8 @@ class ExecutionPayloadHeader(Container):
     block_hash: Hash32
     transactions_root: Root
     withdrawals_root: Root
-    excess_data_gas: uint256
+    data_gas_used: uint64
+    excess_data_gas: uint64
     deposit_receipts_root: Root  # [New in EIP6110]
 ```
@@ -176,14 +178,12 @@ class BeaconState(Container):
 ```python
 def process_block(state: BeaconState, block: BeaconBlock) -> None:
     process_block_header(state, block)
-    if is_execution_enabled(state, block.body):
-        process_withdrawals(state, block.body.execution_payload)
-        process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE)  # [Modified in EIP6110]
+    process_withdrawals(state, block.body.execution_payload)
+    process_execution_payload(state, block.body, EXECUTION_ENGINE)  # [Modified in EIP6110]
     process_randao(state, block.body)
     process_eth1_data(state, block.body)
     process_operations(state, block.body)  # [Modified in EIP6110]
     process_sync_aggregate(state, block.body.sync_aggregate)
     process_blob_kzg_commitments(state, block.body)
 ```

 #### Modified `process_operations`

@@ -212,8 +212,7 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
     for_ops(body.bls_to_execution_changes, process_bls_to_execution_change)

     # [New in EIP6110]
-    if is_execution_enabled(state, body):
-        for_ops(body.execution_payload.deposit_receipts, process_deposit_receipt)
+    for_ops(body.execution_payload.deposit_receipts, process_deposit_receipt)
 ```

 #### New `process_deposit_receipt`
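*Note*: `for_ops` is the small iteration helper defined inside phase0's `process_operations`; it applies a handler to each operation and closes over `state` (quoted here as context, not as part of this diff):

```python
def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
    # Closes over the enclosing function's `state` argument.
    for operation in operations:
        fn(state, operation)
```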
@@ -238,16 +237,22 @@ def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt)
 *Note*: The function `process_execution_payload` is modified to use the new `ExecutionPayloadHeader` type.

 ```python
-def process_execution_payload(state: BeaconState, payload: ExecutionPayload, execution_engine: ExecutionEngine) -> None:
+def process_execution_payload(state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine) -> None:
+    payload = body.execution_payload
+
     # Verify consistency of the parent hash with respect to the previous execution payload header
-    if is_merge_transition_complete(state):
-        assert payload.parent_hash == state.latest_execution_payload_header.block_hash
+    assert payload.parent_hash == state.latest_execution_payload_header.block_hash
     # Verify prev_randao
     assert payload.prev_randao == get_randao_mix(state, get_current_epoch(state))
     # Verify timestamp
     assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)
+    # Verify commitments are under limit
+    assert len(body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK
     # Verify the execution payload is valid
-    assert execution_engine.notify_new_payload(payload)
+    versioned_hashes = [kzg_commitment_to_versioned_hash(commitment) for commitment in body.blob_kzg_commitments]
+    assert execution_engine.verify_and_notify_new_payload(
+        NewPayloadRequest(execution_payload=payload, versioned_hashes=versioned_hashes)
+    )
     # Cache execution payload header
     state.latest_execution_payload_header = ExecutionPayloadHeader(
         parent_hash=payload.parent_hash,
@@ -265,6 +270,7 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe
         block_hash=payload.block_hash,
         transactions_root=hash_tree_root(payload.transactions),
         withdrawals_root=hash_tree_root(payload.withdrawals),
+        data_gas_used=payload.data_gas_used,
         excess_data_gas=payload.excess_data_gas,
         deposit_receipts_root=hash_tree_root(payload.deposit_receipts),  # [New in EIP6110]
     )
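*Note*: The `versioned_hashes` handed to the engine are derived from the block's KZG commitments via `kzg_commitment_to_versioned_hash`, defined in the Deneb specs as a version byte followed by the tail of a SHA-256 digest. A self-contained sketch:

```python
import hashlib

VERSIONED_HASH_VERSION_KZG = b"\x01"


def kzg_commitment_to_versioned_hash(kzg_commitment: bytes) -> bytes:
    # Version byte followed by the last 31 bytes of SHA-256(commitment),
    # yielding a 32-byte versioned hash.
    return VERSIONED_HASH_VERSION_KZG + hashlib.sha256(kzg_commitment).digest()[1:]
```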

specs/_features/eip6110/fork.md
@@ -88,7 +88,8 @@ def upgrade_to_eip6110(pre: deneb.BeaconState) -> BeaconState:
         block_hash=pre.latest_execution_payload_header.block_hash,
         transactions_root=pre.latest_execution_payload_header.transactions_root,
         withdrawals_root=pre.latest_execution_payload_header.withdrawals_root,
-        excess_data_gas=uint256(0),
+        data_gas_used=uint64(0),
+        excess_data_gas=uint64(0),
         deposit_receipts_root=Root(),  # [New in EIP-6110]
     )
     post = BeaconState(

specs/_features/eip6110/light-client/fork.md (deleted)
@@ -1,112 +0,0 @@
# EIP-6110 Light Client -- Fork Logic

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
  - [Upgrading light client data](#upgrading-light-client-data)
  - [Upgrading the store](#upgrading-the-store)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

This document describes how to upgrade existing light client objects based on the [Deneb specification](../../deneb/light-client/sync-protocol.md) to eip6110. This is necessary when processing pre-eip6110 data with a post-eip6110 `LightClientStore`. Note that the data being exchanged over the network protocols uses the original format.

### Upgrading light client data

A eip6110 `LightClientStore` can still process earlier light client data. In order to do so, that pre-eip6110 data needs to be locally upgraded to eip6110 before processing.

```python
def upgrade_lc_header_to_eip6110(pre: deneb.LightClientHeader) -> LightClientHeader:
    return LightClientHeader(
        beacon=pre.beacon,
        execution=ExecutionPayloadHeader(
            parent_hash=pre.execution.parent_hash,
            fee_recipient=pre.execution.fee_recipient,
            state_root=pre.execution.state_root,
            receipts_root=pre.execution.receipts_root,
            logs_bloom=pre.execution.logs_bloom,
            prev_randao=pre.execution.prev_randao,
            block_number=pre.execution.block_number,
            gas_limit=pre.execution.gas_limit,
            gas_used=pre.execution.gas_used,
            timestamp=pre.execution.timestamp,
            extra_data=pre.execution.extra_data,
            base_fee_per_gas=pre.execution.base_fee_per_gas,
            block_hash=pre.execution.block_hash,
            transactions_root=pre.execution.transactions_root,
            withdrawals_root=pre.execution.withdrawals_root,
            excess_data_gas=pre.execution.excess_data_gas,
            deposit_receipts_root=Root(),  # [New in EIP6110]
        ),
        execution_branch=pre.execution_branch,
    )
```

```python
def upgrade_lc_bootstrap_to_eip6110(pre: deneb.LightClientBootstrap) -> LightClientBootstrap:
    return LightClientBootstrap(
        header=upgrade_lc_header_to_eip6110(pre.header),
        current_sync_committee=pre.current_sync_committee,
        current_sync_committee_branch=pre.current_sync_committee_branch,
    )
```

```python
def upgrade_lc_update_to_eip6110(pre: deneb.LightClientUpdate) -> LightClientUpdate:
    return LightClientUpdate(
        attested_header=upgrade_lc_header_to_eip6110(pre.attested_header),
        next_sync_committee=pre.next_sync_committee,
        next_sync_committee_branch=pre.next_sync_committee_branch,
        finalized_header=upgrade_lc_header_to_eip6110(pre.finalized_header),
        finality_branch=pre.finality_branch,
        sync_aggregate=pre.sync_aggregate,
        signature_slot=pre.signature_slot,
    )
```

```python
def upgrade_lc_finality_update_to_eip6110(pre: deneb.LightClientFinalityUpdate) -> LightClientFinalityUpdate:
    return LightClientFinalityUpdate(
        attested_header=upgrade_lc_header_to_eip6110(pre.attested_header),
        finalized_header=upgrade_lc_header_to_eip6110(pre.finalized_header),
        finality_branch=pre.finality_branch,
        sync_aggregate=pre.sync_aggregate,
        signature_slot=pre.signature_slot,
    )
```

```python
def upgrade_lc_optimistic_update_to_eip6110(pre: deneb.LightClientOptimisticUpdate) -> LightClientOptimisticUpdate:
    return LightClientOptimisticUpdate(
        attested_header=upgrade_lc_header_to_eip6110(pre.attested_header),
        sync_aggregate=pre.sync_aggregate,
        signature_slot=pre.signature_slot,
    )
```

### Upgrading the store

Existing `LightClientStore` objects based on Deneb MUST be upgraded to eip6110 before eip6110 based light client data can be processed. The `LightClientStore` upgrade MAY be performed before `EIP6110_FORK_EPOCH`.

```python
def upgrade_lc_store_to_eip6110(pre: deneb.LightClientStore) -> LightClientStore:
    if pre.best_valid_update is None:
        best_valid_update = None
    else:
        best_valid_update = upgrade_lc_update_to_eip6110(pre.best_valid_update)
    return LightClientStore(
        finalized_header=upgrade_lc_header_to_eip6110(pre.finalized_header),
        current_sync_committee=pre.current_sync_committee,
        next_sync_committee=pre.next_sync_committee,
        best_valid_update=best_valid_update,
        optimistic_header=upgrade_lc_header_to_eip6110(pre.optimistic_header),
        previous_max_active_participants=pre.previous_max_active_participants,
        current_max_active_participants=pre.current_max_active_participants,
    )
```

specs/_features/eip6110/light-client/full-node.md (deleted)
@@ -1,77 +0,0 @@
# EIP-6110 Light Client -- Full Node

**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Helper functions](#helper-functions)
  - [Modified `block_to_light_client_header`](#modified-block_to_light_client_header)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

This upgrade adds information about the execution payload to light client data as part of the EIP-6110 upgrade.

## Helper functions

### Modified `block_to_light_client_header`

```python
def block_to_light_client_header(block: SignedBeaconBlock) -> LightClientHeader:
    epoch = compute_epoch_at_slot(block.message.slot)

    if epoch >= CAPELLA_FORK_EPOCH:
        payload = block.message.body.execution_payload
        execution_header = ExecutionPayloadHeader(
            parent_hash=payload.parent_hash,
            fee_recipient=payload.fee_recipient,
            state_root=payload.state_root,
            receipts_root=payload.receipts_root,
            logs_bloom=payload.logs_bloom,
            prev_randao=payload.prev_randao,
            block_number=payload.block_number,
            gas_limit=payload.gas_limit,
            gas_used=payload.gas_used,
            timestamp=payload.timestamp,
            extra_data=payload.extra_data,
            base_fee_per_gas=payload.base_fee_per_gas,
            block_hash=payload.block_hash,
            transactions_root=hash_tree_root(payload.transactions),
            withdrawals_root=hash_tree_root(payload.withdrawals),
        )

        if epoch >= DENEB_FORK_EPOCH:
            execution_header.excess_data_gas = payload.excess_data_gas

        # [New in EIP6110]
        if epoch >= EIP6110_FORK_EPOCH:
            execution_header.deposit_receipts_root = hash_tree_root(payload.deposit_receipts)

        execution_branch = compute_merkle_proof_for_block_body(block.message.body, EXECUTION_PAYLOAD_INDEX)
    else:
        # Note that during fork transitions, `finalized_header` may still point to earlier forks.
        # While Bellatrix blocks also contain an `ExecutionPayload` (minus `withdrawals_root`),
        # it was not included in the corresponding light client data. To ensure compatibility
        # with legacy data going through `upgrade_lc_header_to_capella`, leave out execution data.
        execution_header = ExecutionPayloadHeader()
        execution_branch = [Bytes32() for _ in range(floorlog2(EXECUTION_PAYLOAD_INDEX))]

    return LightClientHeader(
        beacon=BeaconBlockHeader(
            slot=block.message.slot,
            proposer_index=block.message.proposer_index,
            parent_root=block.message.parent_root,
            state_root=block.message.state_root,
            body_root=hash_tree_root(block.message.body),
        ),
        execution=execution_header,
        execution_branch=execution_branch,
    )
```

specs/_features/eip6110/light-client/p2p-interface.md (deleted)
@@ -1,111 +0,0 @@
# EIP-6110 Light Client -- Networking

**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Networking](#networking)
  - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
    - [Topics and messages](#topics-and-messages)
      - [Global topics](#global-topics)
        - [`light_client_finality_update`](#light_client_finality_update)
        - [`light_client_optimistic_update`](#light_client_optimistic_update)
  - [The Req/Resp domain](#the-reqresp-domain)
    - [Messages](#messages)
      - [GetLightClientBootstrap](#getlightclientbootstrap)
      - [LightClientUpdatesByRange](#lightclientupdatesbyrange)
      - [GetLightClientFinalityUpdate](#getlightclientfinalityupdate)
      - [GetLightClientOptimisticUpdate](#getlightclientoptimisticupdate)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Networking

The [Deneb light client networking specification](../../deneb/light-client/p2p-interface.md) is extended to exchange [EIP-6110 light client data](./sync-protocol.md).

### The gossip domain: gossipsub

#### Topics and messages

##### Global topics

###### `light_client_finality_update`

[0]: # (eth2spec: skip)

| `fork_version`                                           | Message SSZ type                     |
| -------------------------------------------------------- | ------------------------------------ |
| `GENESIS_FORK_VERSION`                                   | n/a                                  |
| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION`   | `altair.LightClientFinalityUpdate`   |
| `CAPELLA_FORK_VERSION`                                   | `capella.LightClientFinalityUpdate`  |
| `DENEB_FORK_VERSION`                                     | `deneb.LightClientFinalityUpdate`    |
| `EIP6110_FORK_VERSION` and later                         | `eip6110.LightClientFinalityUpdate`  |

###### `light_client_optimistic_update`

[0]: # (eth2spec: skip)

| `fork_version`                                           | Message SSZ type                       |
| -------------------------------------------------------- | -------------------------------------- |
| `GENESIS_FORK_VERSION`                                   | n/a                                    |
| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION`   | `altair.LightClientOptimisticUpdate`   |
| `CAPELLA_FORK_VERSION`                                   | `capella.LightClientOptimisticUpdate`  |
| `DENEB_FORK_VERSION`                                     | `deneb.LightClientOptimisticUpdate`    |
| `EIP6110_FORK_VERSION` and later                         | `eip6110.LightClientOptimisticUpdate`  |

### The Req/Resp domain

#### Messages

##### GetLightClientBootstrap

[0]: # (eth2spec: skip)

| `fork_version`                                           | Response SSZ type                |
| -------------------------------------------------------- | -------------------------------- |
| `GENESIS_FORK_VERSION`                                   | n/a                              |
| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION`   | `altair.LightClientBootstrap`    |
| `CAPELLA_FORK_VERSION`                                   | `capella.LightClientBootstrap`   |
| `DENEB_FORK_VERSION`                                     | `deneb.LightClientBootstrap`     |
| `EIP6110_FORK_VERSION` and later                         | `eip6110.LightClientBootstrap`   |

##### LightClientUpdatesByRange

[0]: # (eth2spec: skip)

| `fork_version`                                           | Response chunk SSZ type      |
| -------------------------------------------------------- | ---------------------------- |
| `GENESIS_FORK_VERSION`                                   | n/a                          |
| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION`   | `altair.LightClientUpdate`   |
| `CAPELLA_FORK_VERSION`                                   | `capella.LightClientUpdate`  |
| `DENEB_FORK_VERSION`                                     | `deneb.LightClientUpdate`    |
| `EIP6110_FORK_VERSION` and later                         | `eip6110.LightClientUpdate`  |

##### GetLightClientFinalityUpdate

[0]: # (eth2spec: skip)

| `fork_version`                                           | Response SSZ type                    |
| -------------------------------------------------------- | ------------------------------------ |
| `GENESIS_FORK_VERSION`                                   | n/a                                  |
| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION`   | `altair.LightClientFinalityUpdate`   |
| `CAPELLA_FORK_VERSION`                                   | `capella.LightClientFinalityUpdate`  |
| `DENEB_FORK_VERSION`                                     | `deneb.LightClientFinalityUpdate`    |
| `EIP6110_FORK_VERSION` and later                         | `eip6110.LightClientFinalityUpdate`  |

##### GetLightClientOptimisticUpdate

[0]: # (eth2spec: skip)

| `fork_version`                                           | Response SSZ type                      |
| -------------------------------------------------------- | -------------------------------------- |
| `GENESIS_FORK_VERSION`                                   | n/a                                    |
| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION`   | `altair.LightClientOptimisticUpdate`   |
| `CAPELLA_FORK_VERSION`                                   | `capella.LightClientOptimisticUpdate`  |
| `DENEB_FORK_VERSION`                                     | `deneb.LightClientOptimisticUpdate`    |
| `EIP6110_FORK_VERSION` and later                         | `eip6110.LightClientOptimisticUpdate`  |

specs/_features/eip6110/light-client/sync-protocol.md (deleted)
@@ -1,89 +0,0 @@
# EIP-6110 Light Client -- Sync Protocol

**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Helper functions](#helper-functions)
  - [Modified `get_lc_execution_root`](#modified-get_lc_execution_root)
  - [Modified `is_valid_light_client_header`](#modified-is_valid_light_client_header)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

This upgrade updates light client data to include the EIP-6110 changes to the [`ExecutionPayload`](../beacon-chain.md) structure. It extends the [Deneb Light Client specifications](../../deneb/light-client/sync-protocol.md). The [fork document](./fork.md) explains how to upgrade existing Deneb based deployments to EIP-6110.

Additional documents describes the impact of the upgrade on certain roles:
- [Full node](./full-node.md)
- [Networking](./p2p-interface.md)

## Helper functions

### Modified `get_lc_execution_root`

```python
def get_lc_execution_root(header: LightClientHeader) -> Root:
    epoch = compute_epoch_at_slot(header.beacon.slot)

    if epoch >= DENEB_FORK_EPOCH:
        return hash_tree_root(header.execution)

    if epoch >= CAPELLA_FORK_EPOCH:
        execution_header = capella.ExecutionPayloadHeader(
            parent_hash=header.execution.parent_hash,
            fee_recipient=header.execution.fee_recipient,
            state_root=header.execution.state_root,
            receipts_root=header.execution.receipts_root,
            logs_bloom=header.execution.logs_bloom,
            prev_randao=header.execution.prev_randao,
            block_number=header.execution.block_number,
            gas_limit=header.execution.gas_limit,
            gas_used=header.execution.gas_used,
            timestamp=header.execution.timestamp,
            extra_data=header.execution.extra_data,
            base_fee_per_gas=header.execution.base_fee_per_gas,
            block_hash=header.execution.block_hash,
            transactions_root=header.execution.transactions_root,
            withdrawals_root=header.execution.withdrawals_root,
        )
        return hash_tree_root(execution_header)

    return Root()
```

### Modified `is_valid_light_client_header`

```python
def is_valid_light_client_header(header: LightClientHeader) -> bool:
    epoch = compute_epoch_at_slot(header.beacon.slot)

    # [New in EIP-6110]
    if epoch < EIP6110_FORK_EPOCH:
        if header.execution.deposit_receipts_root != Root():
            return False

    if epoch < DENEB_FORK_EPOCH:
        if header.execution.excess_data_gas != uint256(0):
            return False

    if epoch < CAPELLA_FORK_EPOCH:
        return (
            header.execution == ExecutionPayloadHeader()
            and header.execution_branch == [Bytes32() for _ in range(floorlog2(EXECUTION_PAYLOAD_INDEX))]
        )

    return is_valid_merkle_branch(
        leaf=get_lc_execution_root(header),
        branch=header.execution_branch,
        depth=floorlog2(EXECUTION_PAYLOAD_INDEX),
        index=get_subtree_index(EXECUTION_PAYLOAD_INDEX),
        root=header.beacon.body_root,
    )
```

@@ -236,8 +236,7 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:
     process_block_header(state, block)
     verify_builder_block_bid(state, block)
     process_sharded_data(state, block)
-    if is_execution_enabled(state, block.body):
-        process_execution_payload(state, block, EXECUTION_ENGINE)
+    process_execution_payload(state, block, EXECUTION_ENGINE)

     if not is_builder_block_slot(block.slot):
         process_randao(state, block.body)
@@ -371,8 +370,7 @@ def process_execution_payload(state: BeaconState, block: BeaconBlock, execution_
     payload = block.body.payload_data.value.execution_payload

     # Verify consistency of the parent hash with respect to the previous execution payload header
-    if is_merge_transition_complete(state):
-        assert payload.parent_hash == state.latest_execution_payload_header.block_hash
+    assert payload.parent_hash == state.latest_execution_payload_header.block_hash
     # Verify random
     assert payload.random == get_randao_mix(state, get_current_epoch(state))
     # Verify timestamp

specs/_features/whisk/beacon-chain.md (new file, 467 lines)
@@ -0,0 +1,467 @@
# Whisk -- The Beacon Chain

**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Constants](#constants)
- [Cryptography](#cryptography)
  - [BLS](#bls)
  - [Curdleproofs and opening proofs](#curdleproofs-and-opening-proofs)
- [Epoch processing](#epoch-processing)
  - [`WhiskTracker`](#whisktracker)
  - [`BeaconState`](#beaconstate)
- [Block processing](#block-processing)
  - [Block header](#block-header)
  - [Whisk](#whisk)
    - [`BeaconBlockBody`](#beaconblockbody)
  - [Deposits](#deposits)
  - [`get_beacon_proposer_index`](#get_beacon_proposer_index)
- [Testing](#testing)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

This document details the beacon chain additions and changes to support the Whisk SSLE (single secret leader election) protocol.

*Note:* This specification is built upon [Capella](../../capella/beacon-chain.md) and is under active development.
## Constants

| Name                               | Value                      | Description                                                  |
| ---------------------------------- | -------------------------- | ------------------------------------------------------------ |
| `WHISK_CANDIDATE_TRACKERS_COUNT`   | `uint64(2**14)` (= 16,384) | number of candidate trackers                                 |
| `WHISK_PROPOSER_TRACKERS_COUNT`    | `uint64(2**13)` (= 8,192)  | number of proposer trackers                                  |
| `WHISK_EPOCHS_PER_SHUFFLING_PHASE` | `Epoch(2**8)` (= 256)      | epochs per shuffling phase                                   |
| `WHISK_VALIDATORS_PER_SHUFFLE`     | `uint64(2**7)` (= 128)     | number of validators shuffled per shuffle step               |
| `WHISK_PROPOSER_SELECTION_GAP`     | `Epoch(2)`                 | gap between proposer selection and the block proposal phase  |
| `WHISK_MAX_SHUFFLE_PROOF_SIZE`     | `uint64(2**15)`            | max size of a shuffle proof                                  |
| `WHISK_MAX_OPENING_PROOF_SIZE`     | `uint64(2**10)`            | max size of an opening proof                                 |

| Name                               | Value                      |
| ---------------------------------- | -------------------------- |
| `DOMAIN_WHISK_CANDIDATE_SELECTION` | `DomainType('0x07000000')` |
| `DOMAIN_WHISK_SHUFFLE`             | `DomainType('0x07100000')` |
| `DOMAIN_WHISK_PROPOSER_SELECTION`  | `DomainType('0x07200000')` |
|
||||
|
||||
### BLS
|
||||
|
||||
| Name | SSZ equivalent | Description |
|
||||
| ------------------- | ---------------------------------------- | ----------------------------- |
|
||||
| `BLSFieldElement` | `uint256` | BLS12-381 scalar |
|
||||
| `BLSG1Point` | `Bytes48` | compressed BLS12-381 G1 point |
|
||||
| `WhiskShuffleProof` | `ByteList[WHISK_MAX_SHUFFLE_PROOF_SIZE]` | Serialized shuffle proof |
|
||||
| `WhiskTrackerProof` | `ByteList[WHISK_MAX_OPENING_PROOF_SIZE]` | Serialized tracker proof |
|
||||
|
||||
*Note*: A subgroup check MUST be performed when deserializing a `BLSG1Point` for use in any of the functions below.
|
||||
|
||||
```python
|
||||
def BLSG1ScalarMultiply(scalar: BLSFieldElement, point: BLSG1Point) -> BLSG1Point:
|
||||
return bls.G1_to_bytes48(bls.multiply(bls.bytes48_to_G1(point), scalar))
|
||||
```
|
||||
|
||||
```python
|
||||
def bytes_to_bls_field(b: Bytes32) -> BLSFieldElement:
|
||||
"""
|
||||
Convert bytes to a BLS field scalar. The output is not uniform over the BLS field.
|
||||
TODO: Deneb will introduces this helper too. Should delete it once it's rebased to post-Deneb.
|
||||
"""
|
||||
field_element = int.from_bytes(b, ENDIANNESS)
|
||||
assert field_element < BLS_MODULUS
|
||||
return BLSFieldElement(field_element)
|
||||
```
|
||||
|
||||
| Name | Value |
|
||||
| ------------------ | ------------------------------------------------------------------------------- |
|
||||
| `BLS_G1_GENERATOR` | `bls.G1_to_bytes48(bls.G1)` |
|
||||
| `BLS_MODULUS` | `52435875175126190479447740508185965837690552500527637822603658699938581184513` |
|
||||
|
||||
### Curdleproofs and opening proofs

Note that Curdleproofs (Whisk shuffle proofs), the tracker opening proofs, and all related data structures and verifier code (along with tests) are specified in the [curdleproofs.pie](https://github.com/nalinbhardwaj/curdleproofs.pie/tree/verifier-only) repository.

```python
def IsValidWhiskShuffleProof(pre_shuffle_trackers: Sequence[WhiskTracker],
                             post_shuffle_trackers: Sequence[WhiskTracker],
                             M: BLSG1Point,
                             shuffle_proof: WhiskShuffleProof) -> bool:
    """
    Verify `post_shuffle_trackers` is a permutation of `pre_shuffle_trackers`.
    Defined in https://github.com/nalinbhardwaj/curdleproofs.pie/tree/verifier-only.
    """
    # pylint: disable=unused-argument
    return True
```

```python
def IsValidWhiskOpeningProof(tracker: WhiskTracker,
                             k_commitment: BLSG1Point,
                             tracker_proof: WhiskTrackerProof) -> bool:
    """
    Verify knowledge of `k` such that `tracker.k_r_G == k * tracker.r_G` and `k_commitment == k * BLS_G1_GENERATOR`.
    Defined in https://github.com/nalinbhardwaj/curdleproofs.pie/tree/verifier-only.
    """
    # pylint: disable=unused-argument
    return True
```
## Epoch processing

### `WhiskTracker`

```python
class WhiskTracker(Container):
    r_G: BLSG1Point    # r * G
    k_r_G: BLSG1Point  # k * r * G
```
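*Note*: A tracker is a re-randomizable commitment to a validator's secret `k`: multiplying both components by a fresh scalar preserves the opening while changing the tracker's appearance, which is what the shuffles below exploit. A sketch of honest tracker construction using the helpers above (illustrative only; `k` and `r` stand for freshly sampled secret scalars):

```python
def build_tracker(k: BLSFieldElement, r: BLSFieldElement) -> WhiskTracker:
    # r_G = r * G and k_r_G = k * (r * G), so the tracker opens to `k` under blinding factor `r`.
    r_G = BLSG1ScalarMultiply(r, BLS_G1_GENERATOR)
    k_r_G = BLSG1ScalarMultiply(k, r_G)
    return WhiskTracker(r_G=r_G, k_r_G=k_r_G)
```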
### `BeaconState`

```python
class BeaconState(Container):
    # Versioning
    genesis_time: uint64
    genesis_validators_root: Root
    slot: Slot
    fork: Fork
    # History
    latest_block_header: BeaconBlockHeader
    block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
    state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
    historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT]  # Frozen in Capella, replaced by historical_summaries
    # Eth1
    eth1_data: Eth1Data
    eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH]
    eth1_deposit_index: uint64
    # Registry
    validators: List[Validator, VALIDATOR_REGISTRY_LIMIT]  # [Modified in Whisk]
    balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT]
    # Randomness
    randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR]
    # Slashings
    slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR]  # Per-epoch sums of slashed effective balances
    # Participation
    previous_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT]
    current_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT]
    # Finality
    justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH]  # Bit set for every recent justified epoch
    previous_justified_checkpoint: Checkpoint
    current_justified_checkpoint: Checkpoint
    finalized_checkpoint: Checkpoint
    # Inactivity
    inactivity_scores: List[uint64, VALIDATOR_REGISTRY_LIMIT]
    # Sync
    current_sync_committee: SyncCommittee
    next_sync_committee: SyncCommittee
    # Execution
    latest_execution_payload_header: ExecutionPayloadHeader
    # Withdrawals
    next_withdrawal_index: WithdrawalIndex
    next_withdrawal_validator_index: ValidatorIndex
    # Deep history valid from Capella onwards
    historical_summaries: List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT]
    # Whisk
    whisk_candidate_trackers: Vector[WhiskTracker, WHISK_CANDIDATE_TRACKERS_COUNT]  # [New in Whisk]
    whisk_proposer_trackers: Vector[WhiskTracker, WHISK_PROPOSER_TRACKERS_COUNT]  # [New in Whisk]
    whisk_trackers: List[WhiskTracker, VALIDATOR_REGISTRY_LIMIT]  # [New in Whisk]
    whisk_k_commitments: List[BLSG1Point, VALIDATOR_REGISTRY_LIMIT]  # [New in Whisk]
```
```python
def select_whisk_trackers(state: BeaconState, epoch: Epoch) -> None:
    # Select proposer trackers from candidate trackers
    proposer_seed = get_seed(state, epoch - WHISK_PROPOSER_SELECTION_GAP, DOMAIN_WHISK_PROPOSER_SELECTION)
    for i in range(WHISK_PROPOSER_TRACKERS_COUNT):
        index = compute_shuffled_index(uint64(i), uint64(len(state.whisk_candidate_trackers)), proposer_seed)
        state.whisk_proposer_trackers[i] = state.whisk_candidate_trackers[index]

    # Select candidate trackers from active validator trackers
    active_validator_indices = get_active_validator_indices(state, epoch)
    for i in range(WHISK_CANDIDATE_TRACKERS_COUNT):
        seed = hash(get_seed(state, epoch, DOMAIN_WHISK_CANDIDATE_SELECTION) + uint_to_bytes(i))
        candidate_index = compute_proposer_index(state, active_validator_indices, seed)  # sample by effective balance
        state.whisk_candidate_trackers[i] = state.whisk_trackers[candidate_index]
```

```python
def process_whisk_updates(state: BeaconState) -> None:
    next_epoch = Epoch(get_current_epoch(state) + 1)
    if next_epoch % WHISK_EPOCHS_PER_SHUFFLING_PHASE == 0:  # select trackers at the start of shuffling phases
        select_whisk_trackers(state, next_epoch)
```
```python
|
||||
def process_epoch(state: BeaconState) -> None:
|
||||
process_justification_and_finalization(state)
|
||||
process_inactivity_updates(state)
|
||||
process_rewards_and_penalties(state)
|
||||
process_registry_updates(state)
|
||||
process_slashings(state)
|
||||
process_eth1_data_reset(state)
|
||||
process_effective_balance_updates(state)
|
||||
process_slashings_reset(state)
|
||||
process_randao_mixes_reset(state)
|
||||
process_historical_summaries_update(state)
|
||||
process_participation_flag_updates(state)
|
||||
process_sync_committee_updates(state)
|
||||
process_whisk_updates(state) # [New in Whisk]
|
||||
```

## Block processing

### Block header

```python
def process_whisk_opening_proof(state: BeaconState, block: BeaconBlock) -> None:
    tracker = state.whisk_proposer_trackers[state.slot % WHISK_PROPOSER_TRACKERS_COUNT]
    k_commitment = state.whisk_k_commitments[block.proposer_index]
    assert IsValidWhiskOpeningProof(tracker, k_commitment, block.body.whisk_opening_proof)
```

*Note*: The `assert block.proposer_index == get_beacon_proposer_index(state)` check is removed in Whisk, since the proposer now authenticates itself via the Whisk opening proof instead.

```python
def process_block_header(state: BeaconState, block: BeaconBlock) -> None:
    # Verify that the slots match
    assert block.slot == state.slot
    # Verify that the block is newer than latest block header
    assert block.slot > state.latest_block_header.slot

    # # Verify that proposer index is the correct index
    # assert block.proposer_index == get_beacon_proposer_index(state)

    # Verify that the parent matches
    assert block.parent_root == hash_tree_root(state.latest_block_header)
    # Cache current block as the new latest block
    state.latest_block_header = BeaconBlockHeader(
        slot=block.slot,
        proposer_index=block.proposer_index,
        parent_root=block.parent_root,
        state_root=Bytes32(),  # Overwritten in the next process_slot call
        body_root=hash_tree_root(block.body),
    )

    # Verify proposer is not slashed
    proposer = state.validators[block.proposer_index]
    assert not proposer.slashed
    process_whisk_opening_proof(state, block)  # [New in Whisk]
```

### Whisk

#### `BeaconBlockBody`

```python
class BeaconBlockBody(capella.BeaconBlockBody):
    randao_reveal: BLSSignature
    eth1_data: Eth1Data  # Eth1 data vote
    graffiti: Bytes32  # Arbitrary data
    # Operations
    proposer_slashings: List[ProposerSlashing, MAX_PROPOSER_SLASHINGS]
    attester_slashings: List[AttesterSlashing, MAX_ATTESTER_SLASHINGS]
    attestations: List[Attestation, MAX_ATTESTATIONS]
    deposits: List[Deposit, MAX_DEPOSITS]
    voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]
    sync_aggregate: SyncAggregate
    # Execution
    execution_payload: ExecutionPayload
    # Capella operations
    bls_to_execution_changes: List[SignedBLSToExecutionChange, MAX_BLS_TO_EXECUTION_CHANGES]
    # Whisk
    whisk_opening_proof: WhiskTrackerProof  # [New in Whisk]
    whisk_post_shuffle_trackers: Vector[WhiskTracker, WHISK_VALIDATORS_PER_SHUFFLE]  # [New in Whisk]
    whisk_shuffle_proof: WhiskShuffleProof  # [New in Whisk]
    whisk_shuffle_proof_M_commitment: BLSG1Point  # [New in Whisk]
    whisk_registration_proof: WhiskTrackerProof  # [New in Whisk]
    whisk_tracker: WhiskTracker  # [New in Whisk]
    whisk_k_commitment: BLSG1Point  # k * BLS_G1_GENERATOR [New in Whisk]
```

```python
def get_shuffle_indices(randao_reveal: BLSSignature) -> Sequence[uint64]:
    """
    Given a `randao_reveal` return the list of indices that got shuffled from the entire candidate set
    """
    indices = []
    for i in range(0, WHISK_VALIDATORS_PER_SHUFFLE):
        # XXX ensure we are not suffering from modulo bias
        pre_image = randao_reveal + uint_to_bytes(uint64(i))
        shuffle_index = bytes_to_uint64(hash(pre_image)[0:8]) % WHISK_CANDIDATE_TRACKERS_COUNT
        indices.append(shuffle_index)

    return indices
```

```python
def process_shuffled_trackers(state: BeaconState, body: BeaconBlockBody) -> None:
    # Check the shuffle proof
    shuffle_indices = get_shuffle_indices(body.randao_reveal)
    pre_shuffle_trackers = [state.whisk_candidate_trackers[i] for i in shuffle_indices]

    shuffle_epoch = get_current_epoch(state) % WHISK_EPOCHS_PER_SHUFFLING_PHASE
    if shuffle_epoch + WHISK_PROPOSER_SELECTION_GAP + 1 >= WHISK_EPOCHS_PER_SHUFFLING_PHASE:
        # Require trackers set to zero during cooldown
        assert body.whisk_post_shuffle_trackers == Vector[WhiskTracker, WHISK_VALIDATORS_PER_SHUFFLE]()
        assert body.whisk_shuffle_proof_M_commitment == BLSG1Point()
        assert body.whisk_shuffle_proof == WhiskShuffleProof()
        post_shuffle_trackers = pre_shuffle_trackers
    else:
        # Require shuffled trackers during shuffle
        assert IsValidWhiskShuffleProof(
            pre_shuffle_trackers,
            body.whisk_post_shuffle_trackers,
            body.whisk_shuffle_proof_M_commitment,
            body.whisk_shuffle_proof,
        )
        post_shuffle_trackers = body.whisk_post_shuffle_trackers

    # Shuffle candidate trackers
    for i, shuffle_index in enumerate(shuffle_indices):
        state.whisk_candidate_trackers[shuffle_index] = post_shuffle_trackers[i]
```

```python
def is_k_commitment_unique(state: BeaconState, k_commitment: BLSG1Point) -> bool:
    return all([whisk_k_commitment != k_commitment for whisk_k_commitment in state.whisk_k_commitments])
```

```python
def process_whisk(state: BeaconState, body: BeaconBlockBody) -> None:
    process_shuffled_trackers(state, body)

    # Overwrite all validator Whisk fields (first Whisk proposal) or just the permutation commitment (next proposals)
    proposer = state.validators[get_beacon_proposer_index(state)]
    if proposer.whisk_tracker.r_G == BLS_G1_GENERATOR:  # first Whisk proposal
        assert body.whisk_tracker.r_G != BLS_G1_GENERATOR
        assert is_k_commitment_unique(state, body.whisk_k_commitment)
        assert IsValidWhiskOpeningProof(
            body.whisk_tracker,
            body.whisk_k_commitment,
            body.whisk_registration_proof,
        )
        proposer.whisk_tracker = body.whisk_tracker
        proposer.whisk_k_commitment = body.whisk_k_commitment
    else:  # next Whisk proposals
        assert body.whisk_registration_proof == WhiskTrackerProof()
        assert body.whisk_tracker == WhiskTracker()
        assert body.whisk_k_commitment == BLSG1Point()
        assert body.whisk_shuffle_proof_M_commitment == BLSG1Point()
```

```python
def process_block(state: BeaconState, block: BeaconBlock) -> None:
    process_block_header(state, block)
    if is_execution_enabled(state, block.body):
        process_withdrawals(state, block.body.execution_payload)
        process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE)
    process_randao(state, block.body)
    process_eth1_data(state, block.body)
    process_operations(state, block.body)
    process_sync_aggregate(state, block.body.sync_aggregate)
    process_whisk(state, block.body)  # [New in Whisk]
```

### Deposits

```python
def get_initial_whisk_k(validator_index: ValidatorIndex, counter: int) -> BLSFieldElement:
    # hash `validator_index || counter`
    return BLSFieldElement(bytes_to_bls_field(hash(uint_to_bytes(validator_index) + uint_to_bytes(uint64(counter)))))
```

```python
def get_unique_whisk_k(state: BeaconState, validator_index: ValidatorIndex) -> BLSFieldElement:
    counter = 0
    while True:
        k = get_initial_whisk_k(validator_index, counter)
        if is_k_commitment_unique(state, BLSG1ScalarMultiply(k, BLS_G1_GENERATOR)):
            return k  # unique by trial and error
        counter += 1
```

```python
def get_k_commitment(k: BLSFieldElement) -> BLSG1Point:
    return BLSG1ScalarMultiply(k, BLS_G1_GENERATOR)
```

```python
def get_initial_tracker(k: BLSFieldElement) -> WhiskTracker:
    return WhiskTracker(r_G=BLS_G1_GENERATOR, k_r_G=BLSG1ScalarMultiply(k, BLS_G1_GENERATOR))
```

```python
def apply_deposit(state: BeaconState,
                  pubkey: BLSPubkey,
                  withdrawal_credentials: Bytes32,
                  amount: uint64,
                  signature: BLSSignature) -> None:
    validator_pubkeys = [validator.pubkey for validator in state.validators]
    if pubkey not in validator_pubkeys:
        # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
        deposit_message = DepositMessage(
            pubkey=pubkey,
            withdrawal_credentials=withdrawal_credentials,
            amount=amount,
        )
        domain = compute_domain(DOMAIN_DEPOSIT)  # Fork-agnostic domain since deposits are valid across forks
        signing_root = compute_signing_root(deposit_message, domain)
        # Initialize validator if the deposit signature is valid
        if bls.Verify(pubkey, signing_root, signature):
            index = get_index_for_new_validator(state)
            validator = get_validator_from_deposit(pubkey, withdrawal_credentials, amount)
            set_or_append_list(state.validators, index, validator)
            set_or_append_list(state.balances, index, amount)
            set_or_append_list(state.previous_epoch_participation, index, ParticipationFlags(0b0000_0000))
            set_or_append_list(state.current_epoch_participation, index, ParticipationFlags(0b0000_0000))
            set_or_append_list(state.inactivity_scores, index, uint64(0))
            # [New in Whisk]
            k = get_unique_whisk_k(state, ValidatorIndex(len(state.validators) - 1))
            state.whisk_trackers.append(get_initial_tracker(k))
            state.whisk_k_commitments.append(get_k_commitment(k))
    else:
        # Increase balance by deposit amount
        index = ValidatorIndex(validator_pubkeys.index(pubkey))
        increase_balance(state, index, amount)
```

### `get_beacon_proposer_index`

```python
def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex:
    """
    Return the beacon proposer index at the current slot.
    """
    assert state.latest_block_header.slot == state.slot  # sanity check `process_block_header` has been called
    return state.latest_block_header.proposer_index
```

## Testing

*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure Whisk testing only.

```python
def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
                                      eth1_timestamp: uint64,
                                      deposits: Sequence[Deposit],
                                      execution_payload_header: ExecutionPayloadHeader=ExecutionPayloadHeader()
                                      ) -> BeaconState:
    state_capella = capella.initialize_beacon_state_from_eth1(
        eth1_block_hash,
        eth1_timestamp,
        deposits,
        execution_payload_header,
    )
    state = upgrade_to_whisk(state_capella)
    state.fork.previous_version = WHISK_FORK_VERSION
    state.fork.current_version = WHISK_FORK_VERSION
    return state
```
135
specs/_features/whisk/fork.md
Normal file
@@ -0,0 +1,135 @@
# Whisk -- Fork Logic

**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Configuration](#configuration)
- [Fork to Whisk](#fork-to-whisk)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

This document describes the process of the Whisk upgrade.

```
"""
WHISK_FORK_EPOCH
|                        cooldown
|                        | ||
v                        vsvv
--+~~~~~~~~~~~~~~~~~~~~~----+-
  shuffling                 ^
                            |
                            |
                   proposer selection
                   candidate selection
"""
```
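
For orientation, the cooldown window shown above can be expressed as a predicate. This is a non-normative sketch: `is_in_shuffle_cooldown` is a hypothetical helper (not defined by this spec) that mirrors the cooldown condition used by `process_shuffled_trackers` in the beacon chain document.

```python
def is_in_shuffle_cooldown(epoch: Epoch) -> bool:
    # Mirrors the cooldown branch of process_shuffled_trackers:
    # shuffling stops WHISK_PROPOSER_SELECTION_GAP + 1 epochs before the phase ends
    shuffle_epoch = epoch % WHISK_EPOCHS_PER_SHUFFLING_PHASE
    return shuffle_epoch + WHISK_PROPOSER_SELECTION_GAP + 1 >= WHISK_EPOCHS_PER_SHUFFLING_PHASE
```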

## Configuration

Warning: this configuration is not definitive.

| Name                 | Value                                  |
| -------------------- | -------------------------------------- |
| `WHISK_FORK_VERSION` | `Version('0x06000000')`                |
| `WHISK_FORK_EPOCH`   | `Epoch(18446744073709551615)` **TBD**  |

## Fork to Whisk

If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == WHISK_FORK_EPOCH`, an irregular state change is made to upgrade to Whisk. `WHISK_FORK_EPOCH` must be a multiple of `WHISK_EPOCHS_PER_SHUFFLING_PHASE`.

The upgrade occurs after the completion of the inner loop of `process_slots` that sets `state.slot` equal to `WHISK_FORK_EPOCH * SLOTS_PER_EPOCH`.

This ensures that we drop right into the beginning of a shuffling phase, but without `process_whisk_updates()` triggering for this Whisk run. Hence we handle all the setup ourselves in `upgrade_to_whisk()` below.

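As a non-normative illustration of the trigger, a client could hook the irregular state change into its slot processing roughly as follows; `maybe_upgrade_to_whisk` is a hypothetical helper, not part of this document.

```python
def maybe_upgrade_to_whisk(state: capella.BeaconState) -> BeaconState:
    # Hypothetical illustration: apply the irregular state change once
    # process_slots has advanced state.slot to the WHISK_FORK_EPOCH boundary.
    if state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == WHISK_FORK_EPOCH:
        return upgrade_to_whisk(state)
    return state
```
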
```python
def whisk_candidate_selection(state: BeaconState, epoch: Epoch) -> None:
    # TODO
    # pylint: disable=unused-argument
    pass
```

```python
def whisk_proposer_selection(state: BeaconState, epoch: Epoch) -> None:
    # TODO
    # pylint: disable=unused-argument
    pass
```

```python
def upgrade_to_whisk(pre: capella.BeaconState) -> BeaconState:
    # Compute initial unsafe trackers for all validators
    ks = [get_initial_whisk_k(ValidatorIndex(validator_index), 0) for validator_index in range(len(pre.validators))]
    whisk_k_commitments = [get_k_commitment(k) for k in ks]
    whisk_trackers = [get_initial_tracker(k) for k in ks]

    epoch = capella.get_current_epoch(pre)
    post = BeaconState(
        # Versioning
        genesis_time=pre.genesis_time,
        genesis_validators_root=pre.genesis_validators_root,
        slot=pre.slot,
        fork=Fork(
            previous_version=pre.fork.current_version,
            current_version=WHISK_FORK_VERSION,
            epoch=epoch,
        ),
        # History
        latest_block_header=pre.latest_block_header,
        block_roots=pre.block_roots,
        state_roots=pre.state_roots,
        historical_roots=pre.historical_roots,
        # Eth1
        eth1_data=pre.eth1_data,
        eth1_data_votes=pre.eth1_data_votes,
        eth1_deposit_index=pre.eth1_deposit_index,
        # Registry
        validators=pre.validators,
        balances=pre.balances,
        # Randomness
        randao_mixes=pre.randao_mixes,
        # Slashings
        slashings=pre.slashings,
        # Participation
        previous_epoch_participation=pre.previous_epoch_participation,
        current_epoch_participation=pre.current_epoch_participation,
        # Finality
        justification_bits=pre.justification_bits,
        previous_justified_checkpoint=pre.previous_justified_checkpoint,
        current_justified_checkpoint=pre.current_justified_checkpoint,
        finalized_checkpoint=pre.finalized_checkpoint,
        # Inactivity
        inactivity_scores=pre.inactivity_scores,
        # Sync
        current_sync_committee=pre.current_sync_committee,
        next_sync_committee=pre.next_sync_committee,
        # Execution-layer
        latest_execution_payload_header=pre.latest_execution_payload_header,
        # Withdrawals
        next_withdrawal_index=pre.next_withdrawal_index,
        next_withdrawal_validator_index=pre.next_withdrawal_validator_index,
        # Deep history valid from Capella onwards
        historical_summaries=pre.historical_summaries,
        # Whisk
        whisk_proposer_trackers=[WhiskTracker() for _ in range(WHISK_PROPOSER_TRACKERS_COUNT)],  # [New in Whisk]
        whisk_candidate_trackers=[WhiskTracker() for _ in range(WHISK_CANDIDATE_TRACKERS_COUNT)],  # [New in Whisk]
        whisk_trackers=whisk_trackers,  # [New in Whisk]
        whisk_k_commitments=whisk_k_commitments,  # [New in Whisk]
    )

    # Do a candidate selection followed by a proposer selection so that we have proposers for the upcoming day
    # Use an old epoch when selecting candidates so that we don't get the same seed as in the next candidate selection
    whisk_candidate_selection(post, epoch - WHISK_PROPOSER_SELECTION_GAP - 1)
    whisk_proposer_selection(post, epoch)

    # Do a final round of candidate selection.
    # We need it so that we have something to shuffle over the upcoming shuffling phase.
    whisk_candidate_selection(post, epoch)

    return post
```
@@ -33,7 +33,12 @@
  - [Modified `slash_validator`](#modified-slash_validator)
- [Beacon chain state transition function](#beacon-chain-state-transition-function)
  - [Execution engine](#execution-engine)
    - [Request data](#request-data)
      - [`NewPayloadRequest`](#newpayloadrequest)
    - [Engine APIs](#engine-apis)
      - [`notify_new_payload`](#notify_new_payload)
      - [`is_valid_block_hash`](#is_valid_block_hash)
      - [`verify_and_notify_new_payload`](#verify_and_notify_new_payload)
  - [Block processing](#block-processing)
    - [Execution payload](#execution-payload)
      - [`process_execution_payload`](#process_execution_payload)
@@ -300,18 +305,30 @@ def slash_validator(state: BeaconState,

### Execution engine

#### Request data

##### `NewPayloadRequest`

```python
@dataclass
class NewPayloadRequest(object):
    execution_payload: ExecutionPayload
```

#### Engine APIs

The implementation-dependent `ExecutionEngine` protocol encapsulates the execution sub-system logic via:

* a state object `self.execution_state` of type `ExecutionState`
* a notification function `self.notify_new_payload` which may apply changes to the `self.execution_state`

*Note*: `notify_new_payload` is a function accessed through the `EXECUTION_ENGINE` module which instantiates the `ExecutionEngine` protocol.

The body of this function is implementation dependent.
The bodies of these functions are implementation dependent.
The Engine API may be used to implement this and similarly defined functions via an external execution engine.
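
As a non-normative illustration, a trivial stand-in satisfying this protocol could look like the following; the `NoopExecutionEngine` name and its always-valid behavior are assumptions for illustration, not part of the spec.

```python
class NoopExecutionEngine(ExecutionEngine):
    """Illustrative stand-in: accepts every payload without executing it."""

    def notify_new_payload(self, execution_payload: ExecutionPayload) -> bool:
        # A real engine would execute the payload against self.execution_state
        return True

    def is_valid_block_hash(self, execution_payload: ExecutionPayload) -> bool:
        # A real engine would recompute and compare execution_payload.block_hash
        return True

    def verify_and_notify_new_payload(self, new_payload_request: NewPayloadRequest) -> bool:
        if not self.is_valid_block_hash(new_payload_request.execution_payload):
            return False
        return self.notify_new_payload(new_payload_request.execution_payload)
```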

#### `notify_new_payload`

`notify_new_payload` is a function accessed through the `EXECUTION_ENGINE` module which instantiates the `ExecutionEngine` protocol.

```python
def notify_new_payload(self: ExecutionEngine, execution_payload: ExecutionPayload) -> bool:
    """
@@ -320,6 +337,31 @@ def notify_new_payload(self: ExecutionEngine, execution_payload: ExecutionPayloa
    ...
```

#### `is_valid_block_hash`

```python
def is_valid_block_hash(self: ExecutionEngine, execution_payload: ExecutionPayload) -> bool:
    """
    Return ``True`` if and only if ``execution_payload.block_hash`` is computed correctly.
    """
    ...
```

#### `verify_and_notify_new_payload`

```python
def verify_and_notify_new_payload(self: ExecutionEngine,
                                  new_payload_request: NewPayloadRequest) -> bool:
    """
    Return ``True`` if and only if ``new_payload_request`` is valid with respect to ``self.execution_state``.
    """
    if not self.is_valid_block_hash(new_payload_request.execution_payload):
        return False
    if not self.notify_new_payload(new_payload_request.execution_payload):
        return False
    return True
```

### Block processing

*Note*: The call to `process_execution_payload` must happen before the call to `process_randao` as the former depends on the `randao_mix` computed with the reveal of the previous block.
@@ -328,7 +370,7 @@ def notify_new_payload(self: ExecutionEngine, execution_payload: ExecutionPayloa
def process_block(state: BeaconState, block: BeaconBlock) -> None:
    process_block_header(state, block)
    if is_execution_enabled(state, block.body):
        process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE)  # [New in Bellatrix]
        process_execution_payload(state, block.body, EXECUTION_ENGINE)  # [New in Bellatrix]
    process_randao(state, block.body)
    process_eth1_data(state, block.body)
    process_operations(state, block.body)
@@ -340,7 +382,9 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:
##### `process_execution_payload`

```python
def process_execution_payload(state: BeaconState, payload: ExecutionPayload, execution_engine: ExecutionEngine) -> None:
def process_execution_payload(state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine) -> None:
    payload = body.execution_payload

    # Verify consistency of the parent hash with respect to the previous execution payload header
    if is_merge_transition_complete(state):
        assert payload.parent_hash == state.latest_execution_payload_header.block_hash
@@ -349,7 +393,7 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe
    # Verify timestamp
    assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)
    # Verify the execution payload is valid
    assert execution_engine.notify_new_payload(payload)
    assert execution_engine.verify_and_notify_new_payload(NewPayloadRequest(execution_payload=payload))
    # Cache execution payload header
    state.latest_execution_payload_header = ExecutionPayloadHeader(
        parent_hash=payload.parent_hash,

@@ -14,7 +14,6 @@ Readers should understand the Phase 0 and Altair documents and use them as a bas

- [Warning](#warning)
- [Modifications in Bellatrix](#modifications-in-bellatrix)
  - [Configuration](#configuration)
  - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
    - [Topics and messages](#topics-and-messages)
      - [Global topics](#global-topics)
@@ -41,15 +40,6 @@ Refer to the note in the [validator guide](./validator.md) for further details.

## Modifications in Bellatrix

### Configuration

This section outlines modifications to constants that are used in this spec.

| Name | Value | Description |
|---|---|---|
| `GOSSIP_MAX_SIZE_BELLATRIX` | `10 * 2**20` (= 10,485,760, 10 MiB) | The maximum allowed size of uncompressed gossip messages starting at Bellatrix upgrade. |
| `MAX_CHUNK_SIZE_BELLATRIX` | `10 * 2**20` (= 10,485,760, 10 MiB) | The maximum allowed size of uncompressed req/resp chunked responses starting at Bellatrix upgrade. |

### The gossip domain: gossipsub

Some gossip meshes are upgraded in Bellatrix to support upgraded types.
@@ -61,11 +51,6 @@ All topics remain stable except the beacon block topic which is updated with the

The specification around the creation, validation, and dissemination of messages has not changed from the Phase 0 and Altair documents unless explicitly noted here.

Starting at Bellatrix upgrade, each gossipsub [message](https://github.com/libp2p/go-libp2p-pubsub/blob/master/pb/rpc.proto#L17-L24)
has a maximum size of `GOSSIP_MAX_SIZE_BELLATRIX`.
Clients MUST reject (fail validation) messages that are over this size limit.
Likewise, clients MUST NOT emit or propagate messages larger than this limit.

The derivation of the `message-id` remains stable.

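As a non-normative sketch of the size rule (the helper name below is hypothetical, not part of the spec):

```python
GOSSIP_MAX_SIZE_BELLATRIX = 10 * 2**20  # 10 MiB, uncompressed

def is_valid_gossip_message_size(uncompressed_data: bytes) -> bool:
    # Reject (fail validation) messages over the limit; likewise, a client
    # must never emit or propagate messages larger than this limit.
    return len(uncompressed_data) <= GOSSIP_MAX_SIZE_BELLATRIX
```
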
The new topics along with the type of the `data` field of a gossipsub message are given in this table:
@@ -130,10 +115,6 @@ down-scoring or disconnection.

Request and Response remain unchanged unless explicitly noted here.

Starting at Bellatrix upgrade,
a global maximum uncompressed byte size of `MAX_CHUNK_SIZE_BELLATRIX` MUST be applied to all method response chunks
regardless of type-specific bounds that *MUST* also be respected.

The Bellatrix fork-digest is introduced to the `context` enum to specify the Bellatrix block type.

Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
@@ -171,17 +152,12 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:

With the addition of `ExecutionPayload` to `BeaconBlock`s, there is a dynamic
field -- `transactions` -- which can validly exceed the `GOSSIP_MAX_SIZE` limit (1 MiB) put in
place at Phase 0. At the `GAS_LIMIT` (~30M) currently seen on mainnet in 2021, a single transaction
place at Phase 0, so `GOSSIP_MAX_SIZE` has increased to 10 MiB on the network.
At the `GAS_LIMIT` (~30M) currently seen on mainnet in 2021, a single transaction
filled entirely with data at a cost of 16 gas per byte can create a valid
`ExecutionPayload` of ~2 MiB. Thus we need a size limit to at least account for
current mainnet conditions.

Geth currently has a [max gossip message size](https://github.com/ethereum/go-ethereum/blob/3ce9f6d96f38712f5d6756e97b59ccc20cc403b3/eth/protocols/eth/protocol.go#L49) of 10 MiB.
To support backward compatibility with this previously defined network limit,
we adopt `GOSSIP_MAX_SIZE_BELLATRIX` of 10 MiB for maximum gossip sizes at the
point of Bellatrix and beyond. Note that clients SHOULD still reject objects
that exceed their maximum theoretical bounds which in most cases is less than `GOSSIP_MAX_SIZE_BELLATRIX`.

Note that due to additional size induced by the `BeaconBlock` contents (e.g.
proposer signature, operations lists, etc) this does reduce the
theoretical max valid `ExecutionPayload` (and `transactions` list) size as

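The ~2 MiB figure above follows directly from the stated gas arithmetic; as a quick check:

```python
GAS_LIMIT = 30_000_000       # approximate mainnet block gas limit cited above
CALLDATA_GAS_PER_BYTE = 16   # cost per byte of transaction data, as cited above

max_tx_data_bytes = GAS_LIMIT // CALLDATA_GAS_PER_BYTE
print(max_tx_data_bytes)          # 1875000 bytes
print(max_tx_data_bytes / 2**20)  # ~1.79, i.e. roughly the ~2 MiB cited
```
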
@@ -9,6 +9,7 @@
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Helpers](#helpers)
  - [`GetPayloadResponse`](#getpayloadresponse)
  - [`get_pow_block_at_terminal_total_difficulty`](#get_pow_block_at_terminal_total_difficulty)
  - [`get_terminal_pow_block`](#get_terminal_pow_block)
- [Protocols](#protocols)
@@ -36,6 +37,14 @@ Please see related Beacon Chain doc before continuing and use them as a referenc

## Helpers

### `GetPayloadResponse`

```python
@dataclass
class GetPayloadResponse(object):
    execution_payload: ExecutionPayload
```

### `get_pow_block_at_terminal_total_difficulty`

```python
@@ -83,13 +92,13 @@ The Engine API may be used to implement it with an external execution engine.

#### `get_payload`

Given the `payload_id`, `get_payload` returns the most recent version of the execution payload that
has been built since the corresponding call to `notify_forkchoice_updated` method.
Given the `payload_id`, `get_payload` returns `GetPayloadResponse` with the most recent version of
the execution payload that has been built since the corresponding call to the `notify_forkchoice_updated` method.

```python
def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> ExecutionPayload:
def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> GetPayloadResponse:
    """
    Return ``execution_payload`` object.
    Return ``GetPayloadResponse`` object.
    """
    ...
```
@@ -118,12 +127,13 @@ To obtain an execution payload, a block proposer building a block on top of a `s

```python
def prepare_execution_payload(state: BeaconState,
                              pow_chain: Dict[Hash32, PowBlock],
                              safe_block_hash: Hash32,
                              finalized_block_hash: Hash32,
                              suggested_fee_recipient: ExecutionAddress,
                              execution_engine: ExecutionEngine) -> Optional[PayloadId]:
                              execution_engine: ExecutionEngine,
                              pow_chain: Optional[Dict[Hash32, PowBlock]]=None) -> Optional[PayloadId]:
    if not is_merge_transition_complete(state):
        assert pow_chain is not None
        is_terminal_block_hash_set = TERMINAL_BLOCK_HASH != Hash32()
        is_activation_epoch_reached = get_current_epoch(state) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
        if is_terminal_block_hash_set and not is_activation_epoch_reached:
@@ -162,7 +172,7 @@ def get_execution_payload(payload_id: Optional[PayloadId], execution_engine: Exe
        # Pre-merge, empty payload
        return ExecutionPayload()
    else:
        return execution_engine.get_payload(payload_id)
        return execution_engine.get_payload(payload_id).execution_payload
```

*Note*: It is recommended for a validator to call `prepare_execution_payload` as soon as input parameters become known,

@@ -331,9 +331,9 @@ def process_historical_summaries_update(state: BeaconState) -> None:
```python
def process_block(state: BeaconState, block: BeaconBlock) -> None:
    process_block_header(state, block)
    if is_execution_enabled(state, block.body):
        process_withdrawals(state, block.body.execution_payload)  # [New in Capella]
        process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE)  # [Modified in Capella]
    # [Modified in Capella] Removed `is_execution_enabled` check in Capella
    process_withdrawals(state, block.body.execution_payload)  # [New in Capella]
    process_execution_payload(state, block.body, EXECUTION_ENGINE)  # [Modified in Capella]
    process_randao(state, block.body)
    process_eth1_data(state, block.body)
    process_operations(state, block.body)  # [Modified in Capella]
@@ -404,19 +404,21 @@ def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:

#### Modified `process_execution_payload`

*Note*: The function `process_execution_payload` is modified to use the new `ExecutionPayloadHeader` type.
*Note*: The function `process_execution_payload` is modified to use the new `ExecutionPayloadHeader` type
and to remove the `is_merge_transition_complete` check.

```python
def process_execution_payload(state: BeaconState, payload: ExecutionPayload, execution_engine: ExecutionEngine) -> None:
def process_execution_payload(state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine) -> None:
    payload = body.execution_payload
    # [Modified in Capella] Removed `is_merge_transition_complete` check in Capella
    # Verify consistency of the parent hash with respect to the previous execution payload header
    if is_merge_transition_complete(state):
        assert payload.parent_hash == state.latest_execution_payload_header.block_hash
    assert payload.parent_hash == state.latest_execution_payload_header.block_hash
    # Verify prev_randao
    assert payload.prev_randao == get_randao_mix(state, get_current_epoch(state))
    # Verify timestamp
    assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)
    # Verify the execution payload is valid
    assert execution_engine.notify_new_payload(payload)
    assert execution_engine.verify_and_notify_new_payload(NewPayloadRequest(execution_payload=payload))
    # Cache execution payload header
    state.latest_execution_payload_header = ExecutionPayloadHeader(
        parent_hash=payload.parent_hash,

@@ -14,6 +14,8 @@
|
||||
- [`notify_forkchoice_updated`](#notify_forkchoice_updated)
|
||||
- [Helpers](#helpers)
|
||||
- [Extended `PayloadAttributes`](#extended-payloadattributes)
|
||||
- [Updated fork-choice handlers](#updated-fork-choice-handlers)
|
||||
- [`on_block`](#on_block)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
@@ -60,3 +62,56 @@ class PayloadAttributes(object):
|
||||
suggested_fee_recipient: ExecutionAddress
|
||||
withdrawals: Sequence[Withdrawal] # [New in Capella]
|
||||
```
|
||||
|
||||
## Updated fork-choice handlers

### `on_block`

*Note*: The only modification is the deletion of the verification of merge transition block conditions.

```python
def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
    """
    Run ``on_block`` upon receiving a new block.
    """
    block = signed_block.message
    # Parent block must be known
    assert block.parent_root in store.block_states
    # Make a copy of the state to avoid mutability issues
    pre_state = copy(store.block_states[block.parent_root])
    # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past.
    assert get_current_slot(store) >= block.slot

    # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor)
    finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
    assert block.slot > finalized_slot
    # Check block is a descendant of the finalized block at the checkpoint finalized slot
    finalized_checkpoint_block = get_checkpoint_block(
        store,
        block.parent_root,
        store.finalized_checkpoint.epoch,
    )
    assert store.finalized_checkpoint.root == finalized_checkpoint_block

    # Check the block is valid and compute the post-state
    state = pre_state.copy()
    block_root = hash_tree_root(block)
    state_transition(state, signed_block, True)

    # Add new block to the store
    store.blocks[block_root] = block
    # Add new state for this block to the store
    store.block_states[block_root] = state

    # Add proposer score boost if the block is timely
    time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
    is_before_attesting_interval = time_into_slot < SECONDS_PER_SLOT // INTERVALS_PER_SLOT
    if get_current_slot(store) == block.slot and is_before_attesting_interval:
        store.proposer_boost_root = hash_tree_root(block)

    # Update checkpoints in store if necessary
    update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)

    # Eagerly compute unrealized justification and finality.
    compute_pulled_up_tip(store, block_root)
```

@@ -11,9 +11,10 @@
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Helpers](#helpers)
  - [Modified `GetPayloadResponse`](#modified-getpayloadresponse)
- [Protocols](#protocols)
  - [`ExecutionEngine`](#executionengine)
    - [`get_payload`](#get_payload)
    - [Modified `get_payload`](#modified-get_payload)
- [Beacon chain responsibilities](#beacon-chain-responsibilities)
  - [Block proposal](#block-proposal)
    - [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
@@ -39,11 +40,20 @@ Please see related Beacon Chain doc before continuing and use them as a referenc

## Helpers

### Modified `GetPayloadResponse`

```python
@dataclass
class GetPayloadResponse(object):
    execution_payload: ExecutionPayload
    block_value: uint256
```

## Protocols

### `ExecutionEngine`

#### `get_payload`
#### Modified `get_payload`

`get_payload` returns the upgraded Capella `ExecutionPayload` type.

@@ -69,27 +79,12 @@ That is, `state` is the `previous_state` processed through any empty slots up to

```python
def prepare_execution_payload(state: BeaconState,
                              pow_chain: Dict[Hash32, PowBlock],
                              safe_block_hash: Hash32,
                              finalized_block_hash: Hash32,
                              suggested_fee_recipient: ExecutionAddress,
                              execution_engine: ExecutionEngine) -> Optional[PayloadId]:
    if not is_merge_transition_complete(state):
        is_terminal_block_hash_set = TERMINAL_BLOCK_HASH != Hash32()
        is_activation_epoch_reached = get_current_epoch(state) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
        if is_terminal_block_hash_set and not is_activation_epoch_reached:
            # Terminal block hash is set but activation epoch is not yet reached, no prepare payload call is needed
            return None

        terminal_pow_block = get_terminal_pow_block(pow_chain)
        if terminal_pow_block is None:
            # Pre-merge, no prepare payload call is needed
            return None
        # Signify merge via producing on top of the terminal PoW block
        parent_hash = terminal_pow_block.block_hash
    else:
        # Post-merge, normal payload
        parent_hash = state.latest_execution_payload_header.block_hash
    # [Modified in Capella] Removed `is_merge_transition_complete` check in Capella
    parent_hash = state.latest_execution_payload_header.block_hash

    # Set the forkchoice head and initiate the payload build process
    payload_attributes = PayloadAttributes(

@@ -24,13 +24,17 @@
- [Helper functions](#helper-functions)
  - [Misc](#misc)
    - [`kzg_commitment_to_versioned_hash`](#kzg_commitment_to_versioned_hash)
    - [`tx_peek_blob_versioned_hashes`](#tx_peek_blob_versioned_hashes)
    - [`verify_kzg_commitments_against_transactions`](#verify_kzg_commitments_against_transactions)
- [Beacon chain state transition function](#beacon-chain-state-transition-function)
  - [Execution engine](#execution-engine)
    - [Request data](#request-data)
      - [Modified `NewPayloadRequest`](#modified-newpayloadrequest)
    - [Engine APIs](#engine-apis)
      - [`is_valid_versioned_hashes`](#is_valid_versioned_hashes)
      - [Modified `verify_and_notify_new_payload`](#modified-verify_and_notify_new_payload)
  - [Block processing](#block-processing)
    - [Execution payload](#execution-payload)
      - [`process_execution_payload`](#process_execution_payload)
      - [Blob KZG commitments](#blob-kzg-commitments)
      - [Modified `process_execution_payload`](#modified-process_execution_payload)
    - [Modified `process_voluntary_exit`](#modified-process_voluntary_exit)
- [Testing](#testing)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
@@ -38,14 +42,16 @@

## Introduction

This upgrade adds blobs to the beacon chain as part of Deneb. This is an extension of the Capella upgrade.
Deneb is a consensus-layer upgrade containing a number of features, including:
* [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844): Shard Blob Transactions scale data-availability of Ethereum in a simple, forwards-compatible manner
* [EIP-7044](https://github.com/ethereum/EIPs/pull/7044): Perpetually Valid Signed Voluntary Exits

## Custom types

| Name | SSZ equivalent | Description |
| - | - | - |
| `VersionedHash` | `Bytes32` | |
| `BlobIndex` | `uint64` | |
| `VersionedHash` | `Bytes32` | *[New in Deneb:EIP4844]* |
| `BlobIndex` | `uint64` | *[New in Deneb:EIP4844]* |

## Constants

@@ -66,9 +72,13 @@ This upgrade adds blobs to the beacon chain as part of Deneb. This is an extensi

### Execution

| Name | Value |
| - | - |
| `MAX_BLOBS_PER_BLOCK` | `uint64(2**2)` (= 4) |
| Name | Value | Description |
| - | - | - |
| `MAX_BLOB_COMMITMENTS_PER_BLOCK` | `uint64(2**12)` (= 4096) | *[New in Deneb:EIP4844]* hardfork-independent fixed theoretical limit, same as `LIMIT_BLOBS_PER_TX` (see EIP-4844) |
| `MAX_BLOBS_PER_BLOCK` | `uint64(6)` | *[New in Deneb:EIP4844]* maximum number of blobs in a single block, limited by `MAX_BLOB_COMMITMENTS_PER_BLOCK` |

*Note*: The blob transactions are packed into the execution payload by the EL/builder with their corresponding blobs being independently transmitted,
and are limited by `MAX_DATA_GAS_PER_BLOCK // DATA_GAS_PER_BLOB`. However, the CL limit is independently defined by `MAX_BLOBS_PER_BLOCK`.

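For a concrete check of the EL-side bound (data-gas values taken from EIP-4844; they are not restated in this document):

```python
DATA_GAS_PER_BLOB = 2**17           # 131,072 per EIP-4844
MAX_DATA_GAS_PER_BLOCK = 6 * 2**17  # 786,432 per EIP-4844

print(MAX_DATA_GAS_PER_BLOCK // DATA_GAS_PER_BLOB)  # 6, matching MAX_BLOBS_PER_BLOCK
```
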
## Configuration

@@ -94,9 +104,9 @@ class BeaconBlockBody(Container):
    voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]
    sync_aggregate: SyncAggregate
    # Execution
    execution_payload: ExecutionPayload  # [Modified in Deneb]
    execution_payload: ExecutionPayload  # [Modified in Deneb:EIP4844]
    bls_to_execution_changes: List[SignedBLSToExecutionChange, MAX_BLS_TO_EXECUTION_CHANGES]
    blob_kzg_commitments: List[KZGCommitment, MAX_BLOBS_PER_BLOCK]  # [New in Deneb]
    blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]  # [New in Deneb:EIP4844]
```

#### `ExecutionPayload`
@@ -120,7 +130,8 @@ class ExecutionPayload(Container):
    block_hash: Hash32  # Hash of execution block
    transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD]
    withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]
    excess_data_gas: uint256  # [New in Deneb]
    data_gas_used: uint64  # [New in Deneb:EIP4844]
    excess_data_gas: uint64  # [New in Deneb:EIP4844]
```

#### `ExecutionPayloadHeader`
@@ -144,7 +155,8 @@ class ExecutionPayloadHeader(Container):
    block_hash: Hash32  # Hash of execution block
    transactions_root: Root
    withdrawals_root: Root
    excess_data_gas: uint256  # [New in Deneb]
    data_gas_used: uint64  # [New in Deneb:EIP4844]
    excess_data_gas: uint64  # [New in Deneb:EIP4844]
```

## Helper functions
@@ -158,71 +170,81 @@ def kzg_commitment_to_versioned_hash(kzg_commitment: KZGCommitment) -> Versioned
    return VERSIONED_HASH_VERSION_KZG + hash(kzg_commitment)[1:]
```

#### `tx_peek_blob_versioned_hashes`

This function retrieves the hashes from the `SignedBlobTransaction` as defined in Deneb, using SSZ offsets.
Offsets are little-endian `uint32` values, as defined in the [SSZ specification](../../ssz/simple-serialize.md).
See [the full details of `blob_versioned_hashes` offset calculation](https://gist.github.com/protolambda/23bd106b66f6d4bb854ce46044aa3ca3).

```python
def tx_peek_blob_versioned_hashes(opaque_tx: Transaction) -> Sequence[VersionedHash]:
    assert opaque_tx[0] == BLOB_TX_TYPE
    message_offset = 1 + uint32.decode_bytes(opaque_tx[1:5])
    # field offset: 32 + 8 + 32 + 32 + 8 + 4 + 32 + 4 + 4 + 32 = 188
    blob_versioned_hashes_offset = (
        message_offset
        + uint32.decode_bytes(opaque_tx[(message_offset + 188):(message_offset + 192)])
    )
    return [
        VersionedHash(opaque_tx[x:(x + 32)])
        for x in range(blob_versioned_hashes_offset, len(opaque_tx), 32)
    ]
```

#### `verify_kzg_commitments_against_transactions`

```python
def verify_kzg_commitments_against_transactions(transactions: Sequence[Transaction],
                                                kzg_commitments: Sequence[KZGCommitment]) -> bool:
    all_versioned_hashes: List[VersionedHash] = []
    for tx in transactions:
        if tx[0] == BLOB_TX_TYPE:
            all_versioned_hashes += tx_peek_blob_versioned_hashes(tx)
    return all_versioned_hashes == [kzg_commitment_to_versioned_hash(commitment) for commitment in kzg_commitments]
```

## Beacon chain state transition function

### Execution engine

#### Request data

##### Modified `NewPayloadRequest`

```python
@dataclass
class NewPayloadRequest(object):
    execution_payload: ExecutionPayload
    versioned_hashes: Sequence[VersionedHash]
```

#### Engine APIs

##### `is_valid_versioned_hashes`

```python
def is_valid_versioned_hashes(self: ExecutionEngine, new_payload_request: NewPayloadRequest) -> bool:
    """
    Return ``True`` if and only if the versioned hashes computed from the blob transactions of
    ``new_payload_request.execution_payload`` match ``new_payload_request.versioned_hashes``.
    """
    ...
```

##### Modified `verify_and_notify_new_payload`

```python
def verify_and_notify_new_payload(self: ExecutionEngine,
                                  new_payload_request: NewPayloadRequest) -> bool:
    """
    Return ``True`` if and only if ``new_payload_request`` is valid with respect to ``self.execution_state``.
    """
    if not self.is_valid_block_hash(new_payload_request.execution_payload):
        return False

    # [New in Deneb:EIP4844]
    if not self.is_valid_versioned_hashes(new_payload_request):
        return False

    if not self.notify_new_payload(new_payload_request.execution_payload):
        return False

    return True
```

### Block processing

```python
def process_block(state: BeaconState, block: BeaconBlock) -> None:
    process_block_header(state, block)
    if is_execution_enabled(state, block.body):
        process_withdrawals(state, block.body.execution_payload)
        process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE)  # [Modified in Deneb]
    process_randao(state, block.body)
    process_eth1_data(state, block.body)
    process_operations(state, block.body)
    process_sync_aggregate(state, block.body.sync_aggregate)
    process_blob_kzg_commitments(state, block.body)  # [New in Deneb]
```

#### Execution payload

##### `process_execution_payload`
##### Modified `process_execution_payload`

```python
def process_execution_payload(state: BeaconState, payload: ExecutionPayload, execution_engine: ExecutionEngine) -> None:
def process_execution_payload(state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine) -> None:
    payload = body.execution_payload

    # Verify consistency of the parent hash with respect to the previous execution payload header
    if is_merge_transition_complete(state):
        assert payload.parent_hash == state.latest_execution_payload_header.block_hash
    assert payload.parent_hash == state.latest_execution_payload_header.block_hash
    # Verify prev_randao
    assert payload.prev_randao == get_randao_mix(state, get_current_epoch(state))
    # Verify timestamp
    assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)

    # [New in Deneb:EIP4844] Verify commitments are under limit
    assert len(body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK

    # Verify the execution payload is valid
    assert execution_engine.notify_new_payload(payload)
    # [Modified in Deneb:EIP4844] Pass `versioned_hashes` to Execution Engine
    versioned_hashes = [kzg_commitment_to_versioned_hash(commitment) for commitment in body.blob_kzg_commitments]
    assert execution_engine.verify_and_notify_new_payload(
        NewPayloadRequest(execution_payload=payload, versioned_hashes=versioned_hashes)
    )

    # Cache execution payload header
    state.latest_execution_payload_header = ExecutionPayloadHeader(
@@ -241,16 +263,34 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe
        block_hash=payload.block_hash,
        transactions_root=hash_tree_root(payload.transactions),
        withdrawals_root=hash_tree_root(payload.withdrawals),
        excess_data_gas=payload.excess_data_gas,  # [New in Deneb]
        data_gas_used=payload.data_gas_used,  # [New in Deneb:EIP4844]
        excess_data_gas=payload.excess_data_gas,  # [New in Deneb:EIP4844]
    )
```

#### Blob KZG commitments
#### Modified `process_voluntary_exit`

*Note*: The function `process_voluntary_exit` is modified to use a fixed fork version -- `CAPELLA_FORK_VERSION` -- for EIP-7044.

```python
def process_blob_kzg_commitments(state: BeaconState, body: BeaconBlockBody) -> None:
    # pylint: disable=unused-argument
    assert verify_kzg_commitments_against_transactions(body.execution_payload.transactions, body.blob_kzg_commitments)
def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVoluntaryExit) -> None:
    voluntary_exit = signed_voluntary_exit.message
    validator = state.validators[voluntary_exit.validator_index]
    # Verify the validator is active
    assert is_active_validator(validator, get_current_epoch(state))
    # Verify exit has not been initiated
    assert validator.exit_epoch == FAR_FUTURE_EPOCH
    # Exits must specify an epoch when they become valid; they are not valid before then
    assert get_current_epoch(state) >= voluntary_exit.epoch
    # Verify the validator has been active long enough
    assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD
    # Verify signature
    # [Modified in Deneb:EIP7044]
    domain = compute_domain(DOMAIN_VOLUNTARY_EXIT, CAPELLA_FORK_VERSION, state.genesis_validators_root)
    signing_root = compute_signing_root(voluntary_exit, domain)
    assert bls.Verify(validator.pubkey, signing_root, signed_voluntary_exit.signature)
    # Initiate exit
    initiate_validator_exit(state, voluntary_exit.validator_index)
```

## Testing

@@ -8,7 +8,6 @@
- [Introduction](#introduction)
- [Containers](#containers)
- [Helpers](#helpers)
  - [`validate_blobs`](#validate_blobs)
  - [`is_data_available`](#is_data_available)
- [Updated fork-choice handlers](#updated-fork-choice-handlers)
  - [`on_block`](#on_block)
@@ -24,22 +23,12 @@ This is the modification of the fork choice accompanying the Deneb upgrade.

## Helpers

#### `validate_blobs`

```python
def validate_blobs(expected_kzg_commitments: Sequence[KZGCommitment],
                   blobs: Sequence[Blob],
                   proofs: Sequence[KZGProof]) -> None:
    assert len(expected_kzg_commitments) == len(blobs)
    assert len(blobs) == len(proofs)

    assert verify_blob_kzg_proof_batch(blobs, expected_kzg_commitments, proofs)
```

#### `is_data_available`

*[New in Deneb:EIP4844]*

The implementation of `is_data_available` will become more sophisticated during later scaling upgrades.
Initially, verification requires every verifying actor to retrieve all matching `Blob`s and `KZGProof`s, and validate them with `validate_blobs`.
Initially, verification requires every verifying actor to retrieve all matching `Blob`s and `KZGProof`s, and validate them with `verify_blob_kzg_proof_batch`.

The block MUST NOT be considered valid until all valid `Blob`s have been downloaded. Blocks that have been previously validated as available SHOULD be considered available even if the associated `Blob`s have subsequently been pruned.

@@ -47,7 +36,8 @@ The block MUST NOT be considered valid until all valid `Blob`s have been downloa
def is_data_available(beacon_block_root: Root, blob_kzg_commitments: Sequence[KZGCommitment]) -> bool:
    # `retrieve_blobs_and_proofs` is implementation and context dependent
    # It returns all the blobs for the given block root, and raises an exception if not available
    # Note: the p2p network does not guarantee sidecar retrieval outside of `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS`
    # Note: the p2p network does not guarantee sidecar retrieval outside of
    # `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS`
    blobs, proofs = retrieve_blobs_and_proofs(beacon_block_root)

    # For testing, `retrieve_blobs_and_proofs` returns ("TEST", "TEST").
@@ -55,8 +45,7 @@ def is_data_available(beacon_block_root: Root, blob_kzg_commitments: Sequence[KZ
    if isinstance(blobs, str) or isinstance(proofs, str):
        return True

    validate_blobs(blob_kzg_commitments, blobs, proofs)
    return True
    return verify_blob_kzg_proof_batch(blobs, blob_kzg_commitments, proofs)
```

## Updated fork-choice handlers
@@ -89,7 +78,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
    )
    assert store.finalized_checkpoint.root == finalized_checkpoint_block

    # [New in Deneb]
    # [New in Deneb:EIP4844]
    # Check if blob data is available
    # If not, this block MAY be queued and subsequently considered when blob data becomes available
    assert is_data_available(hash_tree_root(block), block.body.blob_kzg_commitments)
@@ -99,10 +88,6 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
    block_root = hash_tree_root(block)
    state_transition(state, signed_block, True)

    # Check the merge transition
    if is_merge_transition_block(pre_state, block.body):
        validate_merge_block(block)

    # Add new block to the store
    store.blocks[block_root] = block
    # Add new state for this block to the store

@@ -83,7 +83,8 @@ def upgrade_to_deneb(pre: capella.BeaconState) -> BeaconState:
        block_hash=pre.latest_execution_payload_header.block_hash,
        transactions_root=pre.latest_execution_payload_header.transactions_root,
        withdrawals_root=pre.latest_execution_payload_header.withdrawals_root,
        excess_data_gas=uint256(0),  # [New in Deneb]
        data_gas_used=uint64(0),  # [New in Deneb:EIP4844]
        excess_data_gas=uint64(0),  # [New in Deneb:EIP4844]
    )
    post = BeaconState(
        # Versioning
@@ -125,7 +126,7 @@ def upgrade_to_deneb(pre: capella.BeaconState) -> BeaconState:
        current_sync_committee=pre.current_sync_committee,
        next_sync_committee=pre.next_sync_committee,
        # Execution-layer
        latest_execution_payload_header=latest_execution_payload_header,  # [Modified in Deneb]
        latest_execution_payload_header=latest_execution_payload_header,  # [Modified in Deneb:EIP4844]
        # Withdrawals
        next_withdrawal_index=pre.next_withdrawal_index,
        next_withdrawal_validator_index=pre.next_withdrawal_validator_index,

@@ -41,7 +41,8 @@ def upgrade_lc_header_to_deneb(pre: capella.LightClientHeader) -> LightClientHea
            block_hash=pre.execution.block_hash,
            transactions_root=pre.execution.transactions_root,
            withdrawals_root=pre.execution.withdrawals_root,
            excess_data_gas=uint256(0),  # [New in Deneb]
            data_gas_used=uint64(0),  # [New in Deneb:EIP4844]
            excess_data_gas=uint64(0),  # [New in Deneb:EIP4844]
        ),
        execution_branch=pre.execution_branch,
    )

@@ -47,8 +47,9 @@ def block_to_light_client_header(block: SignedBeaconBlock) -> LightClientHeader:
        withdrawals_root=hash_tree_root(payload.withdrawals),
    )

    # [New in Deneb]
    # [New in Deneb:EIP4844]
    if epoch >= DENEB_FORK_EPOCH:
        execution_header.data_gas_used = payload.data_gas_used
        execution_header.excess_data_gas = payload.excess_data_gas

    execution_branch = compute_merkle_proof_for_block_body(block.message.body, EXECUTION_PAYLOAD_INDEX)

@@ -66,9 +66,9 @@ def get_lc_execution_root(header: LightClientHeader) -> Root:
def is_valid_light_client_header(header: LightClientHeader) -> bool:
    epoch = compute_epoch_at_slot(header.beacon.slot)

    # [New in Deneb]
    # [New in Deneb:EIP4844]
    if epoch < DENEB_FORK_EPOCH:
        if header.execution.excess_data_gas != uint256(0):
        if header.execution.data_gas_used != uint64(0) or header.execution.excess_data_gas != uint64(0):
            return False

    if epoch < CAPELLA_FORK_EPOCH:

@@ -22,7 +22,7 @@ The specification of these changes continues in the same format as the network s
  - [Topics and messages](#topics-and-messages)
    - [Global topics](#global-topics)
      - [`beacon_block`](#beacon_block)
      - [`blob_sidecar_{index}`](#blob_sidecar_index)
      - [`blob_sidecar_{subnet_id}`](#blob_sidecar_subnet_id)
  - [Transitioning the gossip](#transitioning-the-gossip)
- [The Req/Resp domain](#the-reqresp-domain)
  - [Messages](#messages)
@@ -40,16 +40,21 @@ The specification of these changes continues in the same format as the network s

### Configuration

*[New in Deneb:EIP4844]*

| Name | Value | Description |
|------------------------------------------|--------------------------------------------------|---------------------------------------------------------------------|
| `MAX_REQUEST_BLOCKS_DENEB` | `2**7` (= 128) | Maximum number of blocks in a single request |
| `MAX_REQUEST_BLOB_SIDECARS` | `MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK` | Maximum number of blob sidecars in a single request |
| `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` | `2**12` (= 4096 epochs, ~18 days) | The minimum epoch range over which a node must serve blob sidecars |
| `BLOB_SIDECAR_SUBNET_COUNT` | `6` | The number of blob sidecar subnets used in the gossipsub protocol. |

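As a worked check of the derived value (a sketch assuming the Deneb preset `MAX_BLOBS_PER_BLOCK = 6`, which is defined outside this hunk):

```python
MAX_REQUEST_BLOCKS_DENEB = 2**7  # = 128
MAX_BLOBS_PER_BLOCK = 6  # assumption: Deneb preset value, not shown here

# Maximum number of blob sidecars in a single request
MAX_REQUEST_BLOB_SIDECARS = MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK
assert MAX_REQUEST_BLOB_SIDECARS == 768
```
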
### Containers

#### `BlobSidecar`

*[New in Deneb:EIP4844]*

```python
class BlobSidecar(Container):
    block_root: Root
@@ -64,6 +69,8 @@ class BlobSidecar(Container):

#### `SignedBlobSidecar`

*[New in Deneb:EIP4844]*

```python
class SignedBlobSidecar(Container):
    message: BlobSidecar
@@ -72,6 +79,8 @@ class SignedBlobSidecar(Container):

#### `BlobIdentifier`

*[New in Deneb:EIP4844]*

```python
class BlobIdentifier(Container):
    block_root: Root
@@ -107,7 +116,7 @@ The new topics along with the type of the `data` field of a gossipsub message ar

| Name | Message Type |
| - | - |
| `blob_sidecar_{index}` | `SignedBlobSidecar` (new) |
| `blob_sidecar_{subnet_id}` | `SignedBlobSidecar` [New in Deneb:EIP4844] |

##### Global topics

@@ -117,13 +126,20 @@ Deneb introduces new global topics for blob sidecars.

The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in deneb.

###### `blob_sidecar_{index}`
New validation:

This topic is used to propagate signed blob sidecars, one for each sidecar index. The number of indices is defined by `MAX_BLOBS_PER_BLOCK`.
- _[REJECT]_ The length of KZG commitments is less than or equal to the limit defined in the Consensus Layer --
  i.e. validate that `len(body.signed_beacon_block.message.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK`

###### `blob_sidecar_{subnet_id}`

*[New in Deneb:EIP4844]*

This topic is used to propagate signed blob sidecars, where each blob index maps to some `subnet_id`.

The following validations MUST pass before forwarding the `signed_blob_sidecar` on the network, assuming the alias `sidecar = signed_blob_sidecar.message`:

- _[REJECT]_ The sidecar is for the correct topic -- i.e. `sidecar.index` matches the topic `{index}`.
- _[REJECT]_ The sidecar is for the correct subnet -- i.e. `compute_subnet_for_blob_sidecar(sidecar.index) == subnet_id`.
- _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `sidecar.slot <= current_slot` (a client MAY queue future sidecars for processing at the appropriate slot).
- _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that `sidecar.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)`
- _[IGNORE]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved).
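
For illustration, these checks compose into a simple accept/ignore pipeline. The following is a minimal sketch of how a client might order them; the helpers `get_current_slot`, `get_finalized_slot`, and `has_block` are assumptions for this sketch, not spec functions:

```python
def should_forward_blob_sidecar(signed_blob_sidecar, subnet_id) -> bool:
    sidecar = signed_blob_sidecar.message

    # [REJECT]: the sidecar must belong to the subnet it was received on.
    if compute_subnet_for_blob_sidecar(sidecar.index) != subnet_id:
        return False

    # [IGNORE]: not from a future slot (MAXIMUM_GOSSIP_CLOCK_DISPARITY elided here).
    if sidecar.slot > get_current_slot():
        return False

    # [IGNORE]: newer than the latest finalized slot.
    if sidecar.slot <= get_finalized_slot():
        return False

    # [IGNORE]: the parent block must have been seen (else queue the sidecar).
    if not has_block(sidecar.block_parent_root):
        return False

    return True
```
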
@@ -186,7 +202,7 @@ No more than `MAX_REQUEST_BLOCKS_DENEB` may be requested at a time.

**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_root/1/`

New in deneb.
*[New in Deneb:EIP4844]*

The `<context-bytes>` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`:

@@ -226,7 +242,7 @@ No more than `MAX_REQUEST_BLOB_SIDECARS` may be requested at a time.
The response MUST consist of zero or more `response_chunk`.
Each _successful_ `response_chunk` MUST contain a single `BlobSidecar` payload.

Clients MUST support requesting sidecars since `minimum_request_epoch`, where `minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH)`. If any root in the request content references a block earlier than `minimum_request_epoch`, peers MAY respond with error code `3: ResourceUnavailable` or not include the blob in the response.
Clients MUST support requesting sidecars since `minimum_request_epoch`, where `minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH)`. If any root in the request content references a block earlier than `minimum_request_epoch`, peers MAY respond with error code `3: ResourceUnavailable` or not include the blob sidecar in the response.

Clients MUST respond with at least one sidecar, if they have it.
Clients MAY limit the number of blocks and sidecars in the response.
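
The `minimum_request_epoch` bound translates directly into code. A minimal sketch (a plain-Python illustration, not a spec function):

```python
def get_minimum_request_epoch(finalized_epoch: Epoch, current_epoch: Epoch) -> Epoch:
    # Serve sidecars back to the most recent of: finality, the retention
    # window, and the Deneb fork epoch itself.
    return max(
        finalized_epoch,
        current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS,
        DENEB_FORK_EPOCH,
    )
```
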
@@ -235,7 +251,7 @@ Clients MAY limit the number of blocks and sidecars in the response.

**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_range/1/`

New in deneb.
*[New in Deneb:EIP4844]*

The `<context-bytes>` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`:

@@ -264,7 +280,7 @@ Requests blob sidecars in the slot range `[start_slot, start_slot + count)`, lea

The response is unsigned, i.e. `BlobSidecarsByRange`, as the signature of the beacon block proposer may not be available beyond the initial distribution via gossip.

Before consuming the next response chunk, the response reader SHOULD verify the blob sidecar is well-formatted and correct w.r.t. the expected KZG commitments through `validate_blobs`.
Before consuming the next response chunk, the response reader SHOULD verify the blob sidecar is well-formatted and correct w.r.t. the expected KZG commitments through `verify_blob_kzg_proof_batch`.

`BlobSidecarsByRange` is primarily used to sync blobs that may have been missed on gossip and to sync within the `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` window.

@@ -273,18 +289,18 @@ The request MUST be encoded as an SSZ-container.
The response MUST consist of zero or more `response_chunk`.
Each _successful_ `response_chunk` MUST contain a single `BlobSidecar` payload.

Clients MUST keep a record of signed blobs sidecars seen on the epoch range
`[max(current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH), current_epoch]`
Let `blob_serve_range` be `[max(current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH), current_epoch]`.
Clients MUST keep a record of signed blob sidecars seen on the epoch range `blob_serve_range`
where `current_epoch` is defined by the current wall-clock time,
and clients MUST support serving requests of blobs on this range.

Peers that are unable to reply to blob sidecar requests within the `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS`
epoch range SHOULD respond with error code `3: ResourceUnavailable`.
Peers that are unable to reply to blob sidecar requests within the
range `blob_serve_range` SHOULD respond with error code `3: ResourceUnavailable`.
Such peers that are unable to successfully reply to this range of requests MAY get descored
or disconnected at any time.

*Note*: The above requirement implies that nodes that start from a recent weak subjectivity checkpoint
MUST backfill the local blobs database to at least epoch `current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS`
MUST backfill the local blobs database to at least the range `blob_serve_range`
to be fully compliant with `BlobSidecarsByRange` requests.

*Note*: Although clients that bootstrap from a weak subjectivity checkpoint can begin

@@ -49,7 +49,7 @@

## Introduction

This document specifies basic polynomial operations and KZG polynomial commitment operations as they are needed for the Deneb specification. The implementations are not optimized for performance, but readability. All practical implementations should optimize the polynomial operations.
This document specifies basic polynomial operations and KZG polynomial commitment operations that are essential for the implementation of the EIP-4844 feature in the Deneb specification. The implementations are not optimized for performance, but readability. All practical implementations should optimize the polynomial operations.

Functions flagged as "Public method" MUST be provided by the underlying KZG library as public functions. All other functions are private functions used internally by the KZG library.

@@ -77,6 +77,7 @@ Public functions MUST accept raw bytes as input and perform the required cryptog
| `BYTES_PER_FIELD_ELEMENT` | `uint64(32)` | Bytes used to encode a BLS scalar field element |
| `BYTES_PER_BLOB` | `uint64(BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB)` | The number of bytes in a blob |
| `G1_POINT_AT_INFINITY` | `Bytes48(b'\xc0' + b'\x00' * 47)` | Serialized form of the point at infinity on the G1 group |
| `KZG_ENDIANNESS` | `'big'` | The endianness of the field elements including blobs |


## Preset
@@ -161,7 +162,7 @@ def hash_to_bls_field(data: bytes) -> BLSFieldElement:
    The output is not uniform over the BLS field.
    """
    hashed_data = hash(data)
    return BLSFieldElement(int.from_bytes(hashed_data, ENDIANNESS) % BLS_MODULUS)
    return BLSFieldElement(int.from_bytes(hashed_data, KZG_ENDIANNESS) % BLS_MODULUS)
```

#### `bytes_to_bls_field`
@@ -172,7 +173,7 @@ def bytes_to_bls_field(b: Bytes32) -> BLSFieldElement:
    Convert untrusted bytes to a trusted and validated BLS scalar field element.
    This function does not accept inputs greater than the BLS modulus.
    """
    field_element = int.from_bytes(b, ENDIANNESS)
    field_element = int.from_bytes(b, KZG_ENDIANNESS)
    assert field_element < BLS_MODULUS
    return BLSFieldElement(field_element)
```
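
As a quick illustration of the `KZG_ENDIANNESS` convention (a plain-Python sketch, not part of the specification): field elements serialize most-significant byte first, so the integer `1` occupies the last byte of its 32-byte encoding.

```python
value = 1
encoded = value.to_bytes(32, 'big')  # KZG_ENDIANNESS = 'big'
assert encoded == b'\x00' * 31 + b'\x01'
assert int.from_bytes(encoded, 'big') == value
```
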
@@ -237,7 +238,7 @@ def compute_challenge(blob: Blob,
    """

    # Append the degree of the polynomial as a domain separator
    degree_poly = int.to_bytes(FIELD_ELEMENTS_PER_BLOB, 16, ENDIANNESS)
    degree_poly = int.to_bytes(FIELD_ELEMENTS_PER_BLOB, 16, KZG_ENDIANNESS)
    data = FIAT_SHAMIR_PROTOCOL_DOMAIN + degree_poly

    data += blob
@@ -406,15 +407,15 @@ def verify_kzg_proof_batch(commitments: Sequence[KZGCommitment],

    # Compute a random challenge. Note that it does not have to be computed from a hash,
    # r just has to be random.
    degree_poly = int.to_bytes(FIELD_ELEMENTS_PER_BLOB, 8, ENDIANNESS)
    num_commitments = int.to_bytes(len(commitments), 8, ENDIANNESS)
    degree_poly = int.to_bytes(FIELD_ELEMENTS_PER_BLOB, 8, KZG_ENDIANNESS)
    num_commitments = int.to_bytes(len(commitments), 8, KZG_ENDIANNESS)
    data = RANDOM_CHALLENGE_KZG_BATCH_DOMAIN + degree_poly + num_commitments

    # Append all inputs to the transcript before we hash
    for commitment, z, y, proof in zip(commitments, zs, ys, proofs):
        data += commitment \
            + int.to_bytes(z, BYTES_PER_FIELD_ELEMENT, ENDIANNESS) \
            + int.to_bytes(y, BYTES_PER_FIELD_ELEMENT, ENDIANNESS) \
            + int.to_bytes(z, BYTES_PER_FIELD_ELEMENT, KZG_ENDIANNESS) \
            + int.to_bytes(y, BYTES_PER_FIELD_ELEMENT, KZG_ENDIANNESS) \
            + proof

    r = hash_to_bls_field(data)
@@ -451,7 +452,7 @@ def compute_kzg_proof(blob: Blob, z_bytes: Bytes32) -> Tuple[KZGProof, Bytes32]:
    assert len(z_bytes) == BYTES_PER_FIELD_ELEMENT
    polynomial = blob_to_polynomial(blob)
    proof, y = compute_kzg_proof_impl(polynomial, bytes_to_bls_field(z_bytes))
    return proof, y.to_bytes(BYTES_PER_FIELD_ELEMENT, ENDIANNESS)
    return proof, y.to_bytes(BYTES_PER_FIELD_ELEMENT, KZG_ENDIANNESS)
```

#### `compute_quotient_eval_within_domain`
@@ -566,7 +567,7 @@ def verify_blob_kzg_proof_batch(blobs: Sequence[Blob],
                                proofs_bytes: Sequence[Bytes48]) -> bool:
    """
    Given a list of blobs and blob KZG proofs, verify that they correspond to the provided commitments.

    Will return True if there are zero blobs/commitments/proofs.
    Public method.
    """


@@ -11,7 +11,11 @@
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Helpers](#helpers)
  - [`get_blobs_and_kzg_commitments`](#get_blobs_and_kzg_commitments)
  - [`BlobsBundle`](#blobsbundle)
  - [Modified `GetPayloadResponse`](#modified-getpayloadresponse)
- [Protocol](#protocol)
  - [`ExecutionEngine`](#executionengine)
    - [Modified `get_payload`](#modified-get_payload)
- [Beacon chain responsibilities](#beacon-chain-responsibilities)
  - [Block and sidecar proposal](#block-and-sidecar-proposal)
    - [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
@@ -36,17 +40,42 @@ Please see related Beacon Chain doc before continuing and use them as a referenc

## Helpers

### `get_blobs_and_kzg_commitments`
### `BlobsBundle`

The interface to retrieve blobs and corresponding kzg commitments.

Note: This API is *unstable*. `get_blobs_and_kzg_commitments` and `get_payload` may be unified.
Implementers may also retrieve blobs individually per transaction.
*[New in Deneb:EIP4844]*

```python
def get_blobs_and_kzg_commitments(
    payload_id: PayloadId
) -> Tuple[Sequence[Blob], Sequence[KZGCommitment], Sequence[KZGProof]]:
@dataclass
class BlobsBundle(object):
    commitments: Sequence[KZGCommitment]
    proofs: Sequence[KZGProof]
    blobs: Sequence[Blob]
```

### Modified `GetPayloadResponse`

```python
@dataclass
class GetPayloadResponse(object):
    execution_payload: ExecutionPayload
    block_value: uint256
    blobs_bundle: BlobsBundle  # [New in Deneb:EIP4844]
```

## Protocol

### `ExecutionEngine`

#### Modified `get_payload`

Given the `payload_id`, `get_payload` returns the most recent version of the execution payload that
has been built since the corresponding call to the `notify_forkchoice_updated` method.

```python
def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> GetPayloadResponse:
    """
    Return ExecutionPayload, uint256, BlobsBundle objects.
    """
    # pylint: disable=unused-argument
    ...
```
@@ -61,27 +90,17 @@ All validator responsibilities remain unchanged other than those noted below.

##### Blob KZG commitments

*[New in Deneb:EIP4844]*

1. After retrieving the execution payload from the execution engine as specified in Capella,
   use the `payload_id` to retrieve `blobs` and `blob_kzg_commitments` via `get_blobs_and_kzg_commitments(payload_id)`.
2. Validate `blobs` and `blob_kzg_commitments`:

```python
def validate_blobs_and_kzg_commitments(execution_payload: ExecutionPayload,
                                       blobs: Sequence[Blob],
                                       blob_kzg_commitments: Sequence[KZGCommitment],
                                       blob_kzg_proofs: Sequence[KZGProof]) -> None:
    # Optionally sanity-check that the KZG commitments match the versioned hashes in the transactions
    assert verify_kzg_commitments_against_transactions(execution_payload.transactions, blob_kzg_commitments)

    # Optionally sanity-check that the KZG commitments match the blobs (as produced by the execution engine)
    assert len(blob_kzg_commitments) == len(blobs) == len(blob_kzg_proofs)
    assert verify_blob_kzg_proof_batch(blobs, blob_kzg_commitments, blob_kzg_proofs)
```

3. If valid, set `block.body.blob_kzg_commitments = blob_kzg_commitments`.
   use the `payload_id` to retrieve `blobs`, `blob_kzg_commitments`, and `blob_kzg_proofs`
   via `get_payload(payload_id).blobs_bundle`.
2. Set `block.body.blob_kzg_commitments = blob_kzg_commitments`.

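A minimal sketch of the updated two-step flow (assuming `execution_engine`, `payload_id`, and `block` are in scope, as in the surrounding proposal logic):

```python
# Step 1: retrieve the bundle produced alongside the payload.
blobs_bundle = execution_engine.get_payload(payload_id).blobs_bundle
blobs = blobs_bundle.blobs
blob_kzg_commitments = blobs_bundle.commitments
blob_kzg_proofs = blobs_bundle.proofs

# Step 2: commit to the blobs in the block body.
block.body.blob_kzg_commitments = blob_kzg_commitments
```
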
#### Constructing the `SignedBlobSidecar`s

*[New in Deneb:EIP4844]*

To construct a `SignedBlobSidecar`, a `signed_blob_sidecar` is defined with the necessary context for block and sidecar proposal.

##### Sidecar
@@ -108,7 +127,7 @@ def get_blob_sidecars(block: BeaconBlock,

```

Then for each sidecar, `signed_sidecar = SignedBlobSidecar(message=sidecar, signature=signature)` is constructed and published to the `blob_sidecar_{index}` topics according to its index.
Then for each sidecar, `signed_sidecar = SignedBlobSidecar(message=sidecar, signature=signature)` is constructed and published to the associated sidecar topic, the `blob_sidecar_{subnet_id}` pubsub topic.

`signature` is obtained from:

@@ -121,6 +140,15 @@ def get_blob_sidecar_signature(state: BeaconState,
    return bls.Sign(privkey, signing_root)
```

The `subnet_id` for the `signed_sidecar` is calculated with:
- Let `blob_index = signed_sidecar.message.index`.
- Let `subnet_id = compute_subnet_for_blob_sidecar(blob_index)`.

```python
def compute_subnet_for_blob_sidecar(blob_index: BlobIndex) -> SubnetID:
    return SubnetID(blob_index % BLOB_SIDECAR_SUBNET_COUNT)
```

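As a worked example (assuming `BLOB_SIDECAR_SUBNET_COUNT = 6`, the value configured earlier in this document), blob indices map one-to-one onto subnets until the index reaches the subnet count:

```python
BLOB_SIDECAR_SUBNET_COUNT = 6

# Indices 0..5 land on subnets 0..5; index 6 would wrap back to subnet 0.
assert [i % BLOB_SIDECAR_SUBNET_COUNT for i in range(7)] == [0, 1, 2, 3, 4, 5, 0]
```
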
After publishing, peers on the network may request the sidecar through sync-requests, or a local user may be interested.

The validator MUST hold on to sidecars for `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` epochs and serve when capable,

@@ -20,6 +20,8 @@ It consists of four main sections:
- [Protocol Negotiation](#protocol-negotiation)
- [Multiplexing](#multiplexing)
- [Consensus-layer network interaction domains](#consensus-layer-network-interaction-domains)
  - [Custom types](#custom-types)
  - [Constants](#constants)
  - [Configuration](#configuration)
  - [MetaData](#metadata)
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
@@ -53,6 +55,7 @@ It consists of four main sections:
  - [ENR structure](#enr-structure)
    - [Attestation subnet bitfield](#attestation-subnet-bitfield)
    - [`eth2` field](#eth2-field)
  - [Attestation subnet subscription](#attestation-subnet-subscription)
- [Design decision rationale](#design-decision-rationale)
  - [Transport](#transport-1)
    - [Why are we defining specific transports?](#why-are-we-defining-specific-transports)
@@ -165,22 +168,42 @@ See the [Rationale](#design-decision-rationale) section below for tradeoffs.

## Consensus-layer network interaction domains

### Custom types

We define the following Python custom types for type hinting and readability:

| Name | SSZ equivalent | Description |
| - | - | - |
| `NodeID` | `uint256` | node identifier |
| `SubnetID` | `uint64` | subnet identifier |

### Constants

| Name | Value | Description |
| - | - | - |
| `NODE_ID_BITS` | `256` | The bit length of uint256 is 256 |

### Configuration

This section outlines constants that are used in this spec.
This section outlines configurations that are used in this spec.

| Name | Value | Description |
|---|---|---|
| `GOSSIP_MAX_SIZE` | `2**20` (= 1048576, 1 MiB) | The maximum allowed size of uncompressed gossip messages. |
| `GOSSIP_MAX_SIZE` | `10 * 2**20` (= 10485760, 10 MiB) | The maximum allowed size of uncompressed gossip messages. |
| `MAX_REQUEST_BLOCKS` | `2**10` (= 1024) | Maximum number of blocks in a single request |
| `EPOCHS_PER_SUBNET_SUBSCRIPTION` | `2**8` (= 256) | Number of epochs on a subnet subscription (~27 hours) |
| `MIN_EPOCHS_FOR_BLOCK_REQUESTS` | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) | The minimum epoch range over which a node must serve blocks |
| `MAX_CHUNK_SIZE` | `2**20` (1048576, 1 MiB) | The maximum allowed size of uncompressed req/resp chunked responses. |
| `TTFB_TIMEOUT` | `5s` | The maximum time to wait for first byte of request response (time-to-first-byte). |
| `RESP_TIMEOUT` | `10s` | The maximum time for complete response transfer. |
| `MAX_CHUNK_SIZE` | `10 * 2**20` (=10485760, 10 MiB) | The maximum allowed size of uncompressed req/resp chunked responses. |
| `TTFB_TIMEOUT` | `5` | The maximum duration in **seconds** to wait for first byte of request response (time-to-first-byte). |
| `RESP_TIMEOUT` | `10` | The maximum duration in **seconds** for complete response transfer. |
| `ATTESTATION_PROPAGATION_SLOT_RANGE` | `32` | The maximum number of slots during which an attestation can be propagated. |
| `MAXIMUM_GOSSIP_CLOCK_DISPARITY` | `500ms` | The maximum milliseconds of clock disparity assumed between honest nodes. |
| `MESSAGE_DOMAIN_INVALID_SNAPPY` | `0x00000000` | 4-byte domain for gossip message-id isolation of *invalid* snappy messages |
| `MESSAGE_DOMAIN_VALID_SNAPPY` | `0x01000000` | 4-byte domain for gossip message-id isolation of *valid* snappy messages |
| `MAXIMUM_GOSSIP_CLOCK_DISPARITY` | `500` | The maximum **milliseconds** of clock disparity assumed between honest nodes. |
| `MESSAGE_DOMAIN_INVALID_SNAPPY` | `DomainType('0x00000000')` | 4-byte domain for gossip message-id isolation of *invalid* snappy messages |
| `MESSAGE_DOMAIN_VALID_SNAPPY` | `DomainType('0x01000000')` | 4-byte domain for gossip message-id isolation of *valid* snappy messages |
| `SUBNETS_PER_NODE` | `2` | The number of long-lived subnets a beacon node should be subscribed to. |
| `ATTESTATION_SUBNET_COUNT` | `2**6` (= 64) | The number of attestation subnets used in the gossipsub protocol. |
| `ATTESTATION_SUBNET_EXTRA_BITS` | `0` | The number of extra bits of a NodeId to use when mapping to a subscribed subnet |
| `ATTESTATION_SUBNET_PREFIX_BITS` | `int(ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS)` | |

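The powers-of-two values in this table expand as follows (a quick plain-Python arithmetic check, not part of the specification):

```python
assert 10 * 2**20 == 10485760  # GOSSIP_MAX_SIZE and MAX_CHUNK_SIZE, 10 MiB
assert 2**10 == 1024           # MAX_REQUEST_BLOCKS
assert 2**8 == 256             # EPOCHS_PER_SUBNET_SUBSCRIPTION
assert 2**6 == 64              # ATTESTATION_SUBNET_COUNT
```
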
### MetaData

@@ -979,6 +1002,34 @@ Clients MAY connect to peers with the same `fork_digest` but a different `next_f
Unless `ENRForkID` is manually updated to matching prior to the earlier `next_fork_epoch` of the two clients,
these connecting clients will be unable to successfully interact starting at the earlier `next_fork_epoch`.

### Attestation subnet subscription

Because Phase 0 does not have shards and thus does not have Shard Committees, there is no stable backbone to the attestation subnets (`beacon_attestation_{subnet_id}`). To provide this stability, each beacon node should:

* Remain subscribed to `SUBNETS_PER_NODE` subnets for `EPOCHS_PER_SUBNET_SUBSCRIPTION` epochs.
* Maintain advertisement of the selected subnets in their node's ENR `attnets` entry by setting the selected `subnet_id` bits to `True` (e.g. `ENR["attnets"][subnet_id] = True`) for all persistent attestation subnets.
* Select these subnets based on their node-id as specified by the following `compute_subscribed_subnets(node_id, epoch)` function.

```python
def compute_subscribed_subnet(node_id: NodeID, epoch: Epoch, index: int) -> SubnetID:
    node_id_prefix = node_id >> (NODE_ID_BITS - ATTESTATION_SUBNET_PREFIX_BITS)
    node_offset = node_id % EPOCHS_PER_SUBNET_SUBSCRIPTION
    permutation_seed = hash(uint_to_bytes(uint64((epoch + node_offset) // EPOCHS_PER_SUBNET_SUBSCRIPTION)))
    permutated_prefix = compute_shuffled_index(
        node_id_prefix,
        1 << ATTESTATION_SUBNET_PREFIX_BITS,
        permutation_seed,
    )
    return SubnetID((permutated_prefix + index) % ATTESTATION_SUBNET_COUNT)
```

```python
def compute_subscribed_subnets(node_id: NodeID, epoch: Epoch) -> Sequence[SubnetID]:
    return [compute_subscribed_subnet(node_id, epoch, index) for index in range(SUBNETS_PER_NODE)]
```

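For illustration, a node would refresh and advertise its persistent subscriptions roughly as follows (a sketch; `my_node_id`, `current_epoch`, and `enr` are assumed to be in scope):

```python
# Recompute the backbone subnets for the current epoch and advertise them.
subnets = compute_subscribed_subnets(my_node_id, current_epoch)
for subnet_id in subnets:
    enr["attnets"][subnet_id] = True  # advertise the persistent subscription
```

Note that `compute_subscribed_subnet` only changes when `(epoch + node_offset) // EPOCHS_PER_SUBNET_SUBSCRIPTION` changes, so a node's selection is stable for `EPOCHS_PER_SUBNET_SUBSCRIPTION` epochs at a time.
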
*Note*: When preparing for a hard fork, a node must select and subscribe to subnets of the future fork versioning at least `EPOCHS_PER_SUBNET_SUBSCRIPTION` epochs in advance of the fork. These new subnets for the fork are maintained in addition to those for the current fork until the fork occurs. After the fork occurs, let the subnets from the previous fork reach the end of life with no replacements.

## Design decision rationale

### Transport
@@ -1438,6 +1489,8 @@ the epoch range that a new node syncing from a checkpoint must backfill.
[weak subjectivity guide](./weak-subjectivity.md). Specifically, to find this max epoch range, we use the worst case event of a very large validator size
(`>= MIN_PER_EPOCH_CHURN_LIMIT * CHURN_LIMIT_QUOTIENT`).

[0]: # (eth2spec: skip)

```python
MIN_EPOCHS_FOR_BLOCK_REQUESTS = (
    MIN_VALIDATOR_WITHDRAWABILITY_DELAY
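    # For illustration, with the assumed mainnet values
    # MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256 and CHURN_LIMIT_QUOTIENT = 2**16,
    # this works out to 256 + 65536 // 2 = 33024 epochs (~5 months),
    # matching the configuration table above.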
@@ -63,7 +63,6 @@ This is an accompanying document to [Phase 0 -- The Beacon Chain](./beacon-chain
  - [Aggregation bits](#aggregation-bits-1)
  - [Aggregate signature](#aggregate-signature-1)
  - [Broadcast aggregate](#broadcast-aggregate)
- [Phase 0 attestation subnet stability](#phase-0-attestation-subnet-stability)
- [How to avoid slashing](#how-to-avoid-slashing)
  - [Proposer slashing](#proposer-slashing)
  - [Attester slashing](#attester-slashing)
@@ -88,10 +87,7 @@ All terminology, constants, functions, and protocol mechanics defined in the [Ph

| Name | Value | Unit | Duration |
| - | - | :-: | :-: |
| `TARGET_AGGREGATORS_PER_COMMITTEE` | `2**4` (= 16) | validators | |
| `RANDOM_SUBNETS_PER_VALIDATOR` | `2**0` (= 1) | subnets | |
| `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` | `2**8` (= 256) | epochs | ~27 hours |
| `ATTESTATION_SUBNET_COUNT` | `64` | The number of attestation subnets used in the gossipsub protocol. |
| `TARGET_AGGREGATORS_PER_COMMITTEE` | `2**4` (= 16) | validators |

## Containers

@@ -513,7 +509,9 @@ The `subnet_id` for the `attestation` is calculated with:
- Let `subnet_id = compute_subnet_for_attestation(committees_per_slot, attestation.data.slot, attestation.data.index)`.

```python
def compute_subnet_for_attestation(committees_per_slot: uint64, slot: Slot, committee_index: CommitteeIndex) -> uint64:
def compute_subnet_for_attestation(committees_per_slot: uint64,
                                   slot: Slot,
                                   committee_index: CommitteeIndex) -> SubnetID:
    """
    Compute the correct subnet for an attestation for Phase 0.
    Note, this mimics expected future behavior where attestations will be mapped to their shard subnet.
@@ -521,7 +519,7 @@ def compute_subnet_for_attestation(committees_per_slot: uint64, slot: Slot, comm
    slots_since_epoch_start = uint64(slot % SLOTS_PER_EPOCH)
    committees_since_epoch_start = committees_per_slot * slots_since_epoch_start

    return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
    return SubnetID((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
```
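
As a worked example (a sketch with illustrative values, assuming `SLOTS_PER_EPOCH = 32` and `ATTESTATION_SUBNET_COUNT = 64`):

```python
# committees_per_slot = 2, fourth slot of the epoch (slot % 32 == 3), committee 1:
# committees_since_epoch_start = 2 * 3 = 6, so the subnet is (6 + 1) % 64 = 7.
assert (2 * 3 + 1) % 64 == 7
```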
### Attestation aggregation
@@ -604,18 +602,6 @@ def get_aggregate_and_proof_signature(state: BeaconState,
    return bls.Sign(privkey, signing_root)
```

## Phase 0 attestation subnet stability

Because Phase 0 does not have shards and thus does not have Shard Committees, there is no stable backbone to the attestation subnets (`beacon_attestation_{subnet_id}`). To provide this stability, each validator must:

* Randomly select and remain subscribed to `RANDOM_SUBNETS_PER_VALIDATOR` attestation subnets
* Maintain advertisement of the randomly selected subnets in their node's ENR `attnets` entry by setting the randomly selected `subnet_id` bits to `True` (e.g. `ENR["attnets"][subnet_id] = True`) for all persistent attestation subnets
* Set the lifetime of each random subscription to a random number of epochs between `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` and `2 * EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION`. At the end of life for a subscription, select a new random subnet, update subnet subscriptions, and publish an updated ENR

*Note*: Short lived beacon committee assignments should not be added into the ENR `attnets` entry.

*Note*: When preparing for a hard fork, a validator must select and subscribe to random subnets of the future fork versioning at least `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` epochs in advance of the fork. These new subnets for the fork are maintained in addition to those for the current fork until the fork occurs. After the fork occurs, let the subnets from the previous fork reach the end of life with no replacements.

## How to avoid slashing

"Slashing" is the burning of some amount of validator funds and immediate ejection from the active validator set. In Phase 0, there are two ways in which funds can be slashed: [proposer slashing](#proposer-slashing) and [attester slashing](#attester-slashing). Although being slashed has serious repercussions, it is simple enough to avoid being slashed altogether by remaining _consistent_ with respect to the messages a validator has previously signed.

@@ -159,7 +159,7 @@ these conditions.*

To optimistically import a block:

- The [`notify_new_payload`](../specs/bellatrix/beacon-chain.md#notify_new_payload) function MUST return `True` if the execution
- The [`verify_and_notify_new_payload`](../specs/bellatrix/beacon-chain.md#verify_and_notify_new_payload) function MUST return `True` if the execution
engine returns `NOT_VALIDATED` or `VALID`. An `INVALIDATED` response MUST return `False`.
- The [`validate_merge_block`](../specs/bellatrix/fork-choice.md#validate_merge_block)
function MUST NOT raise an assertion if both the
@@ -172,7 +172,7 @@ In addition to this change in validation, the consensus engine MUST track which
blocks returned `NOT_VALIDATED` and which returned `VALID` for subsequent processing.

Optimistically imported blocks MUST pass all verifications included in
`process_block` (notwithstanding the modifications to `notify_new_payload`).
`process_block` (notwithstanding the modifications to `verify_and_notify_new_payload`).

A consensus engine MUST be able to retrospectively (i.e., after import) modify
the status of `NOT_VALIDATED` blocks to be either `VALID` or `INVALIDATED` based upon responses

@@ -1 +1 @@
1.3.0
1.4.0-alpha.3

@@ -1,4 +1,7 @@
from eth_utils import encode_hex
from dataclasses import (
    dataclass,
    field,
)
import os
import time
import shutil
@@ -8,24 +11,80 @@ import sys
import json
from typing import Iterable, AnyStr, Any, Callable
import traceback
from collections import namedtuple

from ruamel.yaml import (
    YAML,
)

from filelock import FileLock
from snappy import compress
from pathos.multiprocessing import ProcessingPool as Pool

from eth_utils import encode_hex

from eth2spec.test import context
from eth2spec.test.exceptions import SkippedTest

from .gen_typing import TestProvider
from .settings import (
    GENERATOR_MODE,
    MODE_MULTIPROCESSING,
    MODE_SINGLE_PROCESS,
    NUM_PROCESS,
    TIME_THRESHOLD_TO_PRINT,
)


# Flag that the runner does NOT run test via pytest
context.is_pytest = False


TIME_THRESHOLD_TO_PRINT = 1.0  # seconds
@dataclass
class Diagnostics(object):
    collected_test_count: int = 0
    generated_test_count: int = 0
    skipped_test_count: int = 0
    test_identifiers: list = field(default_factory=list)


TestCaseParams = namedtuple(
    'TestCaseParams', [
        'test_case', 'case_dir', 'log_file', 'file_mode',
    ])


def worker_function(item):
    return generate_test_vector(*item)


def get_default_yaml():
    yaml = YAML(pure=True)
    yaml.default_flow_style = None

    def _represent_none(self, _):
        return self.represent_scalar('tag:yaml.org,2002:null', 'null')

    yaml.representer.add_representer(type(None), _represent_none)

    return yaml


def get_cfg_yaml():
    # Spec config is using a YAML subset
    cfg_yaml = YAML(pure=True)
    cfg_yaml.default_flow_style = False  # Emit separate line for each key

    def cfg_represent_bytes(self, data):
        return self.represent_int(encode_hex(data))

    cfg_yaml.representer.add_representer(bytes, cfg_represent_bytes)

    def cfg_represent_quoted_str(self, data):
        return self.represent_scalar(u'tag:yaml.org,2002:str', data, style="'")

    cfg_yaml.representer.add_representer(context.quoted_str, cfg_represent_quoted_str)
    return cfg_yaml


def validate_output_dir(path_str):
@@ -40,6 +99,47 @@ def validate_output_dir(path_str):
    return path


def get_test_case_dir(test_case, output_dir):
    return (
        Path(output_dir) / Path(test_case.preset_name) / Path(test_case.fork_name)
        / Path(test_case.runner_name) / Path(test_case.handler_name)
        / Path(test_case.suite_name) / Path(test_case.case_name)
    )


def get_test_identifier(test_case):
    return "::".join([
        test_case.preset_name,
        test_case.fork_name,
        test_case.runner_name,
        test_case.handler_name,
        test_case.suite_name,
        test_case.case_name
    ])


def get_incomplete_tag_file(case_dir):
    return case_dir / "INCOMPLETE"


def should_skip_case_dir(case_dir, is_force, diagnostics_obj):
    is_skip = False
    incomplete_tag_file = get_incomplete_tag_file(case_dir)

    if case_dir.exists():
        if not is_force and not incomplete_tag_file.exists():
            diagnostics_obj.skipped_test_count += 1
            print(f'Skipping already existing test: {case_dir}')
            is_skip = True
        else:
            print(f'Warning, output directory {case_dir} already exists,'
                  ' old files will be deleted and it will generate test vector files with the latest version')
            # Clear the existing case_dir folder
            shutil.rmtree(case_dir)

    return is_skip, diagnostics_obj


def run_generator(generator_name, test_providers: Iterable[TestProvider]):
    """
    Implementation for a general test generator.
@@ -94,28 +194,6 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
    else:
        file_mode = "w"

    yaml = YAML(pure=True)
    yaml.default_flow_style = None

    def _represent_none(self, _):
        return self.represent_scalar('tag:yaml.org,2002:null', 'null')

    yaml.representer.add_representer(type(None), _represent_none)

    # Spec config is using a YAML subset
    cfg_yaml = YAML(pure=True)
    cfg_yaml.default_flow_style = False  # Emit separate line for each key

    def cfg_represent_bytes(self, data):
        return self.represent_int(encode_hex(data))

    cfg_yaml.representer.add_representer(bytes, cfg_represent_bytes)

    def cfg_represent_quoted_str(self, data):
        return self.represent_scalar(u'tag:yaml.org,2002:str', data, style="'")

    cfg_yaml.representer.add_representer(context.quoted_str, cfg_represent_quoted_str)

    log_file = Path(output_dir) / 'testgen_error_log.txt'

    print(f"Generating tests into {output_dir}")
@@ -129,12 +207,13 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
        print(f"Filtering test-generator runs to only include presets: {', '.join(presets)}")

    collect_only = args.collect_only
    collected_test_count = 0
    generated_test_count = 0
    skipped_test_count = 0
    test_identifiers = []

    diagnostics_obj = Diagnostics()
    provider_start = time.time()

    if GENERATOR_MODE == MODE_MULTIPROCESSING:
        all_test_case_params = []

    for tprov in test_providers:
        if not collect_only:
            # runs anything that we don't want to repeat for every test case.
@@ -145,146 +224,133 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
            if len(presets) != 0 and test_case.preset_name not in presets:
                continue

            case_dir = (
                Path(output_dir) / Path(test_case.preset_name) / Path(test_case.fork_name)
                / Path(test_case.runner_name) / Path(test_case.handler_name)
                / Path(test_case.suite_name) / Path(test_case.case_name)
            )
            collected_test_count += 1
            case_dir = get_test_case_dir(test_case, output_dir)
            print(f"Collected test at: {case_dir}")
            diagnostics_obj.collected_test_count += 1

            incomplete_tag_file = case_dir / "INCOMPLETE"
            is_skip, diagnostics_obj = should_skip_case_dir(case_dir, args.force, diagnostics_obj)
            if is_skip:
                continue

            if case_dir.exists():
                if not args.force and not incomplete_tag_file.exists():
                    skipped_test_count += 1
                    print(f'Skipping already existing test: {case_dir}')
                    continue
                else:
                    print(f'Warning, output directory {case_dir} already exist,'
                          f' old files will be deleted and it will generate test vector files with the latest version')
                    # Clear the existing case_dir folder
                    shutil.rmtree(case_dir)
            if GENERATOR_MODE == MODE_SINGLE_PROCESS:
                result = generate_test_vector(test_case, case_dir, log_file, file_mode)
                write_result_into_diagnostics_obj(result, diagnostics_obj)
            elif GENERATOR_MODE == MODE_MULTIPROCESSING:
                item = TestCaseParams(test_case, case_dir, log_file, file_mode)
                all_test_case_params.append(item)

            print(f'Generating test: {case_dir}')
            test_start = time.time()
    if GENERATOR_MODE == MODE_MULTIPROCESSING:
        with Pool(processes=NUM_PROCESS) as pool:
            results = pool.map(worker_function, iter(all_test_case_params))

            written_part = False

            # Add `INCOMPLETE` tag file to indicate that the test generation has not completed.
            case_dir.mkdir(parents=True, exist_ok=True)
            with incomplete_tag_file.open("w") as f:
                f.write("\n")

            try:
                def output_part(out_kind: str, name: str, fn: Callable[[Path, ], None]):
                    # make sure the test case directory is created before any test part is written.
                    case_dir.mkdir(parents=True, exist_ok=True)
                    try:
                        fn(case_dir)
                    except IOError as e:
                        error_message = (
                            f'[Error] error when dumping test "{case_dir}", part "{name}", kind "{out_kind}": {e}'
                        )
                        # Write to error log file
                        with log_file.open("a+") as f:
                            f.write(error_message)
                            traceback.print_exc(file=f)
                            f.write('\n')

                        sys.exit(error_message)

                meta = dict()

                try:
                    for (name, out_kind, data) in test_case.case_fn():
                        written_part = True
                        if out_kind == "meta":
                            meta[name] = data
                        elif out_kind == "cfg":
                            output_part(out_kind, name, dump_yaml_fn(data, name, file_mode, cfg_yaml))
                        elif out_kind == "data":
                            output_part(out_kind, name, dump_yaml_fn(data, name, file_mode, yaml))
                        elif out_kind == "ssz":
                            output_part(out_kind, name, dump_ssz_fn(data, name, file_mode))
                        else:
                            assert False  # Unknown kind
                except SkippedTest as e:
                    print(e)
                    skipped_test_count += 1
                    shutil.rmtree(case_dir)
                    continue

                # Once all meta data is collected (if any), write it to a meta data file.
                if len(meta) != 0:
                    written_part = True
                    output_part("data", "meta", dump_yaml_fn(meta, "meta", file_mode, yaml))

                if not written_part:
                    print(f"test case {case_dir} did not produce any test case parts")
            except Exception as e:
                error_message = f"[ERROR] failed to generate vector(s) for test {case_dir}: {e}"
                # Write to error log file
                with log_file.open("a+") as f:
                    f.write(error_message)
                    traceback.print_exc(file=f)
                    f.write('\n')
                traceback.print_exc()
            else:
                # If no written_part, the only file was incomplete_tag_file. Clear the existing case_dir folder.
                if not written_part:
                    shutil.rmtree(case_dir)
                else:
                    generated_test_count += 1
                    test_identifier = "::".join([
                        test_case.preset_name,
                        test_case.fork_name,
                        test_case.runner_name,
                        test_case.handler_name,
                        test_case.suite_name,
                        test_case.case_name
                    ])
                    test_identifiers.append(test_identifier)
                    # Only remove `INCOMPLETE` tag file
                    os.remove(incomplete_tag_file)
            test_end = time.time()
            span = round(test_end - test_start, 2)
            if span > TIME_THRESHOLD_TO_PRINT:
                print(f' - generated in {span} seconds')
        for result in results:
            write_result_into_diagnostics_obj(result, diagnostics_obj)

    provider_end = time.time()
    span = round(provider_end - provider_start, 2)

    if collect_only:
        print(f"Collected {collected_test_count} tests in total")
        print(f"Collected {diagnostics_obj.collected_test_count} tests in total")
    else:
        summary_message = f"completed generation of {generator_name} with {generated_test_count} tests"
        summary_message += f" ({skipped_test_count} skipped tests)"
        summary_message = f"completed generation of {generator_name} with {diagnostics_obj.generated_test_count} tests"
        summary_message += f" ({diagnostics_obj.skipped_test_count} skipped tests)"
        if span > TIME_THRESHOLD_TO_PRINT:
            summary_message += f" in {span} seconds"
        print(summary_message)
    diagnostics = {
        "collected_test_count": collected_test_count,
        "generated_test_count": generated_test_count,
        "skipped_test_count": skipped_test_count,
        "test_identifiers": test_identifiers,

    diagnostics_output = {
        "collected_test_count": diagnostics_obj.collected_test_count,
        "generated_test_count": diagnostics_obj.generated_test_count,
        "skipped_test_count": diagnostics_obj.skipped_test_count,
        "test_identifiers": diagnostics_obj.test_identifiers,
        "durations": [f"{span} seconds"],
    }
    diagnostics_path = Path(os.path.join(output_dir, "diagnostics.json"))
    diagnostics_lock = FileLock(os.path.join(output_dir, "diagnostics.json.lock"))
    diagnostics_path = Path(os.path.join(output_dir, "diagnostics_obj.json"))
    diagnostics_lock = FileLock(os.path.join(output_dir, "diagnostics_obj.json.lock"))
    with diagnostics_lock:
        diagnostics_path.touch(exist_ok=True)
        if os.path.getsize(diagnostics_path) == 0:
            with open(diagnostics_path, "w+") as f:
                json.dump(diagnostics, f)
                json.dump(diagnostics_output, f)
        else:
            with open(diagnostics_path, "r+") as f:
                existing_diagnostics = json.load(f)
                for k, v in diagnostics.items():
                for k, v in diagnostics_output.items():
                    existing_diagnostics[k] += v
            with open(diagnostics_path, "w+") as f:
                json.dump(existing_diagnostics, f)
    print(f"wrote diagnostics to {diagnostics_path}")
    print(f"wrote diagnostics_obj to {diagnostics_path}")


def generate_test_vector(test_case, case_dir, log_file, file_mode):
    cfg_yaml = get_cfg_yaml()
    yaml = get_default_yaml()

    written_part = False

    print(f'Generating test: {case_dir}')
    test_start = time.time()

    # Add `INCOMPLETE` tag file to indicate that the test generation has not completed.
    incomplete_tag_file = get_incomplete_tag_file(case_dir)
    case_dir.mkdir(parents=True, exist_ok=True)
    with incomplete_tag_file.open("w") as f:
        f.write("\n")

    result = None
    try:
        meta = dict()
        try:
            written_part, meta = execute_test(test_case, case_dir, meta, log_file, file_mode, cfg_yaml, yaml)
        except SkippedTest as e:
            result = 0  # 0 means skipped
            print(e)
            shutil.rmtree(case_dir)
            return result

        # Once all meta data is collected (if any), write it to a meta data file.
        if len(meta) != 0:
            written_part = True
            output_part(case_dir, log_file, "data", "meta", dump_yaml_fn(meta, "meta", file_mode, yaml))

    except Exception as e:
        result = -1  # -1 means error
        error_message = f"[ERROR] failed to generate vector(s) for test {case_dir}: {e}"
        # Write to error log file
        with log_file.open("a+") as f:
            f.write(error_message)
            traceback.print_exc(file=f)
            f.write('\n')
        print(error_message)
        traceback.print_exc()
    else:
        # If no written_part, the only file was incomplete_tag_file. Clear the existing case_dir folder.
        if not written_part:
            print(f"[Error] test case {case_dir} did not produce any written_part")
            shutil.rmtree(case_dir)
            result = -1
        else:
            result = get_test_identifier(test_case)
            # Only remove `INCOMPLETE` tag file
            os.remove(incomplete_tag_file)
    test_end = time.time()
    span = round(test_end - test_start, 2)
    if span > TIME_THRESHOLD_TO_PRINT:
        print(f' - generated in {span} seconds')

    return result


def write_result_into_diagnostics_obj(result, diagnostics_obj):
    if result == -1:  # error
        pass
    elif result == 0:
        diagnostics_obj.skipped_test_count += 1
    elif result is not None:
        diagnostics_obj.generated_test_count += 1
        diagnostics_obj.test_identifiers.append(result)
    else:
        raise Exception(f"Unexpected result: {result}")


def dump_yaml_fn(data: Any, name: str, file_mode: str, yaml_encoder: YAML):
@@ -292,9 +358,45 @@ def dump_yaml_fn(data: Any, name: str, file_mode: str, yaml_encoder: YAML):
        out_path = case_path / Path(name + '.yaml')
        with out_path.open(file_mode) as f:
            yaml_encoder.dump(data, f)
            f.close()
    return dump


def output_part(case_dir, log_file, out_kind: str, name: str, fn: Callable[[Path, ], None]):
    # make sure the test case directory is created before any test part is written.
    case_dir.mkdir(parents=True, exist_ok=True)
    try:
        fn(case_dir)
    except (IOError, ValueError) as e:
        error_message = f'[Error] error when dumping test "{case_dir}", part "{name}", kind "{out_kind}": {e}'
        # Write to error log file
        with log_file.open("a+") as f:
            f.write(error_message)
            traceback.print_exc(file=f)
            f.write('\n')
        print(error_message)
        sys.exit(error_message)


def execute_test(test_case, case_dir, meta, log_file, file_mode, cfg_yaml, yaml):
    result = test_case.case_fn()
    written_part = False
    for (name, out_kind, data) in result:
        written_part = True
        if out_kind == "meta":
            meta[name] = data
        elif out_kind == "cfg":
            output_part(case_dir, log_file, out_kind, name, dump_yaml_fn(data, name, file_mode, cfg_yaml))
        elif out_kind == "data":
            output_part(case_dir, log_file, out_kind, name, dump_yaml_fn(data, name, file_mode, yaml))
        elif out_kind == "ssz":
            output_part(case_dir, log_file, out_kind, name, dump_ssz_fn(data, name, file_mode))
        else:
            raise ValueError("Unknown out_kind %s" % out_kind)

    return written_part, meta


def dump_ssz_fn(data: AnyStr, name: str, file_mode: str):
    def dump(case_path: Path):
        out_path = case_path / Path(name + '.ssz_snappy')

13
tests/core/pyspec/eth2spec/gen_helpers/gen_base/settings.py
Normal file
@@ -0,0 +1,13 @@
import multiprocessing


# Generator mode setting
MODE_SINGLE_PROCESS = 'MODE_SINGLE_PROCESS'
MODE_MULTIPROCESSING = 'MODE_MULTIPROCESSING'
# Test generator mode
GENERATOR_MODE = MODE_SINGLE_PROCESS
# Number of subprocesses when using MODE_MULTIPROCESSING
NUM_PROCESS = multiprocessing.cpu_count() // 2 - 1

# Diagnostics
TIME_THRESHOLD_TO_PRINT = 1.0  # seconds
@@ -1,12 +1,12 @@
from eth2spec.test.context import (
    spec_state_test,
    with_altair_and_later,
    with_light_client,
    with_test_suite_name,
)


@with_test_suite_name("BeaconState")
@with_altair_and_later
@with_light_client
@spec_state_test
def test_current_sync_committee_merkle_proof(spec, state):
    yield "object", state
@@ -27,7 +27,7 @@ def test_current_sync_committee_merkle_proof(spec, state):


@with_test_suite_name("BeaconState")
@with_altair_and_later
@with_light_client
@spec_state_test
def test_next_sync_committee_merkle_proof(spec, state):
    yield "object", state
@@ -48,7 +48,7 @@ def test_next_sync_committee_merkle_proof(spec, state):


@with_test_suite_name("BeaconState")
@with_altair_and_later
@with_light_client
@spec_state_test
def test_finality_root_merkle_proof(spec, state):
    yield "object", state

@@ -9,7 +9,7 @@ from eth2spec.test.context import (
    with_phases,
    with_presets,
    with_state,
    with_altair_and_later,
    with_light_client,
)
from eth2spec.test.helpers.attestations import (
    next_slots_with_attestations,
@@ -26,7 +26,6 @@ from eth2spec.test.helpers.fork_transition import (
from eth2spec.test.helpers.forks import (
    is_post_capella, is_post_deneb,
    is_post_fork,
    is_post_eip6110,
)
from eth2spec.test.helpers.light_client import (
    get_sync_aggregate,
@@ -58,10 +57,6 @@ def needs_upgrade_to_deneb(d_spec, s_spec):
    return is_post_deneb(s_spec) and not is_post_deneb(d_spec)


def needs_upgrade_to_eip6110(d_spec, s_spec):
    return is_post_eip6110(s_spec) and not is_post_eip6110(d_spec)


def check_lc_header_equal(d_spec, s_spec, data, upgraded):
    assert upgraded.beacon.slot == data.beacon.slot
    assert upgraded.beacon.hash_tree_root() == data.beacon.hash_tree_root()
@@ -89,10 +84,6 @@ def upgrade_lc_bootstrap_to_store(d_spec, s_spec, data):
        upgraded = s_spec.upgrade_lc_bootstrap_to_deneb(upgraded)
        check_lc_bootstrap_equal(d_spec, s_spec, data, upgraded)

    if needs_upgrade_to_eip6110(d_spec, s_spec):
        upgraded = s_spec.upgrade_lc_bootstrap_to_eip6110(upgraded)
        check_lc_bootstrap_equal(d_spec, s_spec, data, upgraded)

    return upgraded


@@ -154,8 +145,6 @@ class LightClientSyncTest(object):


def get_store_fork_version(s_spec):
    if is_post_eip6110(s_spec):
        return s_spec.config.EIP6110_FORK_VERSION
    if is_post_deneb(s_spec):
        return s_spec.config.DENEB_FORK_VERSION
    if is_post_capella(s_spec):
@@ -301,7 +290,7 @@ def compute_start_slot_at_next_sync_committee_period(spec, state):
    return compute_start_slot_at_sync_committee_period(spec, sync_committee_period + 1)


@with_altair_and_later
@with_light_client
@spec_state_test_with_matching_config
@with_presets([MINIMAL], reason="too slow")
def test_light_client_sync(spec, state):
@@ -523,7 +512,7 @@ def test_light_client_sync(spec, state):
    yield from finish_test(test)


@with_altair_and_later
@with_light_client
@spec_state_test_with_matching_config
@with_presets([MINIMAL], reason="too slow")
def test_supply_sync_committee_from_past_update(spec, state):
@@ -553,7 +542,7 @@ def test_supply_sync_committee_from_past_update(spec, state):
    yield from finish_test(test)


@with_altair_and_later
@with_light_client
@spec_state_test_with_matching_config
@with_presets([MINIMAL], reason="too slow")
def test_advance_finality_without_sync_committee(spec, state):

@@ -1,7 +1,7 @@
from eth2spec.test.context import (
    spec_state_test,
    with_presets,
    with_altair_and_later,
    with_light_client,
)
from eth2spec.test.helpers.attestations import (
    next_slots_with_attestations,
@@ -29,7 +29,7 @@ def create_test_update(spec, test, with_next, with_finality, participation_rate)
)


@with_altair_and_later
@with_light_client
@spec_state_test
@with_presets([MINIMAL], reason="too slow")
def test_update_ranking(spec, state):

@@ -3,7 +3,7 @@ from copy import deepcopy
from eth2spec.test.context import (
    spec_state_test_with_matching_config,
    with_presets,
    with_altair_and_later,
    with_light_client,
)
from eth2spec.test.helpers.attestations import (
    next_epoch_with_attestations,
@@ -29,7 +29,7 @@ def setup_test(spec, state):
    return (trusted_block, store)


@with_altair_and_later
@with_light_client
@spec_state_test_with_matching_config
def test_process_light_client_update_not_timeout(spec, state):
    genesis_block, store = setup_test(spec, state)
@@ -61,7 +61,7 @@ def test_process_light_client_update_not_timeout(spec, state):
    assert store.current_max_active_participants > 0


@with_altair_and_later
@with_light_client
@spec_state_test_with_matching_config
@with_presets([MINIMAL], reason="too slow")
def test_process_light_client_update_at_period_boundary(spec, state):
@@ -96,7 +96,7 @@ def test_process_light_client_update_at_period_boundary(spec, state):
    assert store.current_max_active_participants > 0


@with_altair_and_later
@with_light_client
@spec_state_test_with_matching_config
@with_presets([MINIMAL], reason="too slow")
def test_process_light_client_update_timeout(spec, state):
@@ -131,7 +131,7 @@ def test_process_light_client_update_timeout(spec, state):
    assert store.current_max_active_participants > 0


@with_altair_and_later
@with_light_client
@spec_state_test_with_matching_config
@with_presets([MINIMAL], reason="too slow")
def test_process_light_client_update_finality_updated(spec, state):

@@ -8,7 +8,13 @@ from eth2spec.test.helpers.execution_payload import (
    build_state_with_incomplete_transition,
    build_state_with_complete_transition,
)
from eth2spec.test.context import spec_state_test, expect_assertion_error, with_bellatrix_and_later
from eth2spec.test.context import (
    BELLATRIX,
    expect_assertion_error,
    spec_state_test,
    with_bellatrix_and_later,
    with_phases,
)
from eth2spec.test.helpers.state import next_slot


@@ -21,33 +27,35 @@ def run_execution_payload_processing(spec, state, execution_payload, valid=True,
    - post-state ('post').
    If ``valid == False``, run expecting ``AssertionError``
    """
    # Before Deneb, only `body.execution_payload` matters. `BeaconBlockBody` is just a wrapper.
    body = spec.BeaconBlockBody(execution_payload=execution_payload)

    yield 'pre', state
    yield 'execution', {'execution_valid': execution_valid}
    yield 'execution_payload', execution_payload
    yield 'body', body

    called_new_block = False

    class TestEngine(spec.NoopExecutionEngine):
        def notify_new_payload(self, payload) -> bool:
        def verify_and_notify_new_payload(self, new_payload_request) -> bool:
            nonlocal called_new_block, execution_valid
            called_new_block = True
            assert payload == execution_payload
            assert new_payload_request.execution_payload == body.execution_payload
            return execution_valid

    if not valid:
        expect_assertion_error(lambda: spec.process_execution_payload(state, execution_payload, TestEngine()))
        expect_assertion_error(lambda: spec.process_execution_payload(state, body, TestEngine()))
        yield 'post', None
        return

    spec.process_execution_payload(state, execution_payload, TestEngine())
    spec.process_execution_payload(state, body, TestEngine())

    # Make sure we called the engine
    assert called_new_block

    yield 'post', state

    assert state.latest_execution_payload_header == get_execution_payload_header(spec, execution_payload)
    assert state.latest_execution_payload_header == get_execution_payload_header(spec, body.execution_payload)


def run_success_test(spec, state):
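
Aside: after this change the helper hands the whole `BeaconBlockBody` to `process_execution_payload`, and the mocked engine implements the combined `verify_and_notify_new_payload` hook instead of `notify_new_payload`. A minimal sketch of driving the updated interface directly (assumes the usual pyspec `spec`/`state` fixtures; not the exact repository helper):

```python
# Sketch only: exercises the post-change engine interface.
payload = build_empty_execution_payload(spec, state)
body = spec.BeaconBlockBody(execution_payload=payload)  # pre-Deneb, a thin wrapper

class RejectingEngine(spec.NoopExecutionEngine):
    def verify_and_notify_new_payload(self, new_payload_request) -> bool:
        # The request carries the payload; returning False models an invalid payload.
        assert new_payload_request.execution_payload == body.execution_payload
        return False

expect_assertion_error(lambda: spec.process_execution_payload(state, body, RejectingEngine()))
```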
@@ -117,7 +125,7 @@ def test_invalid_bad_execution_regular_payload(spec, state):
    yield from run_bad_execution_test(spec, state)


@with_bellatrix_and_later
@with_phases([BELLATRIX])
@spec_state_test
def test_bad_parent_hash_first_payload(spec, state):
    state = build_state_with_incomplete_transition(spec, state)
@@ -2,6 +2,11 @@ from eth2spec.test.context import (
    spec_state_test,
    always_bls,
    with_bellatrix_and_later,
    with_phases,
)
from eth2spec.test.helpers.constants import (
    BELLATRIX,
    CAPELLA,
)
from eth2spec.test.helpers.keys import pubkey_to_privkey
from eth2spec.test.helpers.state import (
@@ -12,8 +17,10 @@ from eth2spec.test.helpers.voluntary_exits import (
    sign_voluntary_exit,
)

BELLATRIX_AND_CAPELLA = [BELLATRIX, CAPELLA]

def _run_voluntary_exit_processing_test(

def run_voluntary_exit_processing_test(
    spec,
    state,
    fork_version,
@@ -51,7 +58,7 @@ def _run_voluntary_exit_processing_test(
@spec_state_test
@always_bls
def test_invalid_voluntary_exit_with_current_fork_version_is_before_fork_epoch(spec, state):
    yield from _run_voluntary_exit_processing_test(
    yield from run_voluntary_exit_processing_test(
        spec,
        state,
        fork_version=state.fork.current_version,
@@ -60,11 +67,11 @@ def test_invalid_voluntary_exit_with_current_fork_version_is_before_fork_epoch(s
    )


@with_bellatrix_and_later
@with_phases(BELLATRIX_AND_CAPELLA)
@spec_state_test
@always_bls
def test_voluntary_exit_with_current_fork_version_not_is_before_fork_epoch(spec, state):
    yield from _run_voluntary_exit_processing_test(
    yield from run_voluntary_exit_processing_test(
        spec,
        state,
        fork_version=state.fork.current_version,
@@ -72,13 +79,13 @@ def test_voluntary_exit_with_current_fork_version_not_is_before_fork_epoch(spec,
    )


@with_bellatrix_and_later
@with_phases([BELLATRIX, CAPELLA])
@spec_state_test
@always_bls
def test_voluntary_exit_with_previous_fork_version_is_before_fork_epoch(spec, state):
    assert state.fork.previous_version != state.fork.current_version

    yield from _run_voluntary_exit_processing_test(
    yield from run_voluntary_exit_processing_test(
        spec,
        state,
        fork_version=state.fork.previous_version,
@@ -86,13 +93,13 @@ def test_voluntary_exit_with_previous_fork_version_is_before_fork_epoch(st
    )


@with_bellatrix_and_later
@with_phases(BELLATRIX_AND_CAPELLA)
@spec_state_test
@always_bls
def test_invalid_voluntary_exit_with_previous_fork_version_not_is_before_fork_epoch(spec, state):
    assert state.fork.previous_version != state.fork.current_version

    yield from _run_voluntary_exit_processing_test(
    yield from run_voluntary_exit_processing_test(
        spec,
        state,
        fork_version=state.fork.previous_version,
@@ -107,7 +114,7 @@ def test_invalid_voluntary_exit_with_previous_fork_version_not_is_before_fork_ep
def test_invalid_voluntary_exit_with_genesis_fork_version_is_before_fork_epoch(spec, state):
    assert spec.config.GENESIS_FORK_VERSION not in (state.fork.previous_version, state.fork.current_version)

    yield from _run_voluntary_exit_processing_test(
    yield from run_voluntary_exit_processing_test(
        spec,
        state,
        fork_version=spec.config.GENESIS_FORK_VERSION,
@@ -122,7 +129,7 @@ def test_invalid_voluntary_exit_with_genesis_fork_version_not_is_before_fork_epoch(s
def test_invalid_voluntary_exit_with_genesis_fork_version_not_is_before_fork_epoch(spec, state):
    assert spec.config.GENESIS_FORK_VERSION not in (state.fork.previous_version, state.fork.current_version)

    yield from _run_voluntary_exit_processing_test(
    yield from run_voluntary_exit_processing_test(
        spec,
        state,
        fork_version=spec.config.GENESIS_FORK_VERSION,
@@ -10,7 +10,10 @@ from eth2spec.test.helpers.execution_payload import (
    build_randomized_execution_payload
)
from eth2spec.test.context import (
    with_bellatrix_and_later, spec_state_test
    BELLATRIX,
    with_bellatrix_and_later,
    with_phases,
    spec_state_test,
)


@@ -44,7 +47,7 @@ def test_empty_block_transition_randomized_payload(spec, state):
    yield 'post', state


@with_bellatrix_and_later
@with_phases([BELLATRIX])
@spec_state_test
def test_is_execution_enabled_false(spec, state):
    # Set `latest_execution_payload_header` to empty
@@ -4,9 +4,12 @@ from typing import Optional
from eth2spec.test.helpers.pow_block import (
    prepare_random_pow_chain,
)
from eth2spec.test.helpers.constants import (
    BELLATRIX,
)
from eth2spec.test.context import (
    spec_state_test,
    with_bellatrix_and_later,
    with_phases,
)


@@ -30,7 +33,7 @@ expected_results = [
# it would return the first block (IS_HEAD_PARENT_BLOCK).


@with_bellatrix_and_later
@with_phases([BELLATRIX])
@spec_state_test
def test_get_pow_block_at_terminal_total_difficulty(spec, state):
    for result in expected_results:
@@ -90,7 +93,7 @@ prepare_execution_payload_expected_results = [
]


@with_bellatrix_and_later
@with_phases([BELLATRIX])
@spec_state_test
def test_prepare_execution_payload(spec, state):
    for result in prepare_execution_payload_expected_results:
@@ -157,11 +160,11 @@ def test_prepare_execution_payload(spec, state):

        payload_id = spec.prepare_execution_payload(
            state=state,
            pow_chain=pow_chain.to_dict(),
            safe_block_hash=safe_block_hash,
            finalized_block_hash=finalized_block_hash,
            suggested_fee_recipient=suggested_fee_recipient,
            execution_engine=TestEngine(),
            pow_chain=pow_chain.to_dict(),
        )
        assert payload_id == result_payload_id

@@ -0,0 +1,24 @@
from eth2spec.test.helpers.execution_payload import (
    build_empty_execution_payload,
    compute_el_block_hash,
    build_state_with_incomplete_transition,
)
from eth2spec.test.context import (
    spec_state_test,
    with_capella_and_later,
)
from eth2spec.test.helpers.state import next_slot
from eth2spec.test.bellatrix.block_processing.test_process_execution_payload import run_execution_payload_processing


@with_capella_and_later
@spec_state_test
def test_invalid_bad_parent_hash_first_payload(spec, state):
    state = build_state_with_incomplete_transition(spec, state)
    next_slot(spec, state)

    execution_payload = build_empty_execution_payload(spec, state)
    execution_payload.parent_hash = b'\x55' * 32
    execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)

    yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
@@ -56,7 +56,7 @@ def verify_post_state(state, spec, expected_withdrawals,
def run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=None,
                               fully_withdrawable_indices=None, partial_withdrawals_indices=None, valid=True):
    """
    Run ``process_execution_payload``, yielding:
    Run ``process_withdrawals``, yielding:
    - pre-state ('pre')
    - execution payload ('execution_payload')
    - post-state ('post').
@@ -31,6 +31,29 @@ from eth2spec.test.helpers.deposits import (
from eth2spec.test.helpers.voluntary_exits import prepare_signed_exits


#
# `is_execution_enabled` has been removed from Capella
#

@with_capella_and_later
@spec_state_test
def test_invalid_is_execution_enabled_false(spec, state):
    # Set `latest_execution_payload_header` to empty
    state.latest_execution_payload_header = spec.ExecutionPayloadHeader()
    yield 'pre', state

    block = build_empty_block_for_next_slot(spec, state)

    # Set `execution_payload` to empty
    block.body.execution_payload = spec.ExecutionPayload()
    assert len(block.body.execution_payload.transactions) == 0

    signed_block = state_transition_and_sign_block(spec, state, block, expect_fail=True)

    yield 'blocks', [signed_block]
    yield 'post', None


#
# BLSToExecutionChange
#
@@ -18,6 +18,7 @@ from .helpers.constants import (
    MINIMAL, MAINNET,
    ALL_PHASES,
    ALL_FORK_UPGRADES,
    LIGHT_CLIENT_TESTING_FORKS,
)
from .helpers.forks import is_post_fork
from .helpers.typing import SpecForkName, PresetBaseName
@@ -428,13 +429,6 @@ def with_all_phases_except(exclusion_phases):
    return decorator


with_altair_and_later = with_all_phases_from(ALTAIR)
with_bellatrix_and_later = with_all_phases_from(BELLATRIX)
with_capella_and_later = with_all_phases_from(CAPELLA)
with_deneb_and_later = with_all_phases_from(DENEB)
with_eip6110_and_later = with_all_phases_from(EIP6110)


def _get_preset_targets(kw):
    preset_name = DEFAULT_TEST_PRESET
    if 'preset' in kw:
@@ -540,6 +534,14 @@ def with_presets(preset_bases, reason=None):
    return decorator


with_altair_and_later = with_all_phases_from(ALTAIR)
with_bellatrix_and_later = with_all_phases_from(BELLATRIX)
with_capella_and_later = with_all_phases_from(CAPELLA)
with_deneb_and_later = with_all_phases_from(DENEB)
with_eip6110_and_later = with_all_phases_from(EIP6110)
with_light_client = with_phases(LIGHT_CLIENT_TESTING_FORKS)


class quoted_str(str):
    pass

@@ -560,7 +562,7 @@ def _get_basic_dict(ssz_dict: Dict[str, Any]) -> Dict[str, Any]:
    return result


def _get_copy_of_spec(spec):
def get_copy_of_spec(spec):
    fork = spec.fork
    preset = spec.config.PRESET_BASE
    module_path = f"eth2spec.{fork}.{preset}"
@@ -601,14 +603,14 @@ def with_config_overrides(config_overrides, emitted_fork=None, emit=True):
    def decorator(fn):
        def wrapper(*args, spec: Spec, **kw):
            # Apply config overrides to spec
            spec, output_config = spec_with_config_overrides(_get_copy_of_spec(spec), config_overrides)
            spec, output_config = spec_with_config_overrides(get_copy_of_spec(spec), config_overrides)

            # Apply config overrides to additional phases, if present
            if 'phases' in kw:
                phases = {}
                for fork in kw['phases']:
                    phases[fork], output = spec_with_config_overrides(
                        _get_copy_of_spec(kw['phases'][fork]), config_overrides)
                        get_copy_of_spec(kw['phases'][fork]), config_overrides)
                    if emitted_fork == fork:
                        output_config = output
                kw['phases'] = phases
@@ -0,0 +1,205 @@
from random import Random

from eth2spec.test.helpers.execution_payload import (
    build_empty_execution_payload,
    compute_el_block_hash,
    get_execution_payload_header,
)
from eth2spec.test.context import (
    spec_state_test,
    expect_assertion_error,
    with_deneb_and_later
)
from eth2spec.test.helpers.sharding import (
    get_sample_opaque_tx,
)


def run_execution_payload_processing(spec, state, execution_payload, blob_kzg_commitments,
                                     valid=True, execution_valid=True):
    """
    Run ``process_execution_payload``, yielding:
    - pre-state ('pre')
    - execution payload ('execution_payload')
    - execution details, to mock EVM execution ('execution.yml', a dict with 'execution_valid' key and boolean value)
    - post-state ('post').
    If ``valid == False``, run expecting ``AssertionError``
    """
    # Before Deneb, only `body.execution_payload` matters. `BeaconBlockBody` is just a wrapper.
    body = spec.BeaconBlockBody(
        blob_kzg_commitments=blob_kzg_commitments,
        execution_payload=execution_payload
    )

    yield 'pre', state
    yield 'execution', {'execution_valid': execution_valid}
    yield 'body', body

    called_new_block = False

    class TestEngine(spec.NoopExecutionEngine):
        def verify_and_notify_new_payload(self, new_payload_request) -> bool:
            nonlocal called_new_block, execution_valid
            called_new_block = True
            assert new_payload_request.execution_payload == body.execution_payload
            return execution_valid

    if not valid:
        expect_assertion_error(lambda: spec.process_execution_payload(state, body, TestEngine()))
        yield 'post', None
        return

    spec.process_execution_payload(state, body, TestEngine())

    # Make sure we called the engine
    assert called_new_block

    yield 'post', state

    assert state.latest_execution_payload_header == get_execution_payload_header(spec, body.execution_payload)


@with_deneb_and_later
@spec_state_test
def test_incorrect_blob_tx_type(spec, state):
    """
    The versioned hashes are wrong, but the testing ExecutionEngine returns VALID by default.
    """
    execution_payload = build_empty_execution_payload(spec, state)

    opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec)
    opaque_tx = b'\x04' + opaque_tx[1:]  # incorrect tx type

    execution_payload.transactions = [opaque_tx]
    execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)

    yield from run_execution_payload_processing(spec, state, execution_payload, blob_kzg_commitments)


@with_deneb_and_later
@spec_state_test
def test_incorrect_transaction_length_1_byte(spec, state):
    """
    The versioned hashes are wrong, but the testing ExecutionEngine returns VALID by default.
    """
    execution_payload = build_empty_execution_payload(spec, state)

    opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec)
    opaque_tx = opaque_tx + b'\x12'  # incorrect tx length

    execution_payload.transactions = [opaque_tx]
    execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)

    yield from run_execution_payload_processing(spec, state, execution_payload, blob_kzg_commitments)


@with_deneb_and_later
@spec_state_test
def test_incorrect_transaction_length_32_bytes(spec, state):
    """
    The versioned hashes are wrong, but the testing ExecutionEngine returns VALID by default.
    """
    execution_payload = build_empty_execution_payload(spec, state)

    opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec)
    opaque_tx = opaque_tx + b'\x12' * 32  # incorrect tx length

    execution_payload.transactions = [opaque_tx]
    execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)

    yield from run_execution_payload_processing(spec, state, execution_payload, blob_kzg_commitments)


@with_deneb_and_later
@spec_state_test
def test_incorrect_commitment(spec, state):
    """
    The versioned hashes are wrong, but the testing ExecutionEngine returns VALID by default.
    """
    execution_payload = build_empty_execution_payload(spec, state)

    opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec)
    blob_kzg_commitments[0] = b'\x12' * 48  # incorrect commitment

    execution_payload.transactions = [opaque_tx]
    execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)

    yield from run_execution_payload_processing(spec, state, execution_payload, blob_kzg_commitments)


@with_deneb_and_later
@spec_state_test
def test_incorrect_commitments_order(spec, state):
    """
    The versioned hashes are wrong, but the testing ExecutionEngine returns VALID by default.
    """
    execution_payload = build_empty_execution_payload(spec, state)

    opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec, blob_count=2, rng=Random(1111))
    blob_kzg_commitments = [blob_kzg_commitments[1], blob_kzg_commitments[0]]  # incorrect order

    execution_payload.transactions = [opaque_tx]
    execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)

    yield from run_execution_payload_processing(spec, state, execution_payload, blob_kzg_commitments)


@with_deneb_and_later
@spec_state_test
def test_incorrect_block_hash(spec, state):
    execution_payload = build_empty_execution_payload(spec, state)

    opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec)

    execution_payload.transactions = [opaque_tx]
    execution_payload.block_hash = b'\x12' * 32  # incorrect block hash

    # CL itself doesn't verify EL block hash
    yield from run_execution_payload_processing(spec, state, execution_payload, blob_kzg_commitments)


@with_deneb_and_later
@spec_state_test
def test_zeroed_commitment(spec, state):
    """
    The blob is invalid, but the commitment is in correct form.
    """
    execution_payload = build_empty_execution_payload(spec, state)

    opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec, blob_count=1, is_valid_blob=False)
    assert all(commitment == b'\x00' * 48 for commitment in blob_kzg_commitments)

    execution_payload.transactions = [opaque_tx]
    execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)

    yield from run_execution_payload_processing(spec, state, execution_payload, blob_kzg_commitments)


@with_deneb_and_later
@spec_state_test
def test_invalid_correct_input__execution_invalid(spec, state):
    """
    The versioned hashes are wrong, but the testing ExecutionEngine returns VALID by default.
    """
    execution_payload = build_empty_execution_payload(spec, state)

    opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec)

    execution_payload.transactions = [opaque_tx]
    execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)

    yield from run_execution_payload_processing(spec, state, execution_payload, blob_kzg_commitments,
                                                valid=False, execution_valid=False)


@with_deneb_and_later
@spec_state_test
def test_invalid_exceed_max_blobs_per_block(spec, state):
    execution_payload = build_empty_execution_payload(spec, state)

    opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec, blob_count=spec.MAX_BLOBS_PER_BLOCK + 1)

    execution_payload.transactions = [opaque_tx]
    execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)

    yield from run_execution_payload_processing(spec, state, execution_payload, blob_kzg_commitments, valid=False)
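
Aside: the "versioned hashes" that these cases corrupt are derived from the KZG commitments carried by the blob transaction. A sketch of the derivation as specified for Deneb (`VERSIONED_HASH_VERSION_KZG == 0x01`):

```python
from hashlib import sha256

VERSIONED_HASH_VERSION_KZG = b'\x01'

def kzg_commitment_to_versioned_hash(commitment: bytes) -> bytes:
    # Version byte followed by the last 31 bytes of sha256(commitment).
    return VERSIONED_HASH_VERSION_KZG + sha256(commitment).digest()[1:]
```

Mutating the transaction bytes or the commitment list, as the tests above do, breaks the equality between the hashes peeked from the transaction and those recomputed from `blob_kzg_commitments`.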
@@ -0,0 +1,88 @@
from eth2spec.test.context import (
    always_bls,
    spec_state_test,
    with_deneb_and_later,
)
from eth2spec.test.helpers.constants import (
    DENEB,
)
from eth2spec.test.bellatrix.block_processing.test_process_voluntary_exit import (
    run_voluntary_exit_processing_test,
)


@with_deneb_and_later
@spec_state_test
@always_bls
def test_invalid_voluntary_exit_with_current_fork_version_not_is_before_fork_epoch(spec, state):
    """
    Since Deneb, the VoluntaryExit domain is fixed to `CAPELLA_FORK_VERSION`
    """
    assert state.fork.current_version != spec.config.CAPELLA_FORK_VERSION
    yield from run_voluntary_exit_processing_test(
        spec,
        state,
        fork_version=state.fork.current_version,
        is_before_fork_epoch=False,
        valid=False,
    )


@with_deneb_and_later
@spec_state_test
@always_bls
def test_voluntary_exit_with_previous_fork_version_not_is_before_fork_epoch(spec, state):
    """
    Since Deneb, the VoluntaryExit domain is fixed to `CAPELLA_FORK_VERSION`

    Note: This test is valid for ``spec.fork == DENEB`` and invalid for subsequent forks
    """
    assert state.fork.previous_version != state.fork.current_version

    if spec.fork == DENEB:
        assert state.fork.previous_version == spec.config.CAPELLA_FORK_VERSION
        yield from run_voluntary_exit_processing_test(
            spec,
            state,
            fork_version=state.fork.previous_version,
            is_before_fork_epoch=False,
        )
    else:
        assert state.fork.previous_version != spec.config.CAPELLA_FORK_VERSION
        yield from run_voluntary_exit_processing_test(
            spec,
            state,
            fork_version=state.fork.previous_version,
            is_before_fork_epoch=False,
            valid=False,
        )


@with_deneb_and_later
@spec_state_test
@always_bls
def test_voluntary_exit_with_previous_fork_version_is_before_fork_epoch(spec, state):
    """
    Since Deneb, the VoluntaryExit domain is fixed to `CAPELLA_FORK_VERSION`

    Note: This test is valid for ``spec.fork == DENEB`` and invalid for subsequent forks
    """
    assert state.fork.previous_version != state.fork.current_version

    if spec.fork == DENEB:
        assert state.fork.previous_version == spec.config.CAPELLA_FORK_VERSION
        yield from run_voluntary_exit_processing_test(
            spec,
            state,
            fork_version=state.fork.previous_version,
            is_before_fork_epoch=True,
        )
    else:
        assert state.fork.previous_version != spec.config.CAPELLA_FORK_VERSION
        yield from run_voluntary_exit_processing_test(
            spec,
            state,
            fork_version=state.fork.previous_version,
            is_before_fork_epoch=True,
            valid=False,
        )
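
Aside: the rule under test here (exit signatures pinned to the Capella fork domain from Deneb onwards) reduces to the domain computation sketched below; the same logic appears in the `sign_voluntary_exit` helper later in this diff:

```python
# Sketch: since Deneb the exit domain ignores the current fork version,
# so exits signed under Capella remain valid after later forks.
domain = spec.compute_domain(
    spec.DOMAIN_VOLUNTARY_EXIT,
    spec.config.CAPELLA_FORK_VERSION,
    state.genesis_validators_root,
)
signing_root = spec.compute_signing_root(voluntary_exit, domain)
signature = bls.Sign(privkey, signing_root)
```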
@@ -16,33 +16,45 @@ from eth2spec.test.helpers.sharding import (
)


@with_deneb_and_later
@spec_state_test
def test_one_blob(spec, state):
def run_block_with_blobs(spec, state, blob_count, data_gas_used=1, excess_data_gas=1, valid=True):
    yield 'pre', state

    block = build_empty_block_for_next_slot(spec, state)
    opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec)
    opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec, blob_count=blob_count)
    block.body.blob_kzg_commitments = blob_kzg_commitments
    block.body.execution_payload.transactions = [opaque_tx]
    block.body.execution_payload.data_gas_used = data_gas_used
    block.body.execution_payload.excess_data_gas = excess_data_gas
    block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
    signed_block = state_transition_and_sign_block(spec, state, block)

    if valid:
        signed_block = state_transition_and_sign_block(spec, state, block)
    else:
        signed_block = state_transition_and_sign_block(spec, state, block, expect_fail=True)

    yield 'blocks', [signed_block]
    yield 'post', state
    yield 'post', state if valid else None


@with_deneb_and_later
@spec_state_test
def test_zero_blob(spec, state):
    yield from run_block_with_blobs(spec, state, blob_count=0)


@with_deneb_and_later
@spec_state_test
def test_max_blobs(spec, state):
    yield 'pre', state
def test_one_blob(spec, state):
    yield from run_block_with_blobs(spec, state, blob_count=1)

    block = build_empty_block_for_next_slot(spec, state)
    opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec, blob_count=spec.MAX_BLOBS_PER_BLOCK)
    block.body.blob_kzg_commitments = blob_kzg_commitments
    block.body.execution_payload.transactions = [opaque_tx]
    block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
    signed_block = state_transition_and_sign_block(spec, state, block)

    yield 'blocks', [signed_block]
    yield 'post', state
@with_deneb_and_later
@spec_state_test
def test_max_blobs_per_block(spec, state):
    yield from run_block_with_blobs(spec, state, blob_count=spec.MAX_BLOBS_PER_BLOCK)


@with_deneb_and_later
@spec_state_test
def test_invalid_exceed_max_blobs_per_block(spec, state):
    yield from run_block_with_blobs(spec, state, blob_count=spec.MAX_BLOBS_PER_BLOCK + 1, valid=False)
@@ -1,54 +0,0 @@
from eth2spec.test.helpers.state import (
    state_transition_and_sign_block,
)
from eth2spec.test.helpers.block import (
    build_empty_block_for_next_slot
)
from eth2spec.test.context import (
    spec_state_test,
    with_deneb_and_later,
)
from eth2spec.test.helpers.execution_payload import (
    compute_el_block_hash,
)
from eth2spec.test.helpers.sharding import (
    get_sample_opaque_tx,
)


def _run_validate_blobs(spec, state, blob_count):
    block = build_empty_block_for_next_slot(spec, state)
    opaque_tx, blobs, blob_kzg_commitments, kzg_proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
    block.body.blob_kzg_commitments = blob_kzg_commitments
    block.body.execution_payload.transactions = [opaque_tx]
    block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
    state_transition_and_sign_block(spec, state, block)

    blob_sidecars = spec.get_blob_sidecars(block, blobs, kzg_proofs)
    blobs = [sidecar.blob for sidecar in blob_sidecars]
    kzg_proofs = [sidecar.kzg_proof for sidecar in blob_sidecars]
    spec.validate_blobs(blob_kzg_commitments, blobs, kzg_proofs)


@with_deneb_and_later
@spec_state_test
def test_validate_blobs_zero_blobs(spec, state):
    _run_validate_blobs(spec, state, blob_count=0)


@with_deneb_and_later
@spec_state_test
def test_validate_blobs_one_blob(spec, state):
    _run_validate_blobs(spec, state, blob_count=1)


@with_deneb_and_later
@spec_state_test
def test_validate_blobs_two_blobs(spec, state):
    _run_validate_blobs(spec, state, blob_count=2)


@with_deneb_and_later
@spec_state_test
def test_validate_blobs_max_blobs(spec, state):
    _run_validate_blobs(spec, state, blob_count=spec.MAX_BLOBS_PER_BLOCK)
@@ -33,7 +33,7 @@ def bls_add_one(x):


def field_element_bytes(x):
    return int.to_bytes(x % BLS_MODULUS, 32, "little")
    return int.to_bytes(x % BLS_MODULUS, 32, "big")


@with_deneb_and_later
@@ -304,7 +304,7 @@ def test_bytes_to_bls_field_modulus_minus_one(spec):
    Verify that `bytes_to_bls_field` handles modulus minus one
    """

    spec.bytes_to_bls_field((BLS_MODULUS - 1).to_bytes(spec.BYTES_PER_FIELD_ELEMENT, spec.ENDIANNESS))
    spec.bytes_to_bls_field((BLS_MODULUS - 1).to_bytes(spec.BYTES_PER_FIELD_ELEMENT, spec.KZG_ENDIANNESS))


@with_deneb_and_later
@@ -316,7 +316,7 @@ def test_bytes_to_bls_field_modulus(spec):
    """

    expect_assertion_error(lambda: spec.bytes_to_bls_field(
        BLS_MODULUS.to_bytes(spec.BYTES_PER_FIELD_ELEMENT, spec.ENDIANNESS)
        BLS_MODULUS.to_bytes(spec.BYTES_PER_FIELD_ELEMENT, spec.KZG_ENDIANNESS)
    ))

@@ -0,0 +1,22 @@
from eth2spec.test.context import (
    single_phase,
    spec_test,
    with_deneb_and_later,
)


@with_deneb_and_later
@spec_test
@single_phase
def test_length(spec):
    assert spec.MAX_BLOBS_PER_BLOCK < spec.MAX_BLOB_COMMITMENTS_PER_BLOCK


@with_deneb_and_later
@spec_test
@single_phase
def test_networking(spec):
    assert spec.MAX_BLOBS_PER_BLOCK < spec.MAX_BLOB_COMMITMENTS_PER_BLOCK
    assert spec.config.MAX_REQUEST_BLOB_SIDECARS == spec.config.MAX_REQUEST_BLOCKS_DENEB * spec.MAX_BLOBS_PER_BLOCK
    # Start with the same size, but `BLOB_SIDECAR_SUBNET_COUNT` could potentially increase later.
    assert spec.config.BLOB_SIDECAR_SUBNET_COUNT == spec.MAX_BLOBS_PER_BLOCK
@@ -1,23 +0,0 @@

from eth2spec.test.helpers.constants import (
    DENEB,
    MINIMAL,
)
from eth2spec.test.helpers.sharding import (
    get_sample_opaque_tx,
)
from eth2spec.test.context import (
    with_phases,
    spec_state_test,
    with_presets,
)


@with_phases([DENEB])
@spec_state_test
@with_presets([MINIMAL])
def test_tx_peek_blob_versioned_hashes(spec, state):
    otx, _, commitments, _ = get_sample_opaque_tx(spec)
    data_hashes = spec.tx_peek_blob_versioned_hashes(otx)
    expected = [spec.kzg_commitment_to_versioned_hash(blob_commitment) for blob_commitment in commitments]
    assert expected == data_hashes
@@ -2,7 +2,6 @@ from eth2spec.test.context import (
    always_bls,
    spec_state_test,
    with_deneb_and_later,
    expect_assertion_error
)
from eth2spec.test.helpers.execution_payload import (
    compute_el_block_hash,
@@ -18,96 +17,6 @@ from eth2spec.test.helpers.keys import (
)


@with_deneb_and_later
@spec_state_test
def test_validate_blobs_and_kzg_commitments(spec, state):
    """
    Test `validate_blobs_and_kzg_commitments`
    """
    blob_count = 4
    block = build_empty_block_for_next_slot(spec, state)
    opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
    block.body.blob_kzg_commitments = blob_kzg_commitments
    block.body.execution_payload.transactions = [opaque_tx]
    block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)

    spec.validate_blobs_and_kzg_commitments(block.body.execution_payload,
                                            blobs,
                                            blob_kzg_commitments,
                                            proofs)


@with_deneb_and_later
@spec_state_test
def test_validate_blobs_and_kzg_commitments_missing_blob(spec, state):
    """
    Test `validate_blobs_and_kzg_commitments`
    """
    blob_count = 4
    block = build_empty_block_for_next_slot(spec, state)
    opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
    block.body.blob_kzg_commitments = blob_kzg_commitments
    block.body.execution_payload.transactions = [opaque_tx]
    block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)

    expect_assertion_error(
        lambda: spec.validate_blobs_and_kzg_commitments(
            block.body.execution_payload,
            blobs[:-1],
            blob_kzg_commitments,
            proofs
        )
    )


@with_deneb_and_later
@spec_state_test
def test_validate_blobs_and_kzg_commitments_missing_proof(spec, state):
    """
    Test `validate_blobs_and_kzg_commitments`
    """
    blob_count = 4
    block = build_empty_block_for_next_slot(spec, state)
    opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
    block.body.blob_kzg_commitments = blob_kzg_commitments
    block.body.execution_payload.transactions = [opaque_tx]
    block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)

    expect_assertion_error(
        lambda: spec.validate_blobs_and_kzg_commitments(
            block.body.execution_payload,
            blobs,
            blob_kzg_commitments,
            proofs[:-1]
        )
    )


@with_deneb_and_later
@spec_state_test
def test_validate_blobs_and_kzg_commitments_incorrect_blob(spec, state):
    """
    Test `validate_blobs_and_kzg_commitments`
    """
    blob_count = 4
    block = build_empty_block_for_next_slot(spec, state)
    opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
    block.body.blob_kzg_commitments = blob_kzg_commitments
    block.body.execution_payload.transactions = [opaque_tx]
    block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)

    blobs[1] = spec.Blob(blobs[1][:13] + bytes([(blobs[1][13] + 1) % 256]) + blobs[1][14:])

    expect_assertion_error(
        lambda: spec.validate_blobs_and_kzg_commitments(
            block.body.execution_payload,
            blobs,
            blob_kzg_commitments,
            proofs
        )
    )


@with_deneb_and_later
@spec_state_test
def test_blob_sidecar_signature(spec, state):
@@ -4,6 +4,7 @@ from .typing import SpecForkName, PresetBaseName
#
# SpecForkName
#

# Some of the Spec module functionality is exposed here to deal with phase-specific changes.
PHASE0 = SpecForkName('phase0')
ALTAIR = SpecForkName('altair')
@@ -17,15 +18,25 @@ CUSTODY_GAME = SpecForkName('custody_game')
DAS = SpecForkName('das')
EIP6110 = SpecForkName('eip6110')

#
# SpecFork settings
#

# The forks that are deployed on Mainnet
MAINNET_FORKS = (PHASE0, ALTAIR, BELLATRIX, CAPELLA)
LATEST_FORK = MAINNET_FORKS[-1]
# The forks that pytest can run with.
ALL_PHASES = (
    # Formal forks
    PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB,
    # Experimental patches
    *MAINNET_FORKS,
    DENEB,
    # Experimental features
    EIP6110,
)
# The forks that have light client specs
LIGHT_CLIENT_TESTING_FORKS = (*[item for item in MAINNET_FORKS if item != PHASE0], DENEB)
# The forks that output to the test vectors.
TESTGEN_FORKS = (PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110)
TESTGEN_FORKS = (*MAINNET_FORKS, DENEB, EIP6110)

ALL_FORK_UPGRADES = {
    # pre_fork_name: post_fork_name
@@ -46,7 +57,7 @@ AFTER_DENEB_UPGRADES = {key: value for key, value in ALL_FORK_UPGRADES.items()
AFTER_DENEB_PRE_POST_FORKS = AFTER_DENEB_UPGRADES.items()

#
# Config
# Config and Preset
#
MAINNET = PresetBaseName('mainnet')
MINIMAL = PresetBaseName('minimal')
@@ -31,6 +31,7 @@ def get_execution_payload_header(spec, execution_payload):
    if is_post_capella(spec):
        payload_header.withdrawals_root = spec.hash_tree_root(execution_payload.withdrawals)
    if is_post_deneb(spec):
        payload_header.data_gas_used = execution_payload.data_gas_used
        payload_header.excess_data_gas = execution_payload.excess_data_gas
    if is_post_eip6110(spec):
        payload_header.deposit_receipts_root = spec.hash_tree_root(execution_payload.deposit_receipts)
@@ -98,6 +99,7 @@ def compute_el_header_block_hash(spec,
        execution_payload_header_rlp.append((Binary(32, 32), withdrawals_trie_root))
    if is_post_deneb(spec):
        # data_gas_used, excess_data_gas
        execution_payload_header_rlp.append((big_endian_int, payload_header.data_gas_used))
        execution_payload_header_rlp.append((big_endian_int, payload_header.excess_data_gas))
    if is_post_eip6110(spec):
        # deposit_receipts_root
@@ -200,6 +202,9 @@ def build_empty_execution_payload(spec, state, randao_mix=None):
    )
    if is_post_capella(spec):
        payload.withdrawals = spec.get_expected_withdrawals(state)
    if is_post_deneb(spec):
        payload.data_gas_used = 0
        payload.excess_data_gas = 0
    if is_post_eip6110(spec):
        # just to be clear
        payload.deposit_receipts = []
@@ -91,7 +91,7 @@ def add_optimistic_block(spec, mega_store, signed_block, test_steps,
    Add a block with optimistic sync logic

    ``valid`` indicates if the given ``signed_block.message.body.execution_payload`` is valid/invalid
    from ``notify_new_payload`` method response.
    from ``verify_and_notify_new_payload`` method response.
    """
    block = signed_block.message
    block_root = block.hash_tree_root()
@@ -50,18 +50,15 @@ class SignedBlobTransaction(Container):
    signature: ECDSASignature


def get_sample_blob(spec, rng=None):
    if rng is None:
        rng = random.Random(5566)

def get_sample_blob(spec, rng=random.Random(5566), is_valid_blob=True):
    values = [
        rng.randint(0, spec.BLS_MODULUS - 1)
        rng.randint(0, spec.BLS_MODULUS - 1) if is_valid_blob else spec.BLS_MODULUS
        for _ in range(spec.FIELD_ELEMENTS_PER_BLOB)
    ]

    b = bytes()
    for v in values:
        b += v.to_bytes(32, spec.ENDIANNESS)
        b += v.to_bytes(32, spec.KZG_ENDIANNESS)

    return spec.Blob(b)

@@ -98,15 +95,19 @@ def get_poly_in_both_forms(spec, rng=None):
    return coeffs, evals


def get_sample_opaque_tx(spec, blob_count=1, rng=None):
def get_sample_opaque_tx(spec, blob_count=1, rng=random.Random(5566), is_valid_blob=True):
    blobs = []
    blob_kzg_commitments = []
    blob_kzg_proofs = []
    blob_versioned_hashes = []
    for _ in range(blob_count):
        blob = get_sample_blob(spec, rng)
        blob_commitment = spec.KZGCommitment(spec.blob_to_kzg_commitment(blob))
        blob_kzg_proof = spec.compute_blob_kzg_proof(blob, blob_commitment)
        blob = get_sample_blob(spec, rng, is_valid_blob=is_valid_blob)
        if is_valid_blob:
            blob_commitment = spec.KZGCommitment(spec.blob_to_kzg_commitment(blob))
            blob_kzg_proof = spec.compute_blob_kzg_proof(blob, blob_commitment)
        else:
            blob_commitment = spec.KZGCommitment()
            blob_kzg_proof = spec.KZGProof()
        blob_versioned_hash = spec.kzg_commitment_to_versioned_hash(blob_commitment)
        blobs.append(blob)
        blob_kzg_commitments.append(blob_commitment)
@@ -1,29 +1,31 @@
from random import Random
from eth2spec.utils import bls
from eth2spec.test.context import expect_assertion_error
from eth2spec.test.helpers.forks import is_post_deneb
from eth2spec.test.helpers.keys import privkeys


def prepare_signed_exits(spec, state, indices, fork_version=None):
    if fork_version is None:
        domain = spec.get_domain(state, spec.DOMAIN_VOLUNTARY_EXIT)
    else:
        domain = spec.compute_domain(spec.DOMAIN_VOLUNTARY_EXIT, fork_version, state.genesis_validators_root)

    def create_signed_exit(index):
        exit = spec.VoluntaryExit(
        voluntary_exit = spec.VoluntaryExit(
            epoch=spec.get_current_epoch(state),
            validator_index=index,
        )
        signing_root = spec.compute_signing_root(exit, domain)
        return spec.SignedVoluntaryExit(message=exit, signature=bls.Sign(privkeys[index], signing_root))
        return sign_voluntary_exit(spec, state, voluntary_exit, privkeys[index], fork_version=fork_version)

    return [create_signed_exit(index) for index in indices]


def sign_voluntary_exit(spec, state, voluntary_exit, privkey, fork_version=None):
    if fork_version is None:
        domain = spec.get_domain(state, spec.DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch)
        if is_post_deneb(spec):
            domain = spec.compute_domain(
                spec.DOMAIN_VOLUNTARY_EXIT,
                spec.config.CAPELLA_FORK_VERSION,
                state.genesis_validators_root,
            )
        else:
            domain = spec.get_domain(state, spec.DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch)
    else:
        domain = spec.compute_domain(spec.DOMAIN_VOLUNTARY_EXIT, fork_version, state.genesis_validators_root)

@@ -143,7 +143,7 @@ def process_and_sign_block_without_header_validations(spec, state, block):
    )
    if is_post_bellatrix(spec):
        if spec.is_execution_enabled(state, block.body):
            spec.process_execution_payload(state, block.body.execution_payload, spec.EXECUTION_ENGINE)
            spec.process_execution_payload(state, block.body, spec.EXECUTION_ENGINE)

    # Perform rest of process_block transitions
    spec.process_randao(state, block.body)
@@ -75,7 +75,15 @@ def test_time(spec, state):
@with_all_phases
@spec_state_test
def test_networking(spec, state):
    assert spec.RANDOM_SUBNETS_PER_VALIDATOR <= spec.ATTESTATION_SUBNET_COUNT
    assert spec.config.MIN_EPOCHS_FOR_BLOCK_REQUESTS == (
        spec.config.MIN_VALIDATOR_WITHDRAWABILITY_DELAY + spec.config.CHURN_LIMIT_QUOTIENT // 2
    )
    assert spec.config.ATTESTATION_SUBNET_PREFIX_BITS == (
        spec.ceillog2(spec.config.ATTESTATION_SUBNET_COUNT) + spec.config.ATTESTATION_SUBNET_EXTRA_BITS
    )
    assert spec.config.SUBNETS_PER_NODE <= spec.config.ATTESTATION_SUBNET_COUNT
    node_id_length = spec.NodeID(1).type_byte_length()  # in bytes
    assert node_id_length * 8 == spec.NODE_ID_BITS  # in bits


@with_all_phases
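
Aside: as a sanity check on the first new invariant, plugging in the mainnet config values (assumed here; the config files are authoritative) gives the familiar constant:

```python
MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256  # epochs, mainnet value (assumed)
CHURN_LIMIT_QUOTIENT = 65536               # mainnet value (assumed)

# 256 + 65536 // 2 == 33024 epochs, i.e. MIN_EPOCHS_FOR_BLOCK_REQUESTS.
assert MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2 == 33024
```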
@@ -1,6 +1,12 @@
import random

from eth2spec.test.context import (
    single_phase,
    spec_state_test,
    always_bls, with_phases, with_all_phases,
    spec_test,
    always_bls,
    with_phases,
    with_all_phases,
)
from eth2spec.test.helpers.constants import PHASE0
from eth2spec.test.helpers.attestations import build_attestation_data, get_valid_attestation
@@ -365,7 +371,7 @@ def test_compute_subnet_for_attestation(spec, state):

    slots_since_epoch_start = slot % spec.SLOTS_PER_EPOCH
    committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
    expected_subnet_id = (committees_since_epoch_start + committee_idx) % spec.ATTESTATION_SUBNET_COUNT
    expected_subnet_id = (committees_since_epoch_start + committee_idx) % spec.config.ATTESTATION_SUBNET_COUNT

    assert actual_subnet_id == expected_subnet_id
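
Aside: a quick worked instance of the subnet formula above, with illustrative numbers (only `ATTESTATION_SUBNET_COUNT == 64` matches the real config; the rest are made up):

```python
ATTESTATION_SUBNET_COUNT = 64  # config value (assumed)
committees_per_slot = 4        # illustrative
slots_since_epoch_start = 10   # slot % SLOTS_PER_EPOCH, illustrative
committee_idx = 3

committees_since_epoch_start = committees_per_slot * slots_since_epoch_start  # 40
subnet_id = (committees_since_epoch_start + committee_idx) % ATTESTATION_SUBNET_COUNT
assert subnet_id == 43
```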
@@ -476,3 +482,34 @@ def test_get_aggregate_and_proof_signature(spec, state):
        privkey=privkey,
        pubkey=pubkey,
    )


def run_compute_subscribed_subnets_arguments(spec, rng=random.Random(1111)):
    node_id = rng.randint(0, 2**40 - 1)  # try VALIDATOR_REGISTRY_LIMIT
    epoch = rng.randint(0, 2**64 - 1)
    subnets = spec.compute_subscribed_subnets(node_id, epoch)
    assert len(subnets) == spec.config.SUBNETS_PER_NODE


@with_all_phases
@spec_test
@single_phase
def test_compute_subscribed_subnets_random_1(spec):
    rng = random.Random(1111)
    run_compute_subscribed_subnets_arguments(spec, rng)


@with_all_phases
@spec_test
@single_phase
def test_compute_subscribed_subnets_random_2(spec):
    rng = random.Random(2222)
    run_compute_subscribed_subnets_arguments(spec, rng)


@with_all_phases
@spec_test
@single_phase
def test_compute_subscribed_subnets_random_3(spec):
    rng = random.Random(3333)
    run_compute_subscribed_subnets_arguments(spec, rng)
@@ -10,20 +10,13 @@ from typing import (
from pathlib import Path

from eth_utils import encode_hex
from py_ecc.optimized_bls12_381 import (  # noqa: F401
    G1,
    G2,
    Z1,
    Z2,
    curve_order as BLS_MODULUS,
    add,
    multiply,
    neg,
)
from py_ecc.typing import (
    Optimized_Point3D,
)
from eth2spec.utils import bls
from eth2spec.utils.bls import (
    BLS_MODULUS,
)


PRIMITIVE_ROOT_OF_UNITY = 7
@@ -35,7 +28,7 @@ def generate_setup(generator: Optimized_Point3D, secret: int, length: int) -> Tu
    """
    result = [generator]
    for _ in range(1, length):
        result.append(multiply(result[-1], secret))
        result.append(bls.multiply(result[-1], secret))
    return tuple(result)


@@ -49,9 +42,9 @@ def fft(vals: Sequence[Optimized_Point3D], modulus: int, domain: int) -> Sequenc
    R = fft(vals[1::2], modulus, domain[::2])
    o = [0] * len(vals)
    for i, (x, y) in enumerate(zip(L, R)):
        y_times_root = multiply(y, domain[i])
        o[i] = add(x, y_times_root)
        o[i + len(L)] = add(x, neg(y_times_root))
        y_times_root = bls.multiply(y, domain[i])
        o[i] = bls.add(x, y_times_root)
        o[i + len(L)] = bls.add(x, bls.neg(y_times_root))
    return o


@@ -90,12 +83,14 @@ def get_lagrange(setup: Sequence[Optimized_Point3D]) -> Tuple[bytes]:
    # TODO: introduce an IFFT function for simplicity
    fft_output = fft(setup, BLS_MODULUS, domain)
    inv_length = pow(len(setup), BLS_MODULUS - 2, BLS_MODULUS)
    return tuple(bls.G1_to_bytes48(multiply(fft_output[-i], inv_length)) for i in range(len(fft_output)))
    return tuple(bls.G1_to_bytes48(bls.multiply(fft_output[-i], inv_length)) for i in range(len(fft_output)))


def dump_kzg_trusted_setup_files(secret: int, g1_length: int, g2_length: int, output_dir: str) -> None:
    setup_g1 = generate_setup(bls.G1, secret, g1_length)
    setup_g2 = generate_setup(bls.G2, secret, g2_length)
    bls.use_fastest()

    setup_g1 = generate_setup(bls.G1(), secret, g1_length)
    setup_g2 = generate_setup(bls.G2(), secret, g2_length)
    setup_g1_lagrange = get_lagrange(setup_g1)
    roots_of_unity = compute_roots_of_unity(g1_length)
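
Aside: a hedged usage sketch of the generator above. The lengths mirror the mainnet preset (4096 G1 points for `FIELD_ELEMENTS_PER_BLOB` and 65 G2 points, both assumptions); the secret is a throwaway for local experimentation, never for a real trusted setup:

```python
dump_kzg_trusted_setup_files(
    secret=1337,                    # hypothetical toy secret
    g1_length=4096,                 # FIELD_ELEMENTS_PER_BLOB (assumed)
    g2_length=65,                   # assumed G2 setup size
    output_dir="./trusted_setups",
)
```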
@@ -114,8 +114,8 @@ Optional step for optimistic sync tests.

This step sets the [`payloadStatus`](https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#PayloadStatusV1)
value that the Execution Layer client mock returns in responses to the following Engine API calls:
* [`engine_newPayloadV1(payload)`](https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_newpayloadv1) if `payload.blockHash == payload_info.block_hash`
* [`engine_forkchoiceUpdatedV1(forkchoiceState, ...)`](https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_forkchoiceupdatedv1) if `forkchoiceState.headBlockHash == payload_info.block_hash`
* [`engine_newPayloadV1(payload)`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#engine_newpayloadv1) if `payload.blockHash == payload_info.block_hash`
* [`engine_forkchoiceUpdatedV1(forkchoiceState, ...)`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#engine_forkchoiceupdatedv1) if `forkchoiceState.headBlockHash == payload_info.block_hash`

*Note:* Status of a payload must be *initialized* via `on_payload_info` before the corresponding `on_block` execution step.
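
For illustration, an `on_payload_info` step could look like the following (field layout inferred from this format's conventions, not authoritative; expressed as a Python dict, with `payload_status` fields mirroring `PayloadStatusV1`):

```python
# Hypothetical step payload, for illustration only.
on_payload_info = {
    'block_hash': '0x...',
    'payload_status': {
        'status': 'INVALID',  # VALID | INVALID | SYNCING | ACCEPTED | INVALID_BLOCK_HASH
        'latest_valid_hash': '0x...',  # may be null
        'validation_error': None,
    },
}
```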
@@ -14,8 +14,8 @@ output: Tuple[KZGProof, Bytes32] -- The KZG proof and the value y = f(z)
```

- `blob` here is encoded as a string: hexadecimal encoding of `4096 * 32 = 131072` bytes, prefixed with `0x`.
- `z` here is encoded as a string: hexadecimal encoding of `32` bytes representing a little endian encoded field element, prefixed with `0x`.
- `y` here is encoded as a string: hexadecimal encoding of `32` bytes representing a little endian encoded field element, prefixed with `0x`.
- `z` here is encoded as a string: hexadecimal encoding of `32` bytes representing a big endian encoded field element, prefixed with `0x`.
- `y` here is encoded as a string: hexadecimal encoding of `32` bytes representing a big endian encoded field element, prefixed with `0x`.

All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.


@@ -15,8 +15,8 @@ input:
output: bool -- true (valid proof) or false (incorrect proof)
```

- `z` here is encoded as a string: hexadecimal encoding of `32` bytes representing a little endian encoded field element, prefixed with `0x`.
- `y` here is encoded as a string: hexadecimal encoding of `32` bytes representing a little endian encoded field element, prefixed with `0x`.
- `z` here is encoded as a string: hexadecimal encoding of `32` bytes representing a big endian encoded field element, prefixed with `0x`.
- `y` here is encoded as a string: hexadecimal encoding of `32` bytes representing a big endian encoded field element, prefixed with `0x`.

All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
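
As a concrete illustration of the encoding change above, a field element is now serialized big endian before hex-encoding (a sketch; `BLS_MODULUS` is the BLS12-381 scalar field order):

```python
BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513

def encode_field_element(x: int) -> str:
    # 32 bytes, big endian, 0x-prefixed hex: the `z` / `y` encoding described above.
    return '0x' + (x % BLS_MODULUS).to_bytes(32, 'big').hex()

assert encode_field_element(1) == '0x' + '00' * 31 + '01'
```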
@@ -42,7 +42,7 @@ Operations:
| `proposer_slashing` | `ProposerSlashing` | `proposer_slashing` | `process_proposer_slashing(state, proposer_slashing)` |
| `voluntary_exit` | `SignedVoluntaryExit` | `voluntary_exit` | `process_voluntary_exit(state, voluntary_exit)` |
| `sync_aggregate` | `SyncAggregate` | `sync_aggregate` | `process_sync_aggregate(state, sync_aggregate)` (new in Altair) |
| `execution_payload` | `ExecutionPayload` | `execution_payload` | `process_execution_payload(state, execution_payload)` (new in Bellatrix) |
| `execution_payload` | `BeaconBlockBody` | **`body`** | `process_execution_payload(state, body)` (new in Bellatrix) |
| `withdrawals` | `ExecutionPayload` | `execution_payload` | `process_withdrawals(state, execution_payload)` (new in Capella) |
| `bls_to_execution_change` | `SignedBLSToExecutionChange` | `address_change` | `process_bls_to_execution_change(state, address_change)` (new in Capella) |
| `deposit_receipt` | `DepositReceipt` | `deposit_receipt` | `process_deposit_receipt(state, deposit_receipt)` (new in EIP6110) |
@@ -27,11 +27,11 @@ def expect_exception(func, *args):


def field_element_bytes(x):
    return int.to_bytes(x % spec.BLS_MODULUS, 32, spec.ENDIANNESS)
    return int.to_bytes(x % spec.BLS_MODULUS, 32, spec.KZG_ENDIANNESS)


def field_element_bytes_unchecked(x):
    return int.to_bytes(x, 32, spec.ENDIANNESS)
    return int.to_bytes(x, 32, spec.KZG_ENDIANNESS)


def encode_hex_list(a):
@@ -54,7 +54,7 @@ def evaluate_blob_at(blob, z):
    )


BLS_MODULUS_BYTES = spec.BLS_MODULUS.to_bytes(32, spec.ENDIANNESS)
BLS_MODULUS_BYTES = spec.BLS_MODULUS.to_bytes(32, spec.KZG_ENDIANNESS)

G1 = bls.G1_to_bytes48(bls.G1())
G1_INVALID_TOO_FEW_BYTES = G1[:-1]
@@ -1,4 +1,4 @@
from eth2spec.test.helpers.constants import ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110
from eth2spec.test.helpers.constants import ALTAIR, BELLATRIX, CAPELLA, DENEB
from eth2spec.gen_helpers.gen_from_tests.gen import combine_mods, run_state_test_generators


@@ -15,14 +15,12 @@ if __name__ == "__main__":
    ]}
    capella_mods = combine_mods(_new_capella_mods, bellatrix_mods)
    deneb_mods = capella_mods
    eip6110_mods = deneb_mods

    all_mods = {
        ALTAIR: altair_mods,
        BELLATRIX: bellatrix_mods,
        CAPELLA: capella_mods,
        DENEB: deneb_mods,
        EIP6110: eip6110_mods,
    }

    run_state_test_generators(runner_name="light_client", all_mods=all_mods)
@@ -30,13 +30,18 @@ if __name__ == "__main__":
    bellatrix_mods = combine_mods(_new_bellatrix_mods, altair_mods)

    _new_capella_mods = {key: 'eth2spec.test.capella.block_processing.test_process_' + key for key in [
        'deposit',
        'bls_to_execution_change',
        'deposit',
        'execution_payload',
        'withdrawals',
    ]}
    capella_mods = combine_mods(_new_capella_mods, bellatrix_mods)

    deneb_mods = capella_mods
    _new_deneb_mods = {key: 'eth2spec.test.deneb.block_processing.test_process_' + key for key in [
        'execution_payload',
        'voluntary_exit',
    ]}
    deneb_mods = combine_mods(_new_deneb_mods, capella_mods)

    _new_eip6110_mods = {key: 'eth2spec.test.eip6110.block_processing.test_process_' + key for key in [
        'deposit_receipt',
@@ -1,5 +1,5 @@
from eth_utils import to_tuple
from typing import Iterable
import random

from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing
from eth2spec.test.helpers.typing import PresetBaseName
@@ -8,6 +8,16 @@ from eth2spec.phase0 import mainnet as spec_mainnet, minimal as spec_minimal
from eth2spec.test.helpers.constants import PHASE0, MINIMAL, MAINNET


def generate_random_bytes(rng=random.Random(5566)):
    random_bytes = bytes(rng.randint(0, 255) for _ in range(32))
    return random_bytes


# NOTE: somehow the seeds generated via random.Random do not hit the pickle issue.
rng = random.Random(1234)
seeds = [generate_random_bytes(rng) for i in range(30)]


def shuffling_case_fn(spec, seed, count):
    yield 'mapping', 'data', {
        'seed': '0x' + seed.hex(),
@@ -20,9 +30,8 @@ def shuffling_case(spec, seed, count):
    return f'shuffle_0x{seed.hex()}_{count}', lambda: shuffling_case_fn(spec, seed, count)


@to_tuple
def shuffling_test_cases(spec):
    for seed in [spec.hash(seed_init_value.to_bytes(length=4, byteorder='little')) for seed_init_value in range(30)]:
    for seed in seeds:
        for count in [0, 1, 2, 3, 5, 10, 33, 100, 1000, 9999]:
            yield shuffling_case(spec, seed, count)

@@ -47,11 +56,13 @@ def create_provider(preset_name: PresetBaseName) -> gen_typing.TestProvider:
                handler_name='core',
                suite_name='shuffle',
                case_name=case_name,
                case_fn=case_fn
                case_fn=case_fn,
            )

    return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)


if __name__ == "__main__":
    gen_runner.run_generator("shuffling", [create_provider(MINIMAL), create_provider(MAINNET)])
    gen_runner.run_generator("shuffling", [
        create_provider(MINIMAL), create_provider(MAINNET)]
    )