Mirror of https://github.com/ethereum/consensus-specs.git
Merge branch 'dev'
@@ -7,27 +7,30 @@ With this executable spec,
test-generators can easily create test-vectors for client implementations,
and the spec itself can be verified to be consistent and coherent through sanity tests implemented with pytest.

## Building

To build the pyspec: `python setup.py build`
(or `pip install .`, but beware that ignored files will still be copied over to a temporary dir, due to pip issue 2195).
This outputs the build files to the `./build/lib/eth2spec/...` dir, and can't be used for local test running. Instead, use the dev-install as described below.
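As a quick reference, the two build routes named above look like this when run from the directory that contains `setup.py` (an illustrative sketch of the commands already described, not an addition to the official workflow):

```shell
# Standard build: writes the generated spec files under ./build/lib/eth2spec/...
# (not usable for local test running; see the dev-install below)
python setup.py build

# Equivalent route via pip; note the pip issue 2195 caveat about ignored files
pip install .
```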

## Dev Install

First, create a `venv` and install the developer dependencies (`test` and `lint` extras):

```shell
make install_test
```

Or manually: run `pip install .[test]` and `pip install .[lint]`.

All the dynamic parts of the spec are automatically built with:

```shell
(venv) python setup.py pyspecdev
```

Unlike the regular install, this outputs spec files to their original source location instead of build output only,
so that debuggers can navigate between packages and generated code without fragile directory linking.

By default, when installing `eth2spec` as a package in non-develop mode,
the distutils implementation of `setup` runs `build`, which is extended to run the same `pyspec` work,
but outputs into the standard `./build/lib` output.
This enables the `eth2.0-specs` repository to be installed like any other python package.

Alternatively, you can build a sub-set of the pyspec with the distutil command:

```bash
python setup.py pyspec --spec-fork=phase0 --md-doc-paths="specs/phase0/beacon-chain.md specs/phase0/fork-choice.md" --out-dir=my_spec_dir
```


## Py-tests

These tests are not intended for client-consumption.
These tests are testing the spec itself, to verify consistency and provide feedback on modifications of the spec.
@@ -39,20 +42,32 @@ However, most of the tests can be run in generator-mode, to output test vectors

Run `make test` from the root of the specs repository (after running `make install_test` if you have not done so before).

Note that the `make` commands run through the build steps: they run the `build` output, not the local package source files.

#### Manual

See `Dev install` above for the test pre-requisites.

From the repository root, create a venv and do the dev install:

```bash
python3 -m venv venv
. venv/bin/activate
python setup.py pyspecdev
```

Tests are built for `pytest`.

Caveats:
- The working directory must be `./tests/core/pyspec`. The work-directory is important to locate eth2 configuration files.
- Run `pytest` as a module. It avoids environment differences, and the behavior is different too:
  `pytest` as a module adds the current directory to `sys.path`.

Run the test command from the `tests/core/pyspec` directory:

```shell
pytest --config=minimal eth2spec
```

Full test usage, with explicit configuration for illustration of options usage:

```shell
(venv) python -m pytest --config=minimal eth2spec
```

Or, to run a specific test file, specify the full path:

```shell
(venv) python -m pytest --config=minimal ./eth2spec/test/phase0/block_processing/test_process_attestation.py
```

Or, to run a specific test function (specify the `eth2spec` module, or the script path if the keyword is ambiguous):

```shell
(venv) python -m pytest --config=minimal -k test_success_multi_proposer_index_iterations eth2spec
```

Options:
@@ -64,6 +79,12 @@ Options:

Run `make open_cov` from the root of the specs repository after running `make test` to open the html code coverage report.
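If you want a coverage report without the Makefile wrapper, a rough equivalent using `pytest-cov` could look like the sketch below; the exact flags that `make test` passes are an assumption here, not taken from the Makefile:

```shell
cd tests/core/pyspec
python -m pytest --config=minimal --cov=eth2spec --cov-report=html eth2spec
# pytest-cov writes the html report to ./htmlcov/index.html by default
```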

### Advanced

Building spec files from any markdown sources, to a custom location:

```bash
(venv) python setup.py pyspec --spec-fork=phase0 --md-doc-paths="specs/phase0/beacon-chain.md specs/phase0/fork-choice.md" --out-dir=my_spec_dir
```

## Contributing
@@ -1 +1 @@
1.1.0-alpha.3
1.1.0-alpha.4
@@ -54,8 +54,7 @@ def load_config_file(configs_dir: str, presets_name: str) -> Dict[str, Any]:
            out[k] = [int(item) if item.isdigit() else item for item in v]
        elif isinstance(v, str) and v.startswith("0x"):
            out[k] = bytes.fromhex(v[2:])
        elif k == "CONFIG_NAME":
            out[k] = str(v)
        else:
            out[k] = int(v)
    out['CONFIG_NAME'] = presets_name
    return out
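For orientation, here is a hedged sketch of how this loader tends to be called from test tooling; the function signature comes from the hunk header above, while the configs path shown is purely illustrative:

```python
from eth2spec.config import config_util

# Load the 'minimal' preset from a local configs directory (path is illustrative).
config = config_util.load_config_file('../../../configs', 'minimal')

# After this change, CONFIG_NAME is always set from the presets-name argument.
assert config['CONFIG_NAME'] == 'minimal'
```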
@@ -4,7 +4,8 @@ from typing import Any, Callable, Dict, Iterable, Optional
from eth2spec.config import config_util
from eth2spec.utils import bls
from eth2spec.test.context import ALL_CONFIGS, TESTGEN_FORKS, SpecForkName, ConfigName
from eth2spec.test.helpers.constants import ALL_CONFIGS, TESTGEN_FORKS
from eth2spec.test.helpers.typing import SpecForkName, ConfigName

from eth2spec.gen_helpers.gen_base import gen_runner
from eth2spec.gen_helpers.gen_base.gen_typing import TestCase, TestProvider
@@ -8,14 +8,15 @@ from eth2spec.test.helpers.state import (
    state_transition_and_sign_block,
    transition_to,
)
from eth2spec.test.helpers.constants import (
    MAINNET, MINIMAL,
)
from eth2spec.test.helpers.sync_committee import (
    compute_aggregate_sync_committee_signature,
)
from eth2spec.test.context import (
    PHASE0,
    MAINNET, MINIMAL,
    expect_assertion_error,
    with_all_phases_except,
    with_altair_and_later,
    with_configs,
    spec_state_test,
    always_bls,
@@ -48,9 +49,9 @@ def get_committee_indices(spec, state, duplicates=False):
    """
    state = state.copy()
    current_epoch = spec.get_current_epoch(state)
    randao_index = current_epoch % spec.EPOCHS_PER_HISTORICAL_VECTOR
    randao_index = (current_epoch + 1) % spec.EPOCHS_PER_HISTORICAL_VECTOR
    while True:
        committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
        committee = spec.get_next_sync_committee_indices(state)
        if duplicates:
            if len(committee) != len(set(committee)):
                return committee
@@ -60,57 +61,73 @@ def get_committee_indices(spec, state, duplicates=False):
|
||||
state.randao_mixes[randao_index] = hash(state.randao_mixes[randao_index])
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
def compute_committee_indices(spec, state, committee):
|
||||
"""
|
||||
Given a ``committee``, calculate and return the related indices
|
||||
"""
|
||||
all_pubkeys = [v.pubkey for v in state.validators]
|
||||
committee_indices = [all_pubkeys.index(pubkey) for pubkey in committee.pubkeys]
|
||||
return committee_indices
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_invalid_signature_missing_participant(spec, state):
|
||||
committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
|
||||
committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
|
||||
rng = random.Random(2020)
|
||||
random_participant = rng.choice(committee)
|
||||
random_participant = rng.choice(committee_indices)
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
# Exclude one participant whose signature was included.
|
||||
block.body.sync_aggregate = spec.SyncAggregate(
|
||||
sync_committee_bits=[index != random_participant for index in committee],
|
||||
sync_committee_bits=[index != random_participant for index in committee_indices],
|
||||
sync_committee_signature=compute_aggregate_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
block.slot - 1,
|
||||
committee, # full committee signs
|
||||
committee_indices, # full committee signs
|
||||
)
|
||||
)
|
||||
yield from run_sync_committee_processing(spec, state, block, expect_exception=True)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_invalid_signature_extra_participant(spec, state):
|
||||
committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
|
||||
committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
|
||||
rng = random.Random(3030)
|
||||
random_participant = rng.choice(committee)
|
||||
random_participant = rng.choice(committee_indices)
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
# Exclude one signature even though the block claims the entire committee participated.
|
||||
block.body.sync_aggregate = spec.SyncAggregate(
|
||||
sync_committee_bits=[True] * len(committee),
|
||||
sync_committee_bits=[True] * len(committee_indices),
|
||||
sync_committee_signature=compute_aggregate_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
block.slot - 1,
|
||||
[index for index in committee if index != random_participant],
|
||||
[index for index in committee_indices if index != random_participant],
|
||||
)
|
||||
)
|
||||
|
||||
yield from run_sync_committee_processing(spec, state, block, expect_exception=True)
|
||||
|
||||
|
||||
def compute_sync_committee_inclusion_reward(spec, state, participant_index, committee, committee_bits):
|
||||
def compute_sync_committee_inclusion_reward(spec,
|
||||
state,
|
||||
participant_index,
|
||||
committee_indices,
|
||||
committee_bits):
|
||||
total_active_increments = spec.get_total_active_balance(state) // spec.EFFECTIVE_BALANCE_INCREMENT
|
||||
total_base_rewards = spec.Gwei(spec.get_base_reward_per_increment(state) * total_active_increments)
|
||||
max_epoch_rewards = spec.Gwei(total_base_rewards * spec.SYNC_REWARD_WEIGHT // spec.WEIGHT_DENOMINATOR)
|
||||
included_indices = [index for index, bit in zip(committee, committee_bits) if bit]
|
||||
max_slot_rewards = spec.Gwei(max_epoch_rewards * len(included_indices) // len(committee) // spec.SLOTS_PER_EPOCH)
|
||||
included_indices = [index for index, bit in zip(committee_indices, committee_bits) if bit]
|
||||
max_slot_rewards = spec.Gwei(
|
||||
max_epoch_rewards * len(included_indices)
|
||||
// len(committee_indices) // spec.SLOTS_PER_EPOCH
|
||||
)
|
||||
|
||||
# Compute the participant and proposer sync rewards
|
||||
committee_effective_balance = sum([state.validators[index].effective_balance for index in included_indices])
|
||||
@@ -119,23 +136,23 @@ def compute_sync_committee_inclusion_reward(spec, state, participant_index, comm
|
||||
return spec.Gwei(max_slot_rewards * effective_balance // committee_effective_balance)
|
||||
|
||||
|
||||
def compute_sync_committee_participant_reward(spec, state, participant_index, committee, committee_bits):
|
||||
included_indices = [index for index, bit in zip(committee, committee_bits) if bit]
|
||||
def compute_sync_committee_participant_reward(spec, state, participant_index, committee_indices, committee_bits):
|
||||
included_indices = [index for index, bit in zip(committee_indices, committee_bits) if bit]
|
||||
multiplicities = Counter(included_indices)
|
||||
|
||||
inclusion_reward = compute_sync_committee_inclusion_reward(
|
||||
spec, state, participant_index, committee, committee_bits,
|
||||
spec, state, participant_index, committee_indices, committee_bits,
|
||||
)
|
||||
return spec.Gwei(inclusion_reward * multiplicities[participant_index])
|
||||
|
||||
|
||||
def compute_sync_committee_proposer_reward(spec, state, committee, committee_bits):
|
||||
def compute_sync_committee_proposer_reward(spec, state, committee_indices, committee_bits):
|
||||
proposer_reward = 0
|
||||
for index, bit in zip(committee, committee_bits):
|
||||
for index, bit in zip(committee_indices, committee_bits):
|
||||
if not bit:
|
||||
continue
|
||||
inclusion_reward = compute_sync_committee_inclusion_reward(
|
||||
spec, state, index, committee, committee_bits,
|
||||
spec, state, index, committee_indices, committee_bits,
|
||||
)
|
||||
proposer_reward_denominator = (
|
||||
(spec.WEIGHT_DENOMINATOR - spec.PROPOSER_WEIGHT)
|
||||
@@ -146,30 +163,30 @@ def compute_sync_committee_proposer_reward(spec, state, committee, committee_bit
|
||||
return proposer_reward
|
||||
|
||||
|
||||
def validate_sync_committee_rewards(spec, pre_state, post_state, committee, committee_bits, proposer_index):
|
||||
def validate_sync_committee_rewards(spec, pre_state, post_state, committee_indices, committee_bits, proposer_index):
|
||||
for index in range(len(post_state.validators)):
|
||||
reward = 0
|
||||
if index in committee:
|
||||
if index in committee_indices:
|
||||
reward += compute_sync_committee_participant_reward(
|
||||
spec,
|
||||
pre_state,
|
||||
index,
|
||||
committee,
|
||||
committee_indices,
|
||||
committee_bits,
|
||||
)
|
||||
|
||||
if proposer_index == index:
|
||||
reward += compute_sync_committee_proposer_reward(
|
||||
spec,
|
||||
pre_state,
|
||||
committee,
|
||||
committee_bits,
|
||||
)
|
||||
if proposer_index == index:
|
||||
reward += compute_sync_committee_proposer_reward(
|
||||
spec,
|
||||
pre_state,
|
||||
committee_indices,
|
||||
committee_bits,
|
||||
)
|
||||
|
||||
assert post_state.balances[index] == pre_state.balances[index] + reward
|
||||
|
||||
|
||||
def run_successful_sync_committee_test(spec, state, committee, committee_bits):
|
||||
def run_successful_sync_committee_test(spec, state, committee_indices, committee_bits):
|
||||
pre_state = state.copy()
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
@@ -179,7 +196,7 @@ def run_successful_sync_committee_test(spec, state, committee, committee_bits):
|
||||
spec,
|
||||
state,
|
||||
block.slot - 1,
|
||||
[index for index, bit in zip(committee, committee_bits) if bit],
|
||||
[index for index, bit in zip(committee_indices, committee_bits) if bit],
|
||||
)
|
||||
)
|
||||
|
||||
@@ -189,60 +206,70 @@ def run_successful_sync_committee_test(spec, state, committee, committee_bits):
|
||||
spec,
|
||||
pre_state,
|
||||
state,
|
||||
committee,
|
||||
committee_indices,
|
||||
committee_bits,
|
||||
block.proposer_index,
|
||||
)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_altair_and_later
|
||||
@with_configs([MINIMAL], reason="to create nonduplicate committee")
|
||||
@spec_state_test
|
||||
def test_sync_committee_rewards_nonduplicate_committee(spec, state):
|
||||
committee = get_committee_indices(spec, state, duplicates=False)
|
||||
committee_size = len(committee)
|
||||
committee_indices = get_committee_indices(spec, state, duplicates=False)
|
||||
committee_size = len(committee_indices)
|
||||
committee_bits = [True] * committee_size
|
||||
active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
|
||||
|
||||
# Preconditions of this test case
|
||||
assert active_validator_count >= spec.SYNC_COMMITTEE_SIZE
|
||||
assert committee_size == len(set(committee))
|
||||
assert committee_size == len(set(committee_indices))
|
||||
|
||||
yield from run_successful_sync_committee_test(spec, state, committee, committee_bits)
|
||||
yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_altair_and_later
|
||||
@with_configs([MAINNET], reason="to create duplicate committee")
|
||||
@spec_state_test
|
||||
def test_sync_committee_rewards_duplicate_committee(spec, state):
|
||||
committee = get_committee_indices(spec, state, duplicates=True)
|
||||
committee_size = len(committee)
|
||||
committee_indices = get_committee_indices(spec, state, duplicates=True)
|
||||
committee_size = len(committee_indices)
|
||||
committee_bits = [True] * committee_size
|
||||
active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
|
||||
|
||||
# Preconditions of this test case
|
||||
assert active_validator_count < spec.SYNC_COMMITTEE_SIZE
|
||||
assert committee_size > len(set(committee))
|
||||
assert committee_size > len(set(committee_indices))
|
||||
|
||||
yield from run_successful_sync_committee_test(spec, state, committee, committee_bits)
|
||||
yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_sync_committee_rewards_not_full_participants(spec, state):
|
||||
committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
|
||||
committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
|
||||
rng = random.Random(1010)
|
||||
committee_bits = [rng.choice([True, False]) for _ in committee]
|
||||
committee_bits = [rng.choice([True, False]) for _ in committee_indices]
|
||||
|
||||
yield from run_successful_sync_committee_test(spec, state, committee, committee_bits)
|
||||
yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_sync_committee_rewards_empty_participants(spec, state):
|
||||
committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
|
||||
committee_bits = [False for _ in committee_indices]
|
||||
|
||||
yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_invalid_signature_past_block(spec, state):
|
||||
committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
|
||||
committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
|
||||
|
||||
blocks = []
|
||||
for _ in range(2):
|
||||
@@ -250,12 +277,12 @@ def test_invalid_signature_past_block(spec, state):
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
# Valid sync committee signature here...
|
||||
block.body.sync_aggregate = spec.SyncAggregate(
|
||||
sync_committee_bits=[True] * len(committee),
|
||||
sync_committee_bits=[True] * len(committee_indices),
|
||||
sync_committee_signature=compute_aggregate_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
block.slot - 1,
|
||||
committee,
|
||||
committee_indices,
|
||||
)
|
||||
)
|
||||
|
||||
@@ -265,19 +292,19 @@ def test_invalid_signature_past_block(spec, state):
|
||||
invalid_block = build_empty_block_for_next_slot(spec, state)
|
||||
# Invalid signature from a slot other than the previous
|
||||
invalid_block.body.sync_aggregate = spec.SyncAggregate(
|
||||
sync_committee_bits=[True] * len(committee),
|
||||
sync_committee_bits=[True] * len(committee_indices),
|
||||
sync_committee_signature=compute_aggregate_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
invalid_block.slot - 2,
|
||||
committee,
|
||||
committee_indices,
|
||||
)
|
||||
)
|
||||
|
||||
yield from run_sync_committee_processing(spec, state, invalid_block, expect_exception=True)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_altair_and_later
|
||||
@with_configs([MINIMAL], reason="to produce different committee sets")
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
@@ -295,26 +322,25 @@ def test_invalid_signature_previous_committee(spec, state):
|
||||
transition_to(spec, state, slot_in_future_sync_committee_period)
|
||||
|
||||
# Use the previous sync committee to produce the signature.
|
||||
pubkeys = [validator.pubkey for validator in state.validators]
|
||||
# Ensure that the pubkey sets are different.
|
||||
assert set(old_sync_committee.pubkeys) != set(state.current_sync_committee.pubkeys)
|
||||
committee = [pubkeys.index(pubkey) for pubkey in old_sync_committee.pubkeys]
|
||||
committee_indices = compute_committee_indices(spec, state, old_sync_committee)
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
block.body.sync_aggregate = spec.SyncAggregate(
|
||||
sync_committee_bits=[True] * len(committee),
|
||||
sync_committee_bits=[True] * len(committee_indices),
|
||||
sync_committee_signature=compute_aggregate_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
block.slot - 1,
|
||||
committee,
|
||||
committee_indices,
|
||||
)
|
||||
)
|
||||
|
||||
yield from run_sync_committee_processing(spec, state, block, expect_exception=True)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
@@ -333,15 +359,13 @@ def test_valid_signature_future_committee(spec, state):
|
||||
transition_to(spec, state, slot_in_future_sync_committee_period)
|
||||
|
||||
sync_committee = state.current_sync_committee
|
||||
next_sync_committee = state.next_sync_committee
|
||||
|
||||
expected_sync_committee = spec.get_sync_committee(state, epoch_in_future_sync_committee_period)
|
||||
|
||||
assert sync_committee == expected_sync_committee
|
||||
assert next_sync_committee != sync_committee
|
||||
assert sync_committee != old_current_sync_committee
|
||||
assert sync_committee != old_next_sync_committee
|
||||
|
||||
pubkeys = [validator.pubkey for validator in state.validators]
|
||||
committee_indices = [pubkeys.index(pubkey) for pubkey in sync_committee.pubkeys]
|
||||
committee_indices = compute_committee_indices(spec, state, sync_committee)
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
block.body.sync_aggregate = spec.SyncAggregate(
|
||||
|
||||
@@ -0,0 +1,90 @@
|
||||
from random import Random
|
||||
|
||||
from eth2spec.test.context import spec_state_test, with_altair_and_later
|
||||
from eth2spec.test.helpers.inactivity_scores import randomize_inactivity_scores
|
||||
from eth2spec.test.helpers.state import (
|
||||
next_epoch_via_block,
|
||||
)
|
||||
from eth2spec.test.helpers.epoch_processing import (
|
||||
run_epoch_processing_with
|
||||
)
|
||||
from eth2spec.test.helpers.random import (
|
||||
randomize_attestation_participation,
|
||||
)
|
||||
|
||||
|
||||
def set_full_participation(spec, state):
|
||||
full_flags = spec.ParticipationFlags(0)
|
||||
for flag_index in range(len(spec.PARTICIPATION_FLAG_WEIGHTS)):
|
||||
full_flags = spec.add_flag(full_flags, flag_index)
|
||||
|
||||
for index in range(len(state.validators)):
|
||||
state.current_epoch_participation[index] = full_flags
|
||||
state.previous_epoch_participation[index] = full_flags
|
||||
|
||||
|
||||
def run_process_inactivity_updates(spec, state):
|
||||
yield from run_epoch_processing_with(spec, state, 'process_inactivity_updates')
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_genesis(spec, state):
|
||||
yield from run_process_inactivity_updates(spec, state)
|
||||
|
||||
|
||||
#
|
||||
# Genesis epoch processing is skipped
|
||||
# Thus all of the following tests go past the genesis epoch to test core functionality
|
||||
#
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_all_zero_inactivity_scores_empty_participation(spec, state):
|
||||
next_epoch_via_block(spec, state)
|
||||
state.inactivity_scores = [0] * len(state.validators)
|
||||
yield from run_process_inactivity_updates(spec, state)
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_all_zero_inactivity_scores_random_participation(spec, state):
|
||||
next_epoch_via_block(spec, state)
|
||||
state.inactivity_scores = [0] * len(state.validators)
|
||||
randomize_attestation_participation(spec, state, rng=Random(5555))
|
||||
yield from run_process_inactivity_updates(spec, state)
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_all_zero_inactivity_scores_full_participation(spec, state):
|
||||
next_epoch_via_block(spec, state)
|
||||
set_full_participation(spec, state)
|
||||
state.inactivity_scores = [0] * len(state.validators)
|
||||
yield from run_process_inactivity_updates(spec, state)
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_random_inactivity_scores_empty_participation(spec, state):
|
||||
next_epoch_via_block(spec, state)
|
||||
randomize_inactivity_scores(spec, state, rng=Random(9999))
|
||||
yield from run_process_inactivity_updates(spec, state)
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_random_inactivity_scores_random_participation(spec, state):
|
||||
next_epoch_via_block(spec, state)
|
||||
randomize_attestation_participation(spec, state, rng=Random(22222))
|
||||
randomize_inactivity_scores(spec, state, rng=Random(22222))
|
||||
yield from run_process_inactivity_updates(spec, state)
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_random_inactivity_scores_full_participation(spec, state):
|
||||
next_epoch_via_block(spec, state)
|
||||
set_full_participation(spec, state)
|
||||
randomize_inactivity_scores(spec, state, rng=Random(33333))
|
||||
yield from run_process_inactivity_updates(spec, state)
|
||||
@@ -1,30 +1,35 @@
|
||||
from eth2spec.test.context import (
|
||||
PHASE0,
|
||||
MINIMAL,
|
||||
always_bls,
|
||||
spec_state_test,
|
||||
with_all_phases_except,
|
||||
spec_test,
|
||||
with_altair_and_later,
|
||||
with_configs,
|
||||
with_custom_state,
|
||||
single_phase,
|
||||
misc_balances,
|
||||
)
|
||||
from eth2spec.test.helpers.constants import MINIMAL
|
||||
from eth2spec.test.helpers.state import transition_to
|
||||
from eth2spec.test.helpers.epoch_processing import (
|
||||
run_epoch_processing_with,
|
||||
)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@spec_state_test
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
def test_sync_committees_progress(spec, state):
|
||||
current_epoch = spec.get_current_epoch(state)
|
||||
# NOTE: if not in the genesis epoch, period math below needs to be
|
||||
# adjusted relative to the current epoch
|
||||
assert current_epoch == 0
|
||||
#
|
||||
# Note:
|
||||
# Calculating sync committees requires pubkey aggregation, thus all tests are generated with `always_bls`
|
||||
#
|
||||
|
||||
def run_sync_committees_progress_test(spec, state):
|
||||
first_sync_committee = state.current_sync_committee
|
||||
second_sync_committee = state.next_sync_committee
|
||||
|
||||
slot_at_end_of_current_period = spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH - 1
|
||||
transition_to(spec, state, slot_at_end_of_current_period)
|
||||
current_period = spec.get_current_epoch(state) // spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
|
||||
next_period = current_period + 1
|
||||
next_period_start_epoch = next_period * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
|
||||
next_period_start_slot = next_period_start_epoch * spec.SLOTS_PER_EPOCH
|
||||
end_slot_of_current_period = next_period_start_slot - 1
|
||||
transition_to(spec, state, end_slot_of_current_period)
|
||||
|
||||
# Ensure assignments have not changed:
|
||||
assert state.current_sync_committee == first_sync_committee
|
||||
@@ -34,7 +39,41 @@ def test_sync_committees_progress(spec, state):
|
||||
|
||||
# Can compute the third committee having computed final balances in the last epoch
|
||||
# of this `EPOCHS_PER_SYNC_COMMITTEE_PERIOD`
|
||||
third_sync_committee = spec.get_sync_committee(state, 2 * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
|
||||
third_sync_committee = spec.get_next_sync_committee(state)
|
||||
|
||||
assert state.current_sync_committee == second_sync_committee
|
||||
assert state.next_sync_committee == third_sync_committee
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
def test_sync_committees_progress_genesis(spec, state):
|
||||
# Genesis epoch period has an exceptional case
|
||||
assert spec.get_current_epoch(state) == spec.GENESIS_EPOCH
|
||||
|
||||
yield from run_sync_committees_progress_test(spec, state)
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
def test_sync_committees_progress_not_genesis(spec, state):
|
||||
# Transition out of the genesis epoch period to test non-exceptional case
|
||||
assert spec.get_current_epoch(state) == spec.GENESIS_EPOCH
|
||||
slot_in_next_period = state.slot + spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
||||
transition_to(spec, state, slot_in_next_period)
|
||||
|
||||
yield from run_sync_committees_progress_test(spec, state)
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
|
||||
@spec_test
|
||||
@single_phase
|
||||
@always_bls
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
def test_sync_committees_progress_misc_balances(spec, state):
|
||||
yield from run_sync_committees_progress_test(spec, state)
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
from eth2spec.test.context import (
|
||||
PHASE0, ALTAIR,
|
||||
MINIMAL,
|
||||
with_phases,
|
||||
with_custom_state,
|
||||
with_configs,
|
||||
@@ -8,51 +6,18 @@ from eth2spec.test.context import (
|
||||
low_balances, misc_balances, large_validator_set,
|
||||
)
|
||||
from eth2spec.test.utils import with_meta_tags
|
||||
from eth2spec.test.helpers.constants import (
|
||||
PHASE0, ALTAIR,
|
||||
MINIMAL,
|
||||
)
|
||||
from eth2spec.test.helpers.state import (
|
||||
next_epoch,
|
||||
next_epoch_via_block,
|
||||
)
|
||||
|
||||
|
||||
ALTAIR_FORK_TEST_META_TAGS = {
|
||||
'fork': 'altair',
|
||||
}
|
||||
|
||||
|
||||
def run_fork_test(post_spec, pre_state):
|
||||
yield 'pre', pre_state
|
||||
|
||||
post_state = post_spec.upgrade_to_altair(pre_state)
|
||||
|
||||
# Stable fields
|
||||
stable_fields = [
|
||||
'genesis_time', 'genesis_validators_root', 'slot',
|
||||
# History
|
||||
'latest_block_header', 'block_roots', 'state_roots', 'historical_roots',
|
||||
# Eth1
|
||||
'eth1_data', 'eth1_data_votes', 'eth1_deposit_index',
|
||||
# Registry
|
||||
'validators', 'balances',
|
||||
# Randomness
|
||||
'randao_mixes',
|
||||
# Slashings
|
||||
'slashings',
|
||||
# Finality
|
||||
'justification_bits', 'previous_justified_checkpoint', 'current_justified_checkpoint', 'finalized_checkpoint',
|
||||
]
|
||||
for field in stable_fields:
|
||||
assert getattr(pre_state, field) == getattr(post_state, field)
|
||||
|
||||
# Modified fields
|
||||
modified_fields = ['fork']
|
||||
for field in modified_fields:
|
||||
assert getattr(pre_state, field) != getattr(post_state, field)
|
||||
|
||||
assert pre_state.fork.current_version == post_state.fork.previous_version
|
||||
assert post_state.fork.current_version == post_spec.ALTAIR_FORK_VERSION
|
||||
assert post_state.fork.epoch == post_spec.get_current_epoch(post_state)
|
||||
|
||||
yield 'post', post_state
|
||||
from eth2spec.test.helpers.altair.fork import (
|
||||
ALTAIR_FORK_TEST_META_TAGS,
|
||||
run_fork_test,
|
||||
)
|
||||
|
||||
|
||||
@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
|
||||
@@ -0,0 +1,120 @@
|
||||
from random import Random
|
||||
|
||||
from eth2spec.test.context import (
|
||||
with_phases,
|
||||
with_custom_state,
|
||||
with_configs,
|
||||
spec_test, with_state,
|
||||
low_balances, misc_balances, large_validator_set,
|
||||
)
|
||||
from eth2spec.test.utils import with_meta_tags
|
||||
from eth2spec.test.helpers.constants import (
|
||||
PHASE0, ALTAIR,
|
||||
MINIMAL,
|
||||
)
|
||||
from eth2spec.test.helpers.altair.fork import (
|
||||
ALTAIR_FORK_TEST_META_TAGS,
|
||||
run_fork_test,
|
||||
)
|
||||
from eth2spec.test.helpers.random import (
|
||||
randomize_state,
|
||||
randomize_attestation_participation,
|
||||
)
|
||||
|
||||
|
||||
@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
|
||||
def test_altair_fork_random_0(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(1010))
|
||||
yield from run_fork_test(phases[ALTAIR], state)
|
||||
|
||||
|
||||
@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
|
||||
def test_altair_fork_random_1(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(2020))
|
||||
yield from run_fork_test(phases[ALTAIR], state)
|
||||
|
||||
|
||||
@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
|
||||
def test_altair_fork_random_2(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(3030))
|
||||
yield from run_fork_test(phases[ALTAIR], state)
|
||||
|
||||
|
||||
@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
|
||||
def test_altair_fork_random_3(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(4040))
|
||||
yield from run_fork_test(phases[ALTAIR], state)
|
||||
|
||||
|
||||
@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
|
||||
def test_altair_fork_random_duplicate_attestations(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(1111))
|
||||
# Note: `run_fork_test` empties `current_epoch_attestations`
|
||||
state.previous_epoch_attestations = state.previous_epoch_attestations + state.previous_epoch_attestations
|
||||
yield from run_fork_test(phases[ALTAIR], state)
|
||||
|
||||
|
||||
@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
|
||||
def test_altair_fork_random_mismatched_attestations(spec, phases, state):
|
||||
# Create a random state
|
||||
randomize_state(spec, state, rng=Random(2222))
|
||||
|
||||
# Now make two copies
|
||||
state_0 = state.copy()
|
||||
state_1 = state.copy()
|
||||
|
||||
# Randomize attestation participation of both
|
||||
randomize_attestation_participation(spec, state_0, rng=Random(3333))
|
||||
randomize_attestation_participation(spec, state_1, rng=Random(4444))
|
||||
|
||||
# Note: `run_fork_test` empties `current_epoch_attestations`
|
||||
# Use pending attestations from both random states in a single state for testing
|
||||
state_0.previous_epoch_attestations = state_0.previous_epoch_attestations + state_1.previous_epoch_attestations
|
||||
yield from run_fork_test(phases[ALTAIR], state_0)
|
||||
|
||||
|
||||
@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
|
||||
@spec_test
|
||||
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
|
||||
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
|
||||
def test_altair_fork_random_low_balances(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(5050))
|
||||
yield from run_fork_test(phases[ALTAIR], state)
|
||||
|
||||
|
||||
@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
|
||||
@spec_test
|
||||
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
|
||||
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
|
||||
def test_altair_fork_random_misc_balances(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(6060))
|
||||
yield from run_fork_test(phases[ALTAIR], state)
|
||||
|
||||
|
||||
@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
|
||||
@with_configs([MINIMAL],
|
||||
reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated")
|
||||
@spec_test
|
||||
@with_custom_state(balances_fn=large_validator_set, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
|
||||
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
|
||||
def test_altair_fork_random_large_validator_set(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(7070))
|
||||
yield from run_fork_test(phases[ALTAIR], state)
|
||||
@@ -0,0 +1,118 @@
|
||||
from random import Random
|
||||
|
||||
from eth2spec.test.context import (
|
||||
with_altair_and_later,
|
||||
spec_test,
|
||||
spec_state_test,
|
||||
with_custom_state,
|
||||
single_phase,
|
||||
low_balances, misc_balances,
|
||||
)
|
||||
from eth2spec.test.helpers.inactivity_scores import randomize_inactivity_scores
|
||||
from eth2spec.test.helpers.rewards import leaking
|
||||
import eth2spec.test.helpers.rewards as rewards_helpers
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_random_inactivity_scores_0(spec, state):
|
||||
randomize_inactivity_scores(spec, state, rng=Random(9999))
|
||||
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(9999))
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_random_inactivity_scores_1(spec, state):
|
||||
randomize_inactivity_scores(spec, state, rng=Random(10000))
|
||||
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(10000))
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_half_zero_half_random_inactivity_scores(spec, state):
|
||||
randomize_inactivity_scores(spec, state, rng=Random(10101))
|
||||
half_val_point = len(state.validators) // 2
|
||||
state.inactivity_scores = [0] * half_val_point + state.inactivity_scores[half_val_point:]
|
||||
|
||||
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(10101))
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_random_high_inactivity_scores(spec, state):
|
||||
randomize_inactivity_scores(spec, state, minimum=500000, maximum=5000000, rng=Random(9998))
|
||||
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(9998))
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_random_inactivity_scores_low_balances_0(spec, state):
|
||||
randomize_inactivity_scores(spec, state, rng=Random(11111))
|
||||
yield from rewards_helpers.run_test_full_random(spec, state)
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_random_inactivity_scores_low_balances_1(spec, state):
|
||||
randomize_inactivity_scores(spec, state, rng=Random(22222))
|
||||
yield from rewards_helpers.run_test_full_random(spec, state)
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_full_random_misc_balances(spec, state):
|
||||
randomize_inactivity_scores(spec, state, rng=Random(33333))
|
||||
yield from rewards_helpers.run_test_full_random(spec, state)
|
||||
|
||||
|
||||
#
|
||||
# Leaking variants
|
||||
#
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@leaking()
|
||||
def test_random_inactivity_scores_leaking_0(spec, state):
|
||||
randomize_inactivity_scores(spec, state, rng=Random(9999))
|
||||
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(9999))
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@leaking()
|
||||
def test_random_inactivity_scores_leaking_1(spec, state):
|
||||
randomize_inactivity_scores(spec, state, rng=Random(10000))
|
||||
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(10000))
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@leaking()
|
||||
def test_half_zero_half_random_inactivity_scores_leaking(spec, state):
|
||||
randomize_inactivity_scores(spec, state, rng=Random(10101))
|
||||
half_val_point = len(state.validators) // 2
|
||||
state.inactivity_scores = [0] * half_val_point + state.inactivity_scores[half_val_point:]
|
||||
|
||||
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(10101))
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@leaking()
|
||||
def test_random_high_inactivity_scores_leaking(spec, state):
|
||||
randomize_inactivity_scores(spec, state, minimum=500000, maximum=5000000, rng=Random(9998))
|
||||
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(9998))
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@leaking(epochs=5)
|
||||
def test_random_high_inactivity_scores_leaking_5_epochs(spec, state):
|
||||
randomize_inactivity_scores(spec, state, minimum=500000, maximum=5000000, rng=Random(9998))
|
||||
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(9998))
|
||||
@@ -12,14 +12,14 @@ from eth2spec.test.helpers.sync_committee import (
|
||||
compute_aggregate_sync_committee_signature,
|
||||
)
|
||||
from eth2spec.test.context import (
|
||||
PHASE0,
|
||||
with_all_phases_except,
|
||||
with_altair_and_later,
|
||||
spec_state_test,
|
||||
)
|
||||
|
||||
|
||||
def run_sync_committee_sanity_test(spec, state, fraction_full=1.0):
|
||||
committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
|
||||
all_pubkeys = [v.pubkey for v in state.validators]
|
||||
committee = [all_pubkeys.index(pubkey) for pubkey in state.current_sync_committee.pubkeys]
|
||||
participants = random.sample(committee, int(len(committee) * fraction_full))
|
||||
|
||||
yield 'pre', state
|
||||
@@ -40,46 +40,46 @@ def run_sync_committee_sanity_test(spec, state, fraction_full=1.0):
|
||||
yield 'post', state
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_full_sync_committee_committee(spec, state):
|
||||
next_epoch(spec, state)
|
||||
yield from run_sync_committee_sanity_test(spec, state, fraction_full=1.0)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_half_sync_committee_committee(spec, state):
|
||||
next_epoch(spec, state)
|
||||
yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.5)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_empty_sync_committee_committee(spec, state):
|
||||
next_epoch(spec, state)
|
||||
yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.0)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_full_sync_committee_committee_genesis(spec, state):
|
||||
yield from run_sync_committee_sanity_test(spec, state, fraction_full=1.0)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_half_sync_committee_committee_genesis(spec, state):
|
||||
yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.5)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_empty_sync_committee_committee_genesis(spec, state):
|
||||
yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.0)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_inactivity_scores(spec, state):
|
||||
for _ in range(spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY + 2):
|
||||
|
||||
@@ -0,0 +1,244 @@
|
||||
from eth2spec.test.context import fork_transition_test
|
||||
from eth2spec.test.helpers.constants import PHASE0, ALTAIR
|
||||
from eth2spec.test.helpers.state import state_transition_and_sign_block, next_slot
|
||||
from eth2spec.test.helpers.block import build_empty_block_for_next_slot, build_empty_block, sign_block
|
||||
|
||||
|
||||
def _state_transition_and_sign_block_at_slot(spec, state):
|
||||
"""
|
||||
Cribbed from ``transition_unsigned_block`` helper
|
||||
where the early parts of the state transition have already
|
||||
been applied to ``state``.
|
||||
|
||||
Used to produce a block during an irregular state transition.
|
||||
"""
|
||||
block = build_empty_block(spec, state)
|
||||
|
||||
assert state.latest_block_header.slot < block.slot
|
||||
assert state.slot == block.slot
|
||||
spec.process_block(state, block)
|
||||
block.state_root = state.hash_tree_root()
|
||||
return sign_block(spec, state, block)
|
||||
|
||||
|
||||
def _all_blocks(_):
|
||||
return True
|
||||
|
||||
|
||||
def _skip_slots(*slots):
|
||||
"""
|
||||
Skip making a block if its slot is
|
||||
passed as an argument to this filter
|
||||
"""
|
||||
def f(state_at_prior_slot):
|
||||
return state_at_prior_slot.slot + 1 not in slots
|
||||
return f
|
||||
|
||||
|
||||
def _no_blocks(_):
|
||||
return False
|
||||
|
||||
|
||||
def _only_at(slot):
|
||||
"""
|
||||
Only produce a block if its slot is ``slot``.
|
||||
"""
|
||||
def f(state_at_prior_slot):
|
||||
return state_at_prior_slot.slot + 1 == slot
|
||||
return f
|
||||
|
||||
|
||||
def _state_transition_across_slots(spec, state, to_slot, block_filter=_all_blocks):
|
||||
assert state.slot < to_slot
|
||||
while state.slot < to_slot:
|
||||
should_make_block = block_filter(state)
|
||||
if should_make_block:
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||
yield signed_block
|
||||
else:
|
||||
next_slot(spec, state)
|
||||
|
||||
|
||||
def _do_altair_fork(state, spec, post_spec, fork_epoch, with_block=True):
|
||||
spec.process_slots(state, state.slot + 1)
|
||||
|
||||
assert state.slot % spec.SLOTS_PER_EPOCH == 0
|
||||
assert spec.compute_epoch_at_slot(state.slot) == fork_epoch
|
||||
|
||||
state = post_spec.upgrade_to_altair(state)
|
||||
|
||||
assert state.fork.epoch == fork_epoch
|
||||
assert state.fork.previous_version == post_spec.GENESIS_FORK_VERSION
|
||||
assert state.fork.current_version == post_spec.ALTAIR_FORK_VERSION
|
||||
|
||||
if with_block:
|
||||
return state, _state_transition_and_sign_block_at_slot(post_spec, state)
|
||||
else:
|
||||
return state, None
|
||||
|
||||
|
||||
@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
|
||||
def test_normal_transition(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
|
||||
"""
|
||||
Transition from the initial ``state`` to the epoch after the ``fork_epoch``,
|
||||
producing blocks for every slot along the way.
|
||||
"""
|
||||
yield "pre", state
|
||||
|
||||
assert spec.get_current_epoch(state) < fork_epoch
|
||||
|
||||
# regular state transition until fork:
|
||||
to_slot = fork_epoch * spec.SLOTS_PER_EPOCH - 1
|
||||
blocks = []
|
||||
blocks.extend([
|
||||
pre_tag(block) for block in
|
||||
_state_transition_across_slots(spec, state, to_slot)
|
||||
])
|
||||
|
||||
# irregular state transition to handle fork:
|
||||
state, block = _do_altair_fork(state, spec, post_spec, fork_epoch)
|
||||
blocks.append(post_tag(block))
|
||||
|
||||
# continue regular state transition with new spec into next epoch
|
||||
to_slot = post_spec.SLOTS_PER_EPOCH + state.slot
|
||||
blocks.extend([
|
||||
post_tag(block) for block in
|
||||
_state_transition_across_slots(post_spec, state, to_slot)
|
||||
])
|
||||
|
||||
assert state.slot % post_spec.SLOTS_PER_EPOCH == 0
|
||||
assert post_spec.compute_epoch_at_slot(state.slot) == fork_epoch + 1
|
||||
|
||||
slots_with_blocks = [block.message.slot for block in blocks]
|
||||
assert len(set(slots_with_blocks)) == len(slots_with_blocks)
|
||||
assert set(range(1, state.slot + 1)) == set(slots_with_blocks)
|
||||
|
||||
yield "blocks", blocks
|
||||
yield "post", state
|
||||
|
||||
|
||||
@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
|
||||
def test_transition_missing_first_post_block(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
|
||||
"""
|
||||
Transition from the initial ``state`` to the epoch after the ``fork_epoch``,
|
||||
producing blocks for every slot along the way except for the first block
|
||||
of the new fork.
|
||||
"""
|
||||
yield "pre", state
|
||||
|
||||
assert spec.get_current_epoch(state) < fork_epoch
|
||||
|
||||
# regular state transition until fork:
|
||||
to_slot = fork_epoch * spec.SLOTS_PER_EPOCH - 1
|
||||
blocks = []
|
||||
blocks.extend([
|
||||
pre_tag(block) for block in
|
||||
_state_transition_across_slots(spec, state, to_slot)
|
||||
])
|
||||
|
||||
# irregular state transition to handle fork:
|
||||
state, _ = _do_altair_fork(state, spec, post_spec, fork_epoch, with_block=False)
|
||||
|
||||
# continue regular state transition with new spec into next epoch
|
||||
to_slot = post_spec.SLOTS_PER_EPOCH + state.slot
|
||||
blocks.extend([
|
||||
post_tag(block) for block in
|
||||
_state_transition_across_slots(post_spec, state, to_slot)
|
||||
])
|
||||
|
||||
assert state.slot % post_spec.SLOTS_PER_EPOCH == 0
|
||||
assert post_spec.compute_epoch_at_slot(state.slot) == fork_epoch + 1
|
||||
|
||||
slots_with_blocks = [block.message.slot for block in blocks]
|
||||
assert len(set(slots_with_blocks)) == len(slots_with_blocks)
|
||||
expected_slots = set(range(1, state.slot + 1)).difference(set([fork_epoch * spec.SLOTS_PER_EPOCH]))
|
||||
assert expected_slots == set(slots_with_blocks)
|
||||
|
||||
yield "blocks", blocks
|
||||
yield "post", state
|
||||
|
||||
|
||||
@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
|
||||
def test_transition_missing_last_pre_fork_block(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
|
||||
"""
|
||||
Transition from the initial ``state`` to the epoch after the ``fork_epoch``,
|
||||
producing blocks for every slot along the way except for the last block
|
||||
of the old fork.
|
||||
"""
|
||||
yield "pre", state
|
||||
|
||||
assert spec.get_current_epoch(state) < fork_epoch
|
||||
|
||||
# regular state transition until fork:
|
||||
last_slot_of_pre_fork = fork_epoch * spec.SLOTS_PER_EPOCH - 1
|
||||
to_slot = last_slot_of_pre_fork
|
||||
blocks = []
|
||||
blocks.extend([
|
||||
pre_tag(block) for block in
|
||||
_state_transition_across_slots(spec, state, to_slot, block_filter=_skip_slots(last_slot_of_pre_fork))
|
||||
])
|
||||
|
||||
# irregular state transition to handle fork:
|
||||
state, block = _do_altair_fork(state, spec, post_spec, fork_epoch)
|
||||
blocks.append(post_tag(block))
|
||||
|
||||
# continue regular state transition with new spec into next epoch
|
||||
to_slot = post_spec.SLOTS_PER_EPOCH + state.slot
|
||||
blocks.extend([
|
||||
post_tag(block) for block in
|
||||
_state_transition_across_slots(post_spec, state, to_slot)
|
||||
])
|
||||
|
||||
assert state.slot % post_spec.SLOTS_PER_EPOCH == 0
|
||||
assert post_spec.compute_epoch_at_slot(state.slot) == fork_epoch + 1
|
||||
|
||||
slots_with_blocks = [block.message.slot for block in blocks]
|
||||
assert len(set(slots_with_blocks)) == len(slots_with_blocks)
|
||||
expected_slots = set(range(1, state.slot + 1)).difference(set([last_slot_of_pre_fork]))
|
||||
assert expected_slots == set(slots_with_blocks)
|
||||
|
||||
yield "blocks", blocks
|
||||
yield "post", state
|
||||
|
||||
|
||||
@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
|
||||
def test_transition_only_blocks_post_fork(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
|
||||
"""
|
||||
Transition from the initial ``state`` to the epoch after the ``fork_epoch``,
|
||||
skipping blocks for every slot along the way except for the first block
|
||||
in the ending epoch.
|
||||
"""
|
||||
yield "pre", state
|
||||
|
||||
assert spec.get_current_epoch(state) < fork_epoch
|
||||
|
||||
# regular state transition until fork:
|
||||
last_slot_of_pre_fork = fork_epoch * spec.SLOTS_PER_EPOCH - 1
|
||||
to_slot = last_slot_of_pre_fork
|
||||
blocks = []
|
||||
blocks.extend([
|
||||
pre_tag(block) for block in
|
||||
_state_transition_across_slots(spec, state, to_slot, block_filter=_no_blocks)
|
||||
])
|
||||
|
||||
# irregular state transition to handle fork:
|
||||
state, _ = _do_altair_fork(state, spec, post_spec, fork_epoch, with_block=False)
|
||||
|
||||
# continue regular state transition with new spec into next epoch
|
||||
to_slot = post_spec.SLOTS_PER_EPOCH + state.slot
|
||||
last_slot = (fork_epoch + 1) * post_spec.SLOTS_PER_EPOCH
|
||||
blocks.extend([
|
||||
post_tag(block) for block in
|
||||
_state_transition_across_slots(post_spec, state, to_slot, block_filter=_only_at(last_slot))
|
||||
])
|
||||
|
||||
assert state.slot % post_spec.SLOTS_PER_EPOCH == 0
|
||||
assert post_spec.compute_epoch_at_slot(state.slot) == fork_epoch + 1
|
||||
|
||||
slots_with_blocks = [block.message.slot for block in blocks]
|
||||
assert len(slots_with_blocks) == 1
|
||||
assert slots_with_blocks[0] == last_slot
|
||||
|
||||
yield "blocks", blocks
|
||||
yield "post", state
|
||||
@@ -1,8 +1,8 @@
|
||||
from eth2spec.test.context import (
|
||||
spec_state_test,
|
||||
with_phases,
|
||||
ALTAIR,
|
||||
)
|
||||
from eth2spec.test.helpers.constants import ALTAIR
|
||||
from eth2spec.test.helpers.merkle import build_proof
|
||||
|
||||
|
||||
|
||||
@@ -1,6 +1,4 @@
 from eth2spec.test.context import (
-    ALTAIR,
-    MINIMAL,
     spec_state_test,
     with_configs,
     with_phases,
@@ -10,6 +8,10 @@ from eth2spec.test.helpers.block import (
     build_empty_block,
     build_empty_block_for_next_slot,
 )
+from eth2spec.test.helpers.constants import (
+    ALTAIR,
+    MINIMAL,
+)
 from eth2spec.test.helpers.state import (
     next_slots,
     state_transition_and_sign_block,
@@ -30,7 +32,7 @@ def test_process_light_client_update_not_updated(spec, state):
     )
     store = spec.LightClientStore(
         snapshot=pre_snapshot,
-        valid_updates=[]
+        valid_updates=set(),
     )
 
     # Block at slot 1 doesn't increase sync committee period, so it won't update snapshot
@@ -44,7 +46,8 @@ def test_process_light_client_update_not_updated(spec, state):
         body_root=signed_block.message.body.hash_tree_root(),
     )
     # Sync committee signing the header
-    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
+    all_pubkeys = [v.pubkey for v in state.validators]
+    committee = [all_pubkeys.index(pubkey) for pubkey in state.current_sync_committee.pubkeys]
     sync_committee_bits = [True] * len(committee)
     sync_committee_signature = compute_aggregate_sync_committee_signature(
         spec,
@@ -74,7 +77,7 @@ def test_process_light_client_update_not_updated(spec, state):
     spec.process_light_client_update(store, update, state.slot, state.genesis_validators_root)
 
     assert len(store.valid_updates) == 1
-    assert store.valid_updates[0] == update
+    assert store.valid_updates.pop() == update
     assert store.snapshot == pre_snapshot
@@ -89,7 +92,7 @@ def test_process_light_client_update_timeout(spec, state):
     )
     store = spec.LightClientStore(
         snapshot=pre_snapshot,
-        valid_updates=[]
+        valid_updates=set(),
     )
 
     # Forward to next sync committee period
@@ -109,7 +112,8 @@ def test_process_light_client_update_timeout(spec, state):
     )
 
     # Sync committee signing the finalized_block_header
-    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
+    all_pubkeys = [v.pubkey for v in state.validators]
+    committee = [all_pubkeys.index(pubkey) for pubkey in state.current_sync_committee.pubkeys]
     sync_committee_bits = [True] * len(committee)
     sync_committee_signature = compute_aggregate_sync_committee_signature(
         spec,
@@ -154,7 +158,7 @@ def test_process_light_client_update_finality_updated(spec, state):
     )
     store = spec.LightClientStore(
         snapshot=pre_snapshot,
-        valid_updates=[]
+        valid_updates=set(),
     )
 
     # Change finality
@@ -188,7 +192,8 @@ def test_process_light_client_update_finality_updated(spec, state):
     )
 
     # Sync committee signing the finalized_block_header
-    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
+    all_pubkeys = [v.pubkey for v in state.validators]
+    committee = [all_pubkeys.index(pubkey) for pubkey in state.current_sync_committee.pubkeys]
     sync_committee_bits = [True] * len(committee)
     sync_committee_signature = compute_aggregate_sync_committee_signature(
         spec,
@@ -0,0 +1,214 @@
|
||||
import random
|
||||
from collections import defaultdict
|
||||
from eth2spec.utils.ssz.ssz_typing import Bitvector
|
||||
from eth2spec.test.helpers.block import build_empty_block
|
||||
from eth2spec.test.helpers.keys import pubkey_to_privkey
|
||||
from eth2spec.test.helpers.state import transition_to
|
||||
from eth2spec.utils import bls
|
||||
from eth2spec.utils.bls import only_with_bls
|
||||
from eth2spec.test.context import (
|
||||
with_altair_and_later,
|
||||
with_configs,
|
||||
with_state,
|
||||
)
|
||||
from eth2spec.test.helpers.constants import (
|
||||
MINIMAL,
|
||||
)
|
||||
|
||||
rng = random.Random(1337)
|
||||
|
||||
|
||||
def ensure_assignments_in_sync_committee(
|
||||
spec, state, epoch, sync_committee, active_pubkeys
|
||||
):
|
||||
assert len(sync_committee.pubkeys) >= 3
|
||||
some_pubkeys = rng.sample(sync_committee.pubkeys, 3)
|
||||
for pubkey in some_pubkeys:
|
||||
validator_index = active_pubkeys.index(pubkey)
|
||||
assert spec.is_assigned_to_sync_committee(state, epoch, validator_index)
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@with_state
|
||||
def test_is_assigned_to_sync_committee(phases, spec, state):
|
||||
epoch = spec.get_current_epoch(state)
|
||||
validator_indices = spec.get_active_validator_indices(state, epoch)
|
||||
validator_count = len(validator_indices)
|
||||
|
||||
query_epoch = epoch + 1
|
||||
next_query_epoch = query_epoch + spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
|
||||
active_pubkeys = [state.validators[index].pubkey for index in validator_indices]
|
||||
|
||||
ensure_assignments_in_sync_committee(
|
||||
spec, state, query_epoch, state.current_sync_committee, active_pubkeys
|
||||
)
|
||||
ensure_assignments_in_sync_committee(
|
||||
spec, state, next_query_epoch, state.next_sync_committee, active_pubkeys
|
||||
)
|
||||
|
||||
sync_committee_pubkeys = set(
|
||||
list(state.current_sync_committee.pubkeys)
|
||||
+ list(state.next_sync_committee.pubkeys)
|
||||
)
|
||||
disqualified_pubkeys = set(
|
||||
filter(lambda key: key not in sync_committee_pubkeys, active_pubkeys)
|
||||
)
|
||||
# NOTE: only check `disqualified_pubkeys` if SYNC_COMMITEE_SIZE < validator count
|
||||
if disqualified_pubkeys:
|
||||
sample_size = 3
|
||||
assert validator_count >= sample_size
|
||||
some_pubkeys = rng.sample(disqualified_pubkeys, sample_size)
|
||||
for pubkey in some_pubkeys:
|
||||
validator_index = active_pubkeys.index(pubkey)
|
||||
is_current = spec.is_assigned_to_sync_committee(
|
||||
state, query_epoch, validator_index
|
||||
)
|
||||
is_next = spec.is_assigned_to_sync_committee(
|
||||
state, next_query_epoch, validator_index
|
||||
)
|
||||
is_current_or_next = is_current or is_next
|
||||
assert not is_current_or_next
|
||||
|
||||
|
||||
def _get_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
target_slot,
|
||||
target_block_root,
|
||||
subcommittee_index,
|
||||
index_in_subcommittee,
|
||||
):
|
||||
subcommittee_size = spec.SYNC_COMMITTEE_SIZE // spec.SYNC_COMMITTEE_SUBNET_COUNT
|
||||
sync_committee_index = (
|
||||
subcommittee_index * subcommittee_size + index_in_subcommittee
|
||||
)
|
||||
pubkey = state.current_sync_committee.pubkeys[sync_committee_index]
|
||||
privkey = pubkey_to_privkey[pubkey]
|
||||
|
||||
domain = spec.get_domain(
|
||||
state,
|
||||
spec.DOMAIN_SYNC_COMMITTEE,
|
||||
)
|
||||
signing_data = spec.compute_signing_root(target_block_root, domain)
|
||||
return bls.Sign(privkey, spec.hash_tree_root(signing_data))
|
||||
|
||||
|
||||
@only_with_bls()
|
||||
@with_altair_and_later
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
@with_state
|
||||
def test_process_sync_committee_contributions(phases, spec, state):
|
||||
# skip over slots at genesis
|
||||
transition_to(spec, state, state.slot + 3)
|
||||
|
||||
# build a block and attempt to assemble a sync aggregate
|
||||
# from some sync committee contributions
|
||||
block = build_empty_block(spec, state)
|
||||
previous_slot = state.slot - 1
|
||||
target_block_root = spec.get_block_root_at_slot(state, previous_slot)
|
||||
aggregation_bits = Bitvector[
|
||||
spec.SYNC_COMMITTEE_SIZE // spec.SYNC_COMMITTEE_SUBNET_COUNT
|
||||
]()
|
||||
aggregation_index = 0
|
||||
aggregation_bits[aggregation_index] = True
|
||||
|
||||
contributions = [
|
||||
spec.SyncCommitteeContribution(
|
||||
slot=block.slot,
|
||||
beacon_block_root=target_block_root,
|
||||
subcommittee_index=i,
|
||||
aggregation_bits=aggregation_bits,
|
||||
signature=_get_sync_committee_signature(
|
||||
spec, state, previous_slot, target_block_root, i, aggregation_index
|
||||
),
|
||||
)
|
||||
for i in range(spec.SYNC_COMMITTEE_SUBNET_COUNT)
|
||||
]
|
||||
|
||||
# ensure the block has an empty sync aggregate...
|
||||
empty_sync_aggregate = spec.SyncAggregate()
|
||||
empty_sync_aggregate.sync_committee_signature = spec.G2_POINT_AT_INFINITY
|
||||
assert block.body.sync_aggregate == empty_sync_aggregate
|
||||
spec.process_sync_committee_contributions(block, set(contributions))
|
||||
|
||||
# and that after processing, it is no longer empty
|
||||
assert len(block.body.sync_aggregate.sync_committee_bits) != 0
|
||||
assert (
|
||||
block.body.sync_aggregate.sync_committee_signature != spec.G2_POINT_AT_INFINITY
|
||||
)
|
||||
# moreover, ensure the sync aggregate is valid if the block is accepted
|
||||
spec.process_block(state, block)
|
||||
|
||||
|
||||
def _validator_index_for_pubkey(state, pubkey):
|
||||
return list(map(lambda v: v.pubkey, state.validators)).index(pubkey)
|
||||
|
||||
|
||||
def _subnet_for_sync_committee_index(spec, i):
|
||||
return i // (spec.SYNC_COMMITTEE_SIZE // spec.SYNC_COMMITTEE_SUBNET_COUNT)
|
||||
|
||||
|
||||
def _get_expected_subnets_by_pubkey(sync_committee_members):
|
||||
# Build deduplicated set for each pubkey
|
||||
expected_subnets_by_pubkey = defaultdict(set)
|
||||
for (subnet, pubkey) in sync_committee_members:
|
||||
expected_subnets_by_pubkey[pubkey].add(subnet)
|
||||
return expected_subnets_by_pubkey
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
@with_state
|
||||
def test_compute_subnets_for_sync_committee(state, spec, phases):
|
||||
# Transition to the head of the next period
|
||||
transition_to(spec, state, spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
|
||||
|
||||
next_slot_epoch = spec.compute_epoch_at_slot(state.slot + 1)
|
||||
assert (
|
||||
spec.compute_sync_committee_period(spec.get_current_epoch(state))
|
||||
== spec.compute_sync_committee_period(next_slot_epoch)
|
||||
)
|
||||
some_sync_committee_members = list(
|
||||
(
|
||||
_subnet_for_sync_committee_index(spec, i),
|
||||
pubkey,
|
||||
)
|
||||
# use current_sync_committee
|
||||
for i, pubkey in enumerate(state.current_sync_committee.pubkeys)
|
||||
)
|
||||
expected_subnets_by_pubkey = _get_expected_subnets_by_pubkey(some_sync_committee_members)
|
||||
|
||||
for _, pubkey in some_sync_committee_members:
|
||||
validator_index = _validator_index_for_pubkey(state, pubkey)
|
||||
subnets = spec.compute_subnets_for_sync_committee(state, validator_index)
|
||||
expected_subnets = expected_subnets_by_pubkey[pubkey]
|
||||
assert subnets == expected_subnets
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
@with_state
|
||||
def test_compute_subnets_for_sync_committee_slot_period_boundary(state, spec, phases):
|
||||
# Transition to the end of the period
|
||||
transition_to(spec, state, spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD - 1)
|
||||
|
||||
next_slot_epoch = spec.compute_epoch_at_slot(state.slot + 1)
|
||||
assert (
|
||||
spec.compute_sync_committee_period(spec.get_current_epoch(state))
|
||||
!= spec.compute_sync_committee_period(next_slot_epoch)
|
||||
)
|
||||
some_sync_committee_members = list(
|
||||
(
|
||||
_subnet_for_sync_committee_index(spec, i),
|
||||
pubkey,
|
||||
)
|
||||
# use next_sync_committee
|
||||
for i, pubkey in enumerate(state.next_sync_committee.pubkeys)
|
||||
)
|
||||
expected_subnets_by_pubkey = _get_expected_subnets_by_pubkey(some_sync_committee_members)
|
||||
|
||||
for _, pubkey in some_sync_committee_members:
|
||||
validator_index = _validator_index_for_pubkey(state, pubkey)
|
||||
subnets = spec.compute_subnets_for_sync_committee(state, validator_index)
|
||||
expected_subnets = expected_subnets_by_pubkey[pubkey]
|
||||
assert subnets == expected_subnets
|
||||
@@ -2,14 +2,19 @@ import pytest
 
 from eth2spec.phase0 import spec as spec_phase0
 from eth2spec.altair import spec as spec_altair
+from eth2spec.merge import spec as spec_merge
 from eth2spec.utils import bls
 
 from .exceptions import SkippedTest
+from .helpers.constants import (
+    PHASE0, ALTAIR, MERGE,
+    ALL_PHASES, FORKS_BEFORE_ALTAIR, FORKS_BEFORE_MERGE,
+)
 from .helpers.genesis import create_genesis_state
-from .utils import vector_test, with_meta_tags
+from .utils import vector_test, with_meta_tags, build_transition_test
 
 from random import Random
-from typing import Any, Callable, NewType, Sequence, TypedDict, Protocol
+from typing import Any, Callable, Sequence, TypedDict, Protocol
 
 from lru import LRU
@@ -19,32 +24,9 @@ from importlib import reload
 def reload_specs():
     reload(spec_phase0)
     reload(spec_altair)
+    reload(spec_merge)
 
 
-# Some of the Spec module functionality is exposed here to deal with phase-specific changes.
-
-SpecForkName = NewType("SpecForkName", str)
-ConfigName = NewType("ConfigName", str)
-
-PHASE0 = SpecForkName('phase0')
-ALTAIR = SpecForkName('altair')
-
-# Experimental phases (not included in default "ALL_PHASES"):
-MERGE = SpecForkName('merge')
-SHARDING = SpecForkName('sharding')
-CUSTODY_GAME = SpecForkName('custody_game')
-DAS = SpecForkName('das')
-
-ALL_PHASES = (PHASE0, ALTAIR)
-
-MAINNET = ConfigName('mainnet')
-MINIMAL = ConfigName('minimal')
-
-ALL_CONFIGS = (MINIMAL, MAINNET)
-
-# The forks that output to the test vectors.
-TESTGEN_FORKS = (PHASE0, ALTAIR)
-
 # TODO: currently phases are defined as python modules.
 # It would be better if they would be more well-defined interfaces for stronger typing.
@@ -61,24 +43,23 @@ class SpecAltair(Spec):
     ...
 
 
+class SpecMerge(Spec):
+    ...
+
+
 class SpecForks(TypedDict, total=False):
     PHASE0: SpecPhase0
     ALTAIR: SpecAltair
+    MERGE: SpecMerge
 
 
 def _prepare_state(balances_fn: Callable[[Any], Sequence[int]], threshold_fn: Callable[[Any], int],
                    spec: Spec, phases: SpecForks):
-
-    p0 = phases[PHASE0]
-    balances = balances_fn(p0)
-    activation_threshold = threshold_fn(p0)
-
-    state = create_genesis_state(spec=p0, validator_balances=balances,
+    phase = phases[spec.fork]
+    balances = balances_fn(phase)
+    activation_threshold = threshold_fn(phase)
+    state = create_genesis_state(spec=phase, validator_balances=balances,
                                  activation_threshold=activation_threshold)
-    # TODO: upgrade to merge spec, and later sharding.
-    if spec.fork == ALTAIR:
-        state = phases[ALTAIR].upgrade_to_altair(state)
-
     return state
@@ -331,7 +312,7 @@ def with_phases(phases, other_phases=None):
                 return None
             run_phases = [phase]
 
-        if PHASE0 not in run_phases and ALTAIR not in run_phases:
+        if PHASE0 not in run_phases and ALTAIR not in run_phases and MERGE not in run_phases:
             dump_skipping_message("none of the recognized phases are executable, skipping test.")
             return None
 
@@ -349,6 +330,8 @@ def with_phases(phases, other_phases=None):
                 phase_dir[PHASE0] = spec_phase0
             if ALTAIR in available_phases:
                 phase_dir[ALTAIR] = spec_altair
+            if MERGE in available_phases:
+                phase_dir[MERGE] = spec_merge
 
             # return is ignored whenever multiple phases are ran.
             # This return is for test generators to emit python generators (yielding test vector outputs)
@@ -356,6 +339,8 @@ def with_phases(phases, other_phases=None):
                 ret = fn(spec=spec_phase0, phases=phase_dir, *args, **kw)
             if ALTAIR in run_phases:
                 ret = fn(spec=spec_altair, phases=phase_dir, *args, **kw)
+            if MERGE in run_phases:
+                ret = fn(spec=spec_merge, phases=phase_dir, *args, **kw)
 
             # TODO: merge, sharding, custody_game and das are not executable yet.
             # Tests that specify these features will not run, and get ignored for these specific phases.
@@ -381,8 +366,55 @@ def with_configs(configs, reason=None):
 
 
 def is_post_altair(spec):
-    # TODO: everything runs in parallel to Altair.
-    # After features are rebased on the Altair fork, this can be reduced to just PHASE0.
-    if spec.fork in [PHASE0, MERGE, SHARDING, CUSTODY_GAME, DAS]:
+    if spec.fork == MERGE:  # TODO: remove parallel Altair-Merge condition after rebase.
+        return False
+    if spec.fork in FORKS_BEFORE_ALTAIR:
         return False
     return True
 
 
+def is_post_merge(spec):
+    if spec.fork == ALTAIR:  # TODO: remove parallel Altair-Merge condition after rebase.
+        return False
+    if spec.fork in FORKS_BEFORE_MERGE:
+        return False
+    return True
+
+
 with_altair_and_later = with_phases([ALTAIR])  # TODO: include Merge, but not until Merge work is rebased.
+with_merge_and_later = with_phases([MERGE])
+
+
+def fork_transition_test(pre_fork_name, post_fork_name, fork_epoch=None):
+    """
+    A decorator to construct a "transition" test from one fork of the eth2 spec
+    to another.
+
+    Decorator assumes a transition from the `pre_fork_name` fork to the
+    `post_fork_name` fork. The user can supply a `fork_epoch` at which the
+    fork occurs or they must compute one (yielding to the generator) during the test
+    if more custom behavior is desired.
+
+    A test using this decorator should expect to receive as parameters:
+        `state`: the default state constructed for the `pre_fork_name` fork
+            according to the `with_state` decorator.
+        `fork_epoch`: the `fork_epoch` provided to this decorator, if given.
+        `spec`: the version of the eth2 spec corresponding to `pre_fork_name`.
+        `post_spec`: the version of the eth2 spec corresponding to `post_fork_name`.
+        `pre_tag`: a function to tag data as belonging to `pre_fork_name` fork.
+            Used to discriminate data during consumption of the generated spec tests.
+        `post_tag`: a function to tag data as belonging to `post_fork_name` fork.
+            Used to discriminate data during consumption of the generated spec tests.
+    """
+    def _wrapper(fn):
+        @with_phases([pre_fork_name], other_phases=[post_fork_name])
+        @spec_test
+        @with_state
+        def _adapter(*args, **kwargs):
+            wrapped = build_transition_test(fn,
+                                            pre_fork_name,
+                                            post_fork_name,
+                                            fork_epoch=fork_epoch)
+            return wrapped(*args, **kwargs)
+        return _adapter
+    return _wrapper
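The `fork_transition_test` decorator above simply wires `with_phases`, `spec_test`, and `with_state` around `build_transition_test`. A rough sketch of how a transition test might consume the parameters listed in the docstring (the test name and body are illustrative only, not taken from this diff):

```python
from eth2spec.test.context import fork_transition_test
from eth2spec.test.helpers.constants import ALTAIR, PHASE0


@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
def test_sample_transition(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
    # `spec` is the phase0 module, `post_spec` the altair module for this run.
    assert spec.get_current_epoch(state) < fork_epoch
    # A real test would produce pre-fork blocks wrapped with `pre_tag(...)` and
    # post-fork blocks wrapped with `post_tag(...)`, then yield them for the generator.
    yield "blocks", []
    yield "post", state
```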
@@ -1,9 +1,9 @@
|
||||
from eth2spec.test.context import (
|
||||
CUSTODY_GAME,
|
||||
with_phases,
|
||||
spec_state_test,
|
||||
always_bls,
|
||||
)
|
||||
from eth2spec.test.helpers.constants import CUSTODY_GAME
|
||||
from eth2spec.test.helpers.state import transition_to
|
||||
from eth2spec.test.helpers.attestations import (
|
||||
run_attestation_processing,
|
||||
|
||||
@@ -6,10 +6,12 @@ from eth2spec.test.helpers.custody import (
|
||||
from eth2spec.test.helpers.attestations import (
|
||||
get_valid_on_time_attestation,
|
||||
)
|
||||
from eth2spec.test.helpers.state import transition_to, transition_to_valid_shard_slot
|
||||
from eth2spec.test.context import (
|
||||
from eth2spec.test.helpers.constants import (
|
||||
CUSTODY_GAME,
|
||||
MINIMAL,
|
||||
)
|
||||
from eth2spec.test.helpers.state import transition_to, transition_to_valid_shard_slot
|
||||
from eth2spec.test.context import (
|
||||
expect_assertion_error,
|
||||
disable_process_reveal_deadlines,
|
||||
spec_state_test,
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
from eth2spec.test.helpers.constants import CUSTODY_GAME
|
||||
from eth2spec.test.helpers.custody import get_valid_custody_key_reveal
|
||||
from eth2spec.test.context import (
|
||||
CUSTODY_GAME,
|
||||
with_phases,
|
||||
spec_state_test,
|
||||
expect_assertion_error,
|
||||
|
||||
@@ -5,12 +5,14 @@ from eth2spec.test.helpers.custody import (
|
||||
from eth2spec.test.helpers.attestations import (
|
||||
get_valid_on_time_attestation,
|
||||
)
|
||||
from eth2spec.test.helpers.constants import (
|
||||
CUSTODY_GAME,
|
||||
MINIMAL,
|
||||
)
|
||||
from eth2spec.test.helpers.keys import privkeys
|
||||
from eth2spec.utils.ssz.ssz_typing import ByteList
|
||||
from eth2spec.test.helpers.state import get_balance, transition_to
|
||||
from eth2spec.test.context import (
|
||||
MINIMAL,
|
||||
CUSTODY_GAME,
|
||||
with_phases,
|
||||
spec_state_test,
|
||||
expect_assertion_error,
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
from eth2spec.test.helpers.constants import CUSTODY_GAME
|
||||
from eth2spec.test.helpers.custody import get_valid_early_derived_secret_reveal
|
||||
from eth2spec.test.helpers.state import next_epoch_via_block, get_balance
|
||||
from eth2spec.test.context import (
|
||||
CUSTODY_GAME,
|
||||
with_phases,
|
||||
spec_state_test,
|
||||
expect_assertion_error,
|
||||
|
||||
@@ -7,13 +7,15 @@ from eth2spec.test.helpers.attestations import (
|
||||
)
|
||||
from eth2spec.test.helpers.state import transition_to, transition_to_valid_shard_slot
|
||||
from eth2spec.test.context import (
|
||||
CUSTODY_GAME,
|
||||
MINIMAL,
|
||||
spec_state_test,
|
||||
with_phases,
|
||||
with_configs,
|
||||
)
|
||||
from eth2spec.test.phase0.block_processing.test_process_attestation import run_attestation_processing
|
||||
from eth2spec.test.helpers.constants import (
|
||||
CUSTODY_GAME,
|
||||
MINIMAL,
|
||||
)
|
||||
from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with
|
||||
|
||||
from eth2spec.test.custody_game.block_processing.test_process_chunk_challenge import (
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
from eth2spec.test.context import (
|
||||
from eth2spec.test.helpers.constants import (
|
||||
CUSTODY_GAME,
|
||||
)
|
||||
from eth2spec.test.helpers.custody import (
|
||||
|
||||
@@ -3,12 +3,14 @@ from eth2spec.test.helpers.custody import (
|
||||
)
|
||||
from eth2spec.test.helpers.state import transition_to
|
||||
from eth2spec.test.context import (
|
||||
CUSTODY_GAME,
|
||||
MINIMAL,
|
||||
with_phases,
|
||||
with_configs,
|
||||
spec_state_test,
|
||||
)
|
||||
from eth2spec.test.helpers.constants import (
|
||||
CUSTODY_GAME,
|
||||
MINIMAL,
|
||||
)
|
||||
from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with
|
||||
from eth2spec.test.custody_game.block_processing.test_process_custody_key_reveal import (
|
||||
run_custody_key_reveal_processing,
|
||||
|
||||
@@ -1,14 +1,16 @@
|
||||
from typing import Dict, Sequence
|
||||
|
||||
from eth2spec.test.context import (
|
||||
CUSTODY_GAME,
|
||||
MINIMAL,
|
||||
with_phases,
|
||||
spec_state_test,
|
||||
with_configs,
|
||||
)
|
||||
from eth2spec.test.helpers.attestations import get_valid_on_time_attestation
|
||||
from eth2spec.test.helpers.block import build_empty_block
|
||||
from eth2spec.test.helpers.constants import (
|
||||
CUSTODY_GAME,
|
||||
MINIMAL,
|
||||
)
|
||||
from eth2spec.test.helpers.custody import (
|
||||
get_custody_slashable_test_vector,
|
||||
get_valid_chunk_challenge,
|
||||
|
||||
tests/core/pyspec/eth2spec/test/helpers/altair/fork.py (new file, 42 lines)
@@ -0,0 +1,42 @@
ALTAIR_FORK_TEST_META_TAGS = {
    'fork': 'altair',
}


def run_fork_test(post_spec, pre_state):
    # Clean up state to be more realistic
    pre_state.current_epoch_attestations = []

    yield 'pre', pre_state

    post_state = post_spec.upgrade_to_altair(pre_state)

    # Stable fields
    stable_fields = [
        'genesis_time', 'genesis_validators_root', 'slot',
        # History
        'latest_block_header', 'block_roots', 'state_roots', 'historical_roots',
        # Eth1
        'eth1_data', 'eth1_data_votes', 'eth1_deposit_index',
        # Registry
        'validators', 'balances',
        # Randomness
        'randao_mixes',
        # Slashings
        'slashings',
        # Finality
        'justification_bits', 'previous_justified_checkpoint', 'current_justified_checkpoint', 'finalized_checkpoint',
    ]
    for field in stable_fields:
        assert getattr(pre_state, field) == getattr(post_state, field)

    # Modified fields
    modified_fields = ['fork']
    for field in modified_fields:
        assert getattr(pre_state, field) != getattr(post_state, field)

    assert pre_state.fork.current_version == post_state.fork.previous_version
    assert post_state.fork.current_version == post_spec.ALTAIR_FORK_VERSION
    assert post_state.fork.epoch == post_spec.get_current_epoch(post_state)

    yield 'post', post_state
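An Altair fork test would typically drive `run_fork_test` through the phase decorators from `context.py`. A rough sketch of such a caller, assuming the decorator stack and test name below (they are illustrative, not part of this diff):

```python
from eth2spec.test.context import spec_test, with_phases, with_state
from eth2spec.test.helpers.altair.fork import ALTAIR_FORK_TEST_META_TAGS, run_fork_test
from eth2spec.test.helpers.constants import ALTAIR, PHASE0
from eth2spec.test.utils import with_meta_tags


@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@spec_test
@with_state
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_fork_basic(spec, phases, state):
    # Upgrade the phase0 pre-state with the Altair spec and check the field invariants above.
    yield from run_fork_test(phases[ALTAIR], state)
```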
@@ -1,4 +1,5 @@
-from eth2spec.test.context import is_post_altair
+from eth2spec.test.context import is_post_altair, is_post_merge
+from eth2spec.test.helpers.execution_payload import build_empty_execution_payload
 from eth2spec.test.helpers.keys import privkeys
 from eth2spec.utils import bls
 from eth2spec.utils.bls import only_with_bls
@@ -94,6 +95,9 @@ def build_empty_block(spec, state, slot=None):
     if is_post_altair(spec):
         empty_block.body.sync_aggregate.sync_committee_signature = spec.G2_POINT_AT_INFINITY
 
+    if is_post_merge(spec):
+        empty_block.body.execution_payload = build_empty_execution_payload(spec, state)
+
     apply_randao_reveal(spec, state, empty_block)
     return empty_block
tests/core/pyspec/eth2spec/test/helpers/constants.py (new file, 34 lines)
@@ -0,0 +1,34 @@
from .typing import SpecForkName, ConfigName


#
# SpecForkName
#
# Some of the Spec module functionality is exposed here to deal with phase-specific changes.
PHASE0 = SpecForkName('phase0')
ALTAIR = SpecForkName('altair')
MERGE = SpecForkName('merge')

# Experimental phases (not included in default "ALL_PHASES"):
SHARDING = SpecForkName('sharding')
CUSTODY_GAME = SpecForkName('custody_game')
DAS = SpecForkName('das')

# The forks that pytest runs with.
ALL_PHASES = (PHASE0, ALTAIR, MERGE)
# The forks that output to the test vectors.
TESTGEN_FORKS = (PHASE0, ALTAIR, MERGE)
# TODO: everything runs in parallel to Altair.
# After features are rebased on the Altair fork, this can be reduced to just PHASE0.
FORKS_BEFORE_ALTAIR = (PHASE0, MERGE, SHARDING, CUSTODY_GAME, DAS)

# TODO: when rebasing Merge onto Altair, add ALTAIR to this tuple.
FORKS_BEFORE_MERGE = (PHASE0,)

#
# Config
#
MAINNET = ConfigName('mainnet')
MINIMAL = ConfigName('minimal')

ALL_CONFIGS = (MINIMAL, MAINNET)
@@ -9,6 +9,7 @@ def get_process_calls(spec):
     # or the old function will stick around.
     return [
         'process_justification_and_finalization',
+        'process_inactivity_updates',  # altair
         'process_rewards_and_penalties',
        'process_registry_updates',
         'process_reveal_deadlines',  # custody game
@@ -26,7 +27,7 @@ def get_process_calls(spec):
         'process_participation_flag_updates' if is_post_altair(spec) else (
             'process_participation_record_updates'
         ),
-        'process_sync_committee_updates',
+        'process_sync_committee_updates',  # altair
         'process_shard_epoch_increment'  # sharding
     ]
tests/core/pyspec/eth2spec/test/helpers/execution_payload.py (new file, 59 lines)
@@ -0,0 +1,59 @@
def build_empty_execution_payload(spec, state):
    """
    Assuming a pre-state of the same slot, build a valid ExecutionPayload without any transactions.
    """
    latest = state.latest_execution_payload_header
    timestamp = spec.compute_time_at_slot(state, state.slot)
    empty_txs = spec.List[spec.OpaqueTransaction, spec.MAX_EXECUTION_TRANSACTIONS]()

    payload = spec.ExecutionPayload(
        block_hash=spec.Hash32(),
        parent_hash=latest.block_hash,
        coinbase=spec.Bytes20(),
        state_root=latest.state_root,  # no changes to the state
        number=latest.number + 1,
        gas_limit=latest.gas_limit,  # retain same limit
        gas_used=0,  # empty block, 0 gas
        timestamp=timestamp,
        receipt_root=b"no receipts here" + b"\x00" * 16,  # TODO: root of empty MPT may be better.
        logs_bloom=spec.ByteVector[spec.BYTES_PER_LOGS_BLOOM](),  # TODO: zeroed logs bloom for empty logs ok?
        transactions=empty_txs,
    )
    # TODO: real RLP + block hash logic would be nice, requires RLP and keccak256 dependency however.
    payload.block_hash = spec.Hash32(spec.hash(payload.hash_tree_root() + b"FAKE RLP HASH"))

    return payload


def get_execution_payload_header(spec, execution_payload):
    return spec.ExecutionPayloadHeader(
        block_hash=execution_payload.block_hash,
        parent_hash=execution_payload.parent_hash,
        coinbase=execution_payload.coinbase,
        state_root=execution_payload.state_root,
        number=execution_payload.number,
        gas_limit=execution_payload.gas_limit,
        gas_used=execution_payload.gas_used,
        timestamp=execution_payload.timestamp,
        receipt_root=execution_payload.receipt_root,
        logs_bloom=execution_payload.logs_bloom,
        transactions_root=spec.hash_tree_root(execution_payload.transactions)
    )


def build_state_with_incomplete_transition(spec, state):
    return build_state_with_execution_payload_header(spec, state, spec.ExecutionPayloadHeader())


def build_state_with_complete_transition(spec, state):
    pre_state_payload = build_empty_execution_payload(spec, state)
    payload_header = get_execution_payload_header(spec, pre_state_payload)

    return build_state_with_execution_payload_header(spec, state, payload_header)


def build_state_with_execution_payload_header(spec, state, execution_payload_header):
    pre_state = state.copy()
    pre_state.latest_execution_payload_header = execution_payload_header

    return pre_state
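These helpers are what the Merge block helpers and payload-processing tests in this commit build on. A rough usage sketch under those assumptions (the test name here is invented for illustration):

```python
from eth2spec.test.context import spec_state_test, with_merge_and_later
from eth2spec.test.helpers.execution_payload import (
    build_empty_execution_payload,
    build_state_with_complete_transition,
)
from eth2spec.test.helpers.state import next_slot


@with_merge_and_later
@spec_state_test
def test_example_empty_payload(spec, state):
    # Pretend the merge transition already happened, advance one slot,
    # then build an (empty) payload on top of the recorded header.
    state = build_state_with_complete_transition(spec, state)
    next_slot(spec, state)
    payload = build_empty_execution_payload(spec, state)
    assert payload.parent_hash == state.latest_execution_payload_header.block_hash
    assert len(payload.transactions) == 0
```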
@@ -1,7 +1,5 @@
 from eth_utils import encode_hex
 
-from eth2spec.phase0 import spec as phase0_spec
-
 
 def get_anchor_root(spec, state):
     anchor_block_header = state.latest_block_header.copy()
@@ -58,8 +56,7 @@ def get_genesis_forkchoice_store(spec, genesis_state):
 
 def get_genesis_forkchoice_store_and_block(spec, genesis_state):
     assert genesis_state.slot == spec.GENESIS_SLOT
-    # The genesis block must be a Phase 0 `BeaconBlock`
-    genesis_block = phase0_spec.BeaconBlock(state_root=genesis_state.hash_tree_root())
+    genesis_block = spec.BeaconBlock(state_root=genesis_state.hash_tree_root())
     return spec.get_forkchoice_store(genesis_state, genesis_block), genesis_block
@@ -1,3 +1,8 @@
+from eth2spec.test.helpers.constants import (
+    ALTAIR,
+    FORKS_BEFORE_ALTAIR,
+    MERGE,
+)
 from eth2spec.test.helpers.keys import pubkeys
 
 
@@ -20,6 +25,13 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
     deposit_root = b'\x42' * 32
 
     eth1_block_hash = b'\xda' * 32
+    current_version = spec.GENESIS_FORK_VERSION
+
+    if spec.fork == ALTAIR:
+        current_version = spec.ALTAIR_FORK_VERSION
+    elif spec.fork == MERGE:
+        current_version = spec.MERGE_FORK_VERSION
+
     state = spec.BeaconState(
         genesis_time=0,
         eth1_deposit_index=len(validator_balances),
@@ -30,7 +42,7 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
         ),
         fork=spec.Fork(
             previous_version=spec.GENESIS_FORK_VERSION,
-            current_version=spec.GENESIS_FORK_VERSION,
+            current_version=current_version,
             epoch=spec.GENESIS_EPOCH,
         ),
         latest_block_header=spec.BeaconBlockHeader(body_root=spec.hash_tree_root(spec.BeaconBlockBody())),
@@ -47,8 +59,18 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
         if validator.effective_balance >= activation_threshold:
             validator.activation_eligibility_epoch = spec.GENESIS_EPOCH
             validator.activation_epoch = spec.GENESIS_EPOCH
+        if spec.fork not in FORKS_BEFORE_ALTAIR:
+            state.previous_epoch_participation.append(spec.ParticipationFlags(0b0000_0000))
+            state.current_epoch_participation.append(spec.ParticipationFlags(0b0000_0000))
+            state.inactivity_scores.append(spec.uint64(0))
 
     # Set genesis validators root for domain separation and chain versioning
     state.genesis_validators_root = spec.hash_tree_root(state.validators)
 
+    if spec.fork not in FORKS_BEFORE_ALTAIR:
+        # Fill in sync committees
+        # Note: A duplicate committee is assigned for the current and next committee at genesis
+        state.current_sync_committee = spec.get_next_sync_committee(state)
+        state.next_sync_committee = spec.get_next_sync_committee(state)
+
     return state
@@ -0,0 +1,5 @@
from random import Random


def randomize_inactivity_scores(spec, state, minimum=0, maximum=50000, rng=Random(4242)):
    state.inactivity_scores = [rng.randint(minimum, maximum) for _ in range(len(state.validators))]
tests/core/pyspec/eth2spec/test/helpers/random.py (new file, 113 lines)
@@ -0,0 +1,113 @@
|
||||
from random import Random
|
||||
|
||||
from eth2spec.test.helpers.attestations import cached_prepare_state_with_attestations
|
||||
from eth2spec.test.context import is_post_altair
|
||||
from eth2spec.test.helpers.deposits import mock_deposit
|
||||
from eth2spec.test.helpers.state import next_epoch
|
||||
|
||||
|
||||
def set_some_new_deposits(spec, state, rng):
|
||||
num_validators = len(state.validators)
|
||||
# Set ~1/10 to just recently deposited
|
||||
for index in range(num_validators):
|
||||
# If not already active, skip
|
||||
if not spec.is_active_validator(state.validators[index], spec.get_current_epoch(state)):
|
||||
continue
|
||||
if rng.randrange(num_validators) < num_validators // 10:
|
||||
mock_deposit(spec, state, index)
|
||||
# Set ~half of selected to eligible for activation
|
||||
if rng.choice([True, False]):
|
||||
state.validators[index].activation_eligibility_epoch = spec.get_current_epoch(state)
|
||||
|
||||
|
||||
def exit_random_validators(spec, state, rng):
|
||||
if spec.get_current_epoch(state) < 5:
|
||||
# Move epochs forward to allow for some validators already exited/withdrawable
|
||||
for _ in range(5):
|
||||
next_epoch(spec, state)
|
||||
|
||||
current_epoch = spec.get_current_epoch(state)
|
||||
# Exit ~1/2 of validators
|
||||
for index in spec.get_active_validator_indices(state, current_epoch):
|
||||
if rng.choice([True, False]):
|
||||
continue
|
||||
|
||||
validator = state.validators[index]
|
||||
validator.exit_epoch = rng.choice([current_epoch - 1, current_epoch - 2, current_epoch - 3])
|
||||
# ~1/2 are withdrawable
|
||||
if rng.choice([True, False]):
|
||||
validator.withdrawable_epoch = current_epoch
|
||||
else:
|
||||
validator.withdrawable_epoch = current_epoch + 1
|
||||
|
||||
|
||||
def slash_random_validators(spec, state, rng):
|
||||
# Slash ~1/2 of validators
|
||||
for index in range(len(state.validators)):
|
||||
# slash at least one validator
|
||||
if index == 0 or rng.choice([True, False]):
|
||||
spec.slash_validator(state, index)
|
||||
|
||||
|
||||
def randomize_epoch_participation(spec, state, epoch, rng):
|
||||
assert epoch in (spec.get_current_epoch(state), spec.get_previous_epoch(state))
|
||||
if not is_post_altair(spec):
|
||||
if epoch == spec.get_current_epoch(state):
|
||||
pending_attestations = state.current_epoch_attestations
|
||||
else:
|
||||
pending_attestations = state.previous_epoch_attestations
|
||||
for pending_attestation in pending_attestations:
|
||||
# ~1/3 have bad target
|
||||
if rng.randint(0, 2) == 0:
|
||||
pending_attestation.data.target.root = b'\x55' * 32
|
||||
# ~1/3 have bad head
|
||||
if rng.randint(0, 2) == 0:
|
||||
pending_attestation.data.beacon_block_root = b'\x66' * 32
|
||||
# ~50% participation
|
||||
pending_attestation.aggregation_bits = [rng.choice([True, False])
|
||||
for _ in pending_attestation.aggregation_bits]
|
||||
# Random inclusion delay
|
||||
pending_attestation.inclusion_delay = rng.randint(1, spec.SLOTS_PER_EPOCH)
|
||||
else:
|
||||
if epoch == spec.get_current_epoch(state):
|
||||
epoch_participation = state.current_epoch_participation
|
||||
else:
|
||||
epoch_participation = state.previous_epoch_participation
|
||||
for index in range(len(state.validators)):
|
||||
# ~1/3 have bad head or bad target or not timely enough
|
||||
is_timely_correct_head = rng.randint(0, 2) != 0
|
||||
flags = epoch_participation[index]
|
||||
|
||||
def set_flag(index, value):
|
||||
nonlocal flags
|
||||
flag = spec.ParticipationFlags(2**index)
|
||||
if value:
|
||||
flags |= flag
|
||||
else:
|
||||
flags &= 0xff ^ flag
|
||||
|
||||
set_flag(spec.TIMELY_HEAD_FLAG_INDEX, is_timely_correct_head)
|
||||
if is_timely_correct_head:
|
||||
# If timely head, then must be timely target
|
||||
set_flag(spec.TIMELY_TARGET_FLAG_INDEX, True)
|
||||
# If timely head, then must be timely source
|
||||
set_flag(spec.TIMELY_SOURCE_FLAG_INDEX, True)
|
||||
else:
|
||||
# ~50% of remaining have bad target or not timely enough
|
||||
set_flag(spec.TIMELY_TARGET_FLAG_INDEX, rng.choice([True, False]))
|
||||
# ~50% of remaining have bad source or not timely enough
|
||||
set_flag(spec.TIMELY_SOURCE_FLAG_INDEX, rng.choice([True, False]))
|
||||
epoch_participation[index] = flags
|
||||
|
||||
|
||||
def randomize_attestation_participation(spec, state, rng=Random(8020)):
|
||||
cached_prepare_state_with_attestations(spec, state)
|
||||
randomize_epoch_participation(spec, state, spec.get_previous_epoch(state), rng)
|
||||
randomize_epoch_participation(spec, state, spec.get_current_epoch(state), rng)
|
||||
|
||||
|
||||
def randomize_state(spec, state, rng=Random(8020)):
|
||||
set_some_new_deposits(spec, state, rng)
|
||||
exit_random_validators(spec, state, rng)
|
||||
slash_random_validators(spec, state, rng)
|
||||
randomize_attestation_participation(spec, state, rng)
|
||||
@@ -3,9 +3,16 @@ from lru import LRU
|
||||
|
||||
from eth2spec.phase0 import spec as spec_phase0
|
||||
from eth2spec.test.context import is_post_altair
|
||||
from eth2spec.test.helpers.attestations import cached_prepare_state_with_attestations
|
||||
from eth2spec.test.helpers.deposits import mock_deposit
|
||||
from eth2spec.test.helpers.state import next_epoch
|
||||
from eth2spec.test.helpers.state import (
|
||||
next_epoch,
|
||||
)
|
||||
from eth2spec.test.helpers.random import (
|
||||
set_some_new_deposits, exit_random_validators, slash_random_validators,
|
||||
randomize_state,
|
||||
)
|
||||
from eth2spec.test.helpers.attestations import (
|
||||
cached_prepare_state_with_attestations,
|
||||
)
|
||||
from eth2spec.utils.ssz.ssz_typing import Container, uint64, List
|
||||
|
||||
|
||||
@@ -62,13 +69,13 @@ def run_deltas(spec, state):
|
||||
|
||||
if is_post_altair(spec):
|
||||
def get_source_deltas(state):
|
||||
return spec.get_flag_index_deltas(state, spec.TIMELY_SOURCE_FLAG_INDEX, spec.TIMELY_SOURCE_WEIGHT)
|
||||
return spec.get_flag_index_deltas(state, spec.TIMELY_SOURCE_FLAG_INDEX)
|
||||
|
||||
def get_head_deltas(state):
|
||||
return spec.get_flag_index_deltas(state, spec.TIMELY_HEAD_FLAG_INDEX, spec.TIMELY_HEAD_WEIGHT)
|
||||
return spec.get_flag_index_deltas(state, spec.TIMELY_HEAD_FLAG_INDEX)
|
||||
|
||||
def get_target_deltas(state):
|
||||
return spec.get_flag_index_deltas(state, spec.TIMELY_TARGET_FLAG_INDEX, spec.TIMELY_TARGET_WEIGHT)
|
||||
return spec.get_flag_index_deltas(state, spec.TIMELY_TARGET_FLAG_INDEX)
|
||||
|
||||
yield from run_attestation_component_deltas(
|
||||
spec,
|
||||
@@ -133,14 +140,23 @@ def run_attestation_component_deltas(spec, state, component_delta_fn, matching_a
|
||||
validator = state.validators[index]
|
||||
enough_for_reward = has_enough_for_reward(spec, state, index)
|
||||
if index in matching_indices and not validator.slashed:
|
||||
if enough_for_reward:
|
||||
assert rewards[index] > 0
|
||||
if is_post_altair(spec):
|
||||
if not spec.is_in_inactivity_leak(state) and enough_for_reward:
|
||||
assert rewards[index] > 0
|
||||
else:
|
||||
assert rewards[index] == 0
|
||||
else:
|
||||
assert rewards[index] == 0
|
||||
if enough_for_reward:
|
||||
assert rewards[index] > 0
|
||||
else:
|
||||
assert rewards[index] == 0
|
||||
|
||||
assert penalties[index] == 0
|
||||
else:
|
||||
assert rewards[index] == 0
|
||||
if enough_for_reward:
|
||||
if is_post_altair(spec) and 'head' in deltas_name:
|
||||
assert penalties[index] == 0
|
||||
elif enough_for_reward:
|
||||
assert penalties[index] > 0
|
||||
else:
|
||||
assert penalties[index] == 0
|
||||
@@ -225,18 +241,19 @@ def run_get_inactivity_penalty_deltas(spec, state):
|
||||
if not is_post_altair(spec):
|
||||
cancel_base_rewards_per_epoch = spec.BASE_REWARDS_PER_EPOCH
|
||||
base_penalty = cancel_base_rewards_per_epoch * base_reward - spec.get_proposer_reward(state, index)
|
||||
else:
|
||||
base_penalty = sum(
|
||||
base_reward * numerator // spec.WEIGHT_DENOMINATOR
|
||||
for (_, numerator) in spec.get_flag_indices_and_weights()
|
||||
)
|
||||
|
||||
if not has_enough_for_reward(spec, state, index):
|
||||
assert penalties[index] == 0
|
||||
elif index in matching_attesting_indices or not has_enough_for_leak_penalty(spec, state, index):
|
||||
assert penalties[index] == base_penalty
|
||||
if is_post_altair(spec):
|
||||
assert penalties[index] == 0
|
||||
else:
|
||||
assert penalties[index] == base_penalty
|
||||
else:
|
||||
assert penalties[index] > base_penalty
|
||||
if is_post_altair(spec):
|
||||
assert penalties[index] > 0
|
||||
else:
|
||||
assert penalties[index] > base_penalty
|
||||
else:
|
||||
assert penalties[index] == 0
|
||||
|
||||
@@ -255,7 +272,6 @@ _cache_dict = LRU(size=10)
|
||||
|
||||
|
||||
def leaking(epochs=None):
|
||||
|
||||
def deco(fn):
|
||||
def entry(*args, spec, state, **kw):
|
||||
# If the pre-state is not already known in the LRU, then take it,
|
||||
@@ -275,49 +291,6 @@ def leaking(epochs=None):
|
||||
return deco
|
||||
|
||||
|
||||
def set_some_new_deposits(spec, state, rng):
|
||||
num_validators = len(state.validators)
|
||||
# Set ~1/10 to just recently deposited
|
||||
for index in range(num_validators):
|
||||
# If not already active, skip
|
||||
if not spec.is_active_validator(state.validators[index], spec.get_current_epoch(state)):
|
||||
continue
|
||||
if rng.randrange(num_validators) < num_validators // 10:
|
||||
mock_deposit(spec, state, index)
|
||||
# Set ~half of selected to eligible for activation
|
||||
if rng.choice([True, False]):
|
||||
state.validators[index].activation_eligibility_epoch = spec.get_current_epoch(state)
|
||||
|
||||
|
||||
def exit_random_validators(spec, state, rng):
|
||||
if spec.get_current_epoch(state) < 5:
|
||||
# Move epochs forward to allow for some validators already exited/withdrawable
|
||||
for _ in range(5):
|
||||
next_epoch(spec, state)
|
||||
|
||||
current_epoch = spec.get_current_epoch(state)
|
||||
# Exit ~1/2 of validators
|
||||
for index in spec.get_active_validator_indices(state, current_epoch):
|
||||
if rng.choice([True, False]):
|
||||
continue
|
||||
|
||||
validator = state.validators[index]
|
||||
validator.exit_epoch = rng.choice([current_epoch - 1, current_epoch - 2, current_epoch - 3])
|
||||
# ~1/2 are withdrawable
|
||||
if rng.choice([True, False]):
|
||||
validator.withdrawable_epoch = current_epoch
|
||||
else:
|
||||
validator.withdrawable_epoch = current_epoch + 1
|
||||
|
||||
|
||||
def slash_random_validators(spec, state, rng):
|
||||
# Slash ~1/2 of validators
|
||||
for index in range(len(state.validators)):
|
||||
# slash at least one validator
|
||||
if index == 0 or rng.choice([True, False]):
|
||||
spec.slash_validator(state, index)
|
||||
|
||||
|
||||
def run_test_empty(spec, state):
|
||||
# Do not add any attestations to state
|
||||
|
||||
@@ -521,49 +494,5 @@ def run_test_all_balances_too_low_for_reward(spec, state):
|
||||
|
||||
|
||||
def run_test_full_random(spec, state, rng=Random(8020)):
|
||||
set_some_new_deposits(spec, state, rng)
|
||||
exit_random_validators(spec, state, rng)
|
||||
slash_random_validators(spec, state, rng)
|
||||
|
||||
cached_prepare_state_with_attestations(spec, state)
|
||||
|
||||
if not is_post_altair(spec):
|
||||
for pending_attestation in state.previous_epoch_attestations:
|
||||
# ~1/3 have bad target
|
||||
if rng.randint(0, 2) == 0:
|
||||
pending_attestation.data.target.root = b'\x55' * 32
|
||||
# ~1/3 have bad head
|
||||
if rng.randint(0, 2) == 0:
|
||||
pending_attestation.data.beacon_block_root = b'\x66' * 32
|
||||
# ~50% participation
|
||||
pending_attestation.aggregation_bits = [rng.choice([True, False])
|
||||
for _ in pending_attestation.aggregation_bits]
|
||||
# Random inclusion delay
|
||||
pending_attestation.inclusion_delay = rng.randint(1, spec.SLOTS_PER_EPOCH)
|
||||
else:
|
||||
for index in range(len(state.validators)):
|
||||
# ~1/3 have bad head or bad target or not timely enough
|
||||
is_timely_correct_head = rng.randint(0, 2) != 0
|
||||
flags = state.previous_epoch_participation[index]
|
||||
|
||||
def set_flag(index, value):
|
||||
nonlocal flags
|
||||
flag = spec.ParticipationFlags(2**index)
|
||||
if value:
|
||||
flags |= flag
|
||||
else:
|
||||
flags &= 0xff ^ flag
|
||||
|
||||
set_flag(spec.TIMELY_HEAD_FLAG_INDEX, is_timely_correct_head)
|
||||
if is_timely_correct_head:
|
||||
# If timely head, then must be timely target
|
||||
set_flag(spec.TIMELY_TARGET_FLAG_INDEX, True)
|
||||
# If timely head, then must be timely source
|
||||
set_flag(spec.TIMELY_SOURCE_FLAG_INDEX, True)
|
||||
else:
|
||||
# ~50% of remaining have bad target or not timely enough
|
||||
set_flag(spec.TIMELY_TARGET_FLAG_INDEX, rng.choice([True, False]))
|
||||
# ~50% of remaining have bad source or not timely enough
|
||||
set_flag(spec.TIMELY_SOURCE_FLAG_INDEX, rng.choice([True, False]))
|
||||
state.previous_epoch_participation[index] = flags
|
||||
randomize_state(spec, state, rng)
|
||||
yield from run_deltas(spec, state)
|
||||
|
||||
@@ -42,9 +42,10 @@ def transition_to_slot_via_block(spec, state, slot):
 
 def transition_to_valid_shard_slot(spec, state):
     """
-    Transition to slot `spec.SHARDING_FORK_SLOT + 1` and fork at `spec.SHARDING_FORK_SLOT`.
+    Transition to slot `compute_epoch_at_slot(spec.SHARDING_FORK_EPOCH) + 1`
+    and fork at `compute_epoch_at_slot(spec.SHARDING_FORK_EPOCH)`.
     """
-    transition_to(spec, state, spec.SHARDING_FORK_SLOT)
+    transition_to(spec, state, spec.compute_epoch_at_slot(spec.SHARDING_FORK_EPOCH))
     next_slot(spec, state)
tests/core/pyspec/eth2spec/test/helpers/typing.py (new file, 4 lines)
@@ -0,0 +1,4 @@
from typing import NewType

SpecForkName = NewType("SpecForkName", str)
ConfigName = NewType("ConfigName", str)
tests/core/pyspec/eth2spec/test/merge/__init__.py (new, empty file)
@@ -0,0 +1,201 @@
|
||||
from eth2spec.test.helpers.execution_payload import (
|
||||
build_empty_execution_payload,
|
||||
get_execution_payload_header,
|
||||
build_state_with_incomplete_transition,
|
||||
build_state_with_complete_transition,
|
||||
)
|
||||
from eth2spec.test.context import spec_state_test, expect_assertion_error, with_merge_and_later
|
||||
from eth2spec.test.helpers.state import next_slot
|
||||
|
||||
|
||||
def run_execution_payload_processing(spec, state, execution_payload, valid=True, execution_valid=True):
|
||||
"""
|
||||
Run ``process_execution_payload``, yielding:
|
||||
- pre-state ('pre')
|
||||
- execution payload ('execution_payload')
|
||||
- execution details, to mock EVM execution ('execution.yml', a dict with 'execution_valid' key and boolean value)
|
||||
- post-state ('post').
|
||||
If ``valid == False``, run expecting ``AssertionError``
|
||||
"""
|
||||
|
||||
yield 'pre', state
|
||||
yield 'execution', {'execution_valid': execution_valid}
|
||||
yield 'execution_payload', execution_payload
|
||||
|
||||
called_new_block = False
|
||||
|
||||
class TestEngine(spec.NoopExecutionEngine):
|
||||
def new_block(self, payload) -> bool:
|
||||
nonlocal called_new_block, execution_valid
|
||||
called_new_block = True
|
||||
assert payload == execution_payload
|
||||
return execution_valid
|
||||
|
||||
if not valid:
|
||||
expect_assertion_error(lambda: spec.process_execution_payload(state, execution_payload, TestEngine()))
|
||||
yield 'post', None
|
||||
return
|
||||
|
||||
spec.process_execution_payload(state, execution_payload, TestEngine())
|
||||
|
||||
# Make sure we called the engine
|
||||
assert called_new_block
|
||||
|
||||
yield 'post', state
|
||||
|
||||
assert state.latest_execution_payload_header == get_execution_payload_header(spec, execution_payload)
|
||||
|
||||
|
||||
@with_merge_and_later
|
||||
@spec_state_test
|
||||
def test_success_first_payload(spec, state):
|
||||
# pre-state
|
||||
state = build_state_with_incomplete_transition(spec, state)
|
||||
next_slot(spec, state)
|
||||
|
||||
# execution payload
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
|
||||
yield from run_execution_payload_processing(spec, state, execution_payload)
|
||||
|
||||
|
||||
@with_merge_and_later
|
||||
@spec_state_test
|
||||
def test_success_regular_payload(spec, state):
|
||||
# pre-state
|
||||
state = build_state_with_complete_transition(spec, state)
|
||||
next_slot(spec, state)
|
||||
|
||||
# execution payload
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
|
||||
yield from run_execution_payload_processing(spec, state, execution_payload)
|
||||
|
||||
|
||||
@with_merge_and_later
|
||||
@spec_state_test
|
||||
def test_success_first_payload_with_gap_slot(spec, state):
|
||||
# pre-state
|
||||
state = build_state_with_incomplete_transition(spec, state)
|
||||
next_slot(spec, state)
|
||||
next_slot(spec, state)
|
||||
|
||||
# execution payload
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
|
||||
yield from run_execution_payload_processing(spec, state, execution_payload)
|
||||
|
||||
|
||||
@with_merge_and_later
|
||||
@spec_state_test
|
||||
def test_success_regular_payload_with_gap_slot(spec, state):
|
||||
# pre-state
|
||||
state = build_state_with_complete_transition(spec, state)
|
||||
next_slot(spec, state)
|
||||
next_slot(spec, state)
|
||||
|
||||
# execution payload
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
|
||||
yield from run_execution_payload_processing(spec, state, execution_payload)
|
||||
|
||||
|
||||
@with_merge_and_later
|
||||
@spec_state_test
|
||||
def test_bad_execution_first_payload(spec, state):
|
||||
# completely valid payload, but execution itself fails (e.g. block exceeds gas limit)
|
||||
|
||||
# pre-state
|
||||
state = build_state_with_incomplete_transition(spec, state)
|
||||
next_slot(spec, state)
|
||||
|
||||
# execution payload
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
|
||||
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False, execution_valid=False)
|
||||
|
||||
|
||||
@with_merge_and_later
|
||||
@spec_state_test
|
||||
def test_bad_execution_regular_payload(spec, state):
|
||||
# completely valid payload, but execution itself fails (e.g. block exceeds gas limit)
|
||||
|
||||
# pre-state
|
||||
state = build_state_with_complete_transition(spec, state)
|
||||
next_slot(spec, state)
|
||||
|
||||
# execution payload
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
|
||||
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False, execution_valid=False)
|
||||
|
||||
|
||||
@with_merge_and_later
|
||||
@spec_state_test
|
||||
def test_bad_parent_hash_regular_payload(spec, state):
|
||||
# pre-state
|
||||
state = build_state_with_complete_transition(spec, state)
|
||||
next_slot(spec, state)
|
||||
|
||||
# execution payload
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
execution_payload.parent_hash = spec.Hash32()
|
||||
|
||||
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_merge_and_later
|
||||
@spec_state_test
|
||||
def test_bad_number_regular_payload(spec, state):
|
||||
# pre-state
|
||||
state = build_state_with_complete_transition(spec, state)
|
||||
next_slot(spec, state)
|
||||
|
||||
# execution payload
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
execution_payload.number = execution_payload.number + 1
|
||||
|
||||
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_merge_and_later
|
||||
@spec_state_test
|
||||
def test_bad_everything_regular_payload(spec, state):
|
||||
# pre-state
|
||||
state = build_state_with_complete_transition(spec, state)
|
||||
next_slot(spec, state)
|
||||
|
||||
# execution payload
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
execution_payload.parent_hash = spec.Hash32()
|
||||
execution_payload.number = execution_payload.number + 1
|
||||
|
||||
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_merge_and_later
|
||||
@spec_state_test
|
||||
def test_bad_timestamp_first_payload(spec, state):
|
||||
# pre-state
|
||||
state = build_state_with_incomplete_transition(spec, state)
|
||||
next_slot(spec, state)
|
||||
|
||||
# execution payload
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
execution_payload.timestamp = execution_payload.timestamp + 1
|
||||
|
||||
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_merge_and_later
|
||||
@spec_state_test
|
||||
def test_bad_timestamp_regular_payload(spec, state):
|
||||
# pre-state
|
||||
state = build_state_with_complete_transition(spec, state)
|
||||
next_slot(spec, state)
|
||||
|
||||
# execution payload
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
execution_payload.timestamp = execution_payload.timestamp + 1
|
||||
|
||||
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
|
||||
tests/core/pyspec/eth2spec/test/merge/sanity/test_blocks.py (new file, 25 lines)
@@ -0,0 +1,25 @@
from eth2spec.test.helpers.state import (
    state_transition_and_sign_block
)
from eth2spec.test.helpers.block import (
    build_empty_block_for_next_slot
)
from eth2spec.test.context import (
    with_merge_and_later, spec_state_test
)


@with_merge_and_later
@spec_state_test
def test_empty_block_transition(spec, state):
    yield 'pre', state

    block = build_empty_block_for_next_slot(spec, state)
    assert len(block.body.execution_payload.transactions) == 0

    signed_block = state_transition_and_sign_block(spec, state, block)

    yield 'blocks', [signed_block]
    yield 'post', state

# TODO: tests with EVM, mock or replacement?
@@ -1,4 +1,5 @@
-from eth2spec.test.context import PHASE0, spec_state_test, with_phases
+from eth2spec.test.context import spec_state_test, with_phases
+from eth2spec.test.helpers.constants import PHASE0
 from eth2spec.test.helpers.epoch_processing import (
     run_epoch_processing_with
 )
|
||||
@@ -1,7 +1,6 @@
|
||||
from eth_utils import encode_hex
|
||||
|
||||
from eth2spec.test.context import (
|
||||
MINIMAL,
|
||||
is_post_altair,
|
||||
spec_state_test,
|
||||
with_all_phases,
|
||||
@@ -9,6 +8,7 @@ from eth2spec.test.context import (
|
||||
)
|
||||
from eth2spec.test.helpers.attestations import get_valid_attestation, next_epoch_with_attestations
|
||||
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
|
||||
from eth2spec.test.helpers.constants import MINIMAL
|
||||
from eth2spec.test.helpers.fork_choice import (
|
||||
tick_and_run_on_attestation,
|
||||
tick_and_run_on_block,
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
from eth2spec.test.context import (
|
||||
MINIMAL,
|
||||
is_post_altair,
|
||||
single_phase,
|
||||
spec_test,
|
||||
with_configs,
|
||||
with_all_phases,
|
||||
)
|
||||
from eth2spec.test.helpers.constants import MINIMAL
|
||||
from eth2spec.test.helpers.deposits import (
|
||||
prepare_full_genesis_deposits,
|
||||
prepare_random_genesis_deposits,
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
from eth2spec.test.context import (
|
||||
MINIMAL,
|
||||
is_post_altair,
|
||||
spec_test,
|
||||
single_phase,
|
||||
with_configs,
|
||||
with_all_phases,
|
||||
)
|
||||
from eth2spec.test.helpers.constants import MINIMAL
|
||||
from eth2spec.test.helpers.deposits import (
|
||||
prepare_full_genesis_deposits,
|
||||
)
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
from eth2spec.test.context import PHASE0, with_all_phases, with_phases, spec_state_test
|
||||
from eth2spec.test.context import with_all_phases, with_phases, spec_state_test
|
||||
from eth2spec.test.helpers.constants import PHASE0
|
||||
import eth2spec.test.helpers.rewards as rewards_helpers
|
||||
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
from eth2spec.test.context import PHASE0, with_all_phases, with_phases, spec_state_test
|
||||
from eth2spec.test.context import with_all_phases, with_phases, spec_state_test
|
||||
from eth2spec.test.helpers.constants import PHASE0
|
||||
from eth2spec.test.helpers.rewards import leaking
|
||||
import eth2spec.test.helpers.rewards as rewards_helpers
|
||||
|
||||
|
||||
@@ -39,8 +39,16 @@ def test_full_random_3(spec, state):
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@spec_test
@single_phase
def test_full_random_low_balances(spec, state):
    yield from rewards_helpers.run_test_full_random(spec, state)
def test_full_random_low_balances_0(spec, state):
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(5050))


@with_all_phases
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@spec_test
@single_phase
def test_full_random_low_balances_1(spec, state):
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(6060))


@with_all_phases
@@ -48,4 +56,4 @@ def test_full_random_low_balances(spec, state):
@spec_test
@single_phase
def test_full_random_misc_balances(spec, state):
    yield from rewards_helpers.run_test_full_random(spec, state)
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(7070))
@@ -24,9 +24,8 @@ from eth2spec.test.helpers.multi_operations import (
    run_slash_and_exit,
    run_test_full_random_operations,
)

from eth2spec.test.helpers.constants import PHASE0, MINIMAL
from eth2spec.test.context import (
    PHASE0, MINIMAL,
    spec_test, spec_state_test, dump_skipping_message,
    with_phases, with_all_phases, single_phase,
    expect_assertion_error, always_bls,
@@ -931,8 +930,11 @@ def test_balance_driven_status_transitions(spec, state):
    assert state.validators[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH


# Requires always_bls because the historical root period and sync committee period are the same length,
# so this epoch transition also computes new sync committees, which requires aggregation
@with_all_phases
@spec_state_test
@always_bls
def test_historical_batch(spec, state):
    state.slot += spec.SLOTS_PER_HISTORICAL_ROOT - (state.slot % spec.SLOTS_PER_HISTORICAL_ROOT) - 1
    pre_historical_roots_len = len(state.historical_roots)
@@ -1,6 +1,7 @@
from eth2spec.test.context import PHASE0, ALTAIR, with_all_phases, spec_state_test
from eth2spec.test.context import with_all_phases, spec_state_test
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, MERGE
from eth2spec.test.helpers.state import transition_to, state_transition_and_sign_block, next_epoch, next_slot
from eth2spec.test.helpers.fork_choice import get_genesis_forkchoice_store

@@ -18,7 +19,7 @@ def run_on_attestation(spec, state, store, attestation, valid=True):
    spec.on_attestation(store, attestation)

    sample_index = indexed_attestation.attesting_indices[0]
    if spec.fork in (PHASE0, ALTAIR):
    if spec.fork in (PHASE0, ALTAIR, MERGE):
        latest_message = spec.LatestMessage(
            epoch=attestation.data.target.epoch,
            root=attestation.data.beacon_block_root,
@@ -224,14 +224,19 @@ def test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state):
    next_epoch(spec, state)
    spec.on_tick(store, store.time + state.slot * spec.SECONDS_PER_SLOT)
    state, store, last_signed_block = apply_next_epoch_with_attestations(spec, state, store)
    next_epoch(spec, state)
    spec.on_tick(store, store.time + state.slot * spec.SECONDS_PER_SLOT)
    last_block_root = hash_tree_root(last_signed_block.message)

    # Mock justified block in store
    # Mock fictitious justified checkpoint in store
    store.justified_checkpoint = spec.Checkpoint(
        epoch=spec.compute_epoch_at_slot(last_signed_block.message.slot),
        root=spec.Root("0x4a55535449464945440000000000000000000000000000000000000000000000")
    )

    next_epoch(spec, state)
    spec.on_tick(store, store.time + state.slot * spec.SECONDS_PER_SLOT)

    # Create new higher justified checkpoint not in branch of store's justified checkpoint
    just_block = build_empty_block_for_next_slot(spec, state)
    # Slot is same as justified checkpoint so does not trigger an override in the store
    just_block.slot = spec.compute_start_slot_at_epoch(store.justified_checkpoint.epoch)
    store.blocks[just_block.hash_tree_root()] = just_block

    # Step time past safe slots
@@ -274,14 +279,19 @@ def test_on_block_outside_safe_slots_but_finality(spec, state):
    next_epoch(spec, state)
    spec.on_tick(store, store.time + state.slot * spec.SECONDS_PER_SLOT)
    state, store, last_signed_block = apply_next_epoch_with_attestations(spec, state, store)
    next_epoch(spec, state)
    spec.on_tick(store, store.time + state.slot * spec.SECONDS_PER_SLOT)
    last_block_root = hash_tree_root(last_signed_block.message)

    # Mock justified block in store
    # Mock fictitious justified checkpoint in store
    store.justified_checkpoint = spec.Checkpoint(
        epoch=spec.compute_epoch_at_slot(last_signed_block.message.slot),
        root=spec.Root("0x4a55535449464945440000000000000000000000000000000000000000000000")
    )

    next_epoch(spec, state)
    spec.on_tick(store, store.time + state.slot * spec.SECONDS_PER_SLOT)

    # Create new higher justified checkpoint not in branch of store's justified checkpoint
    just_block = build_empty_block_for_next_slot(spec, state)
    # Slot is same as justified checkpoint so does not trigger an override in the store
    just_block.slot = spec.compute_start_slot_at_epoch(store.justified_checkpoint.epoch)
    store.blocks[just_block.hash_tree_root()] = just_block

    # Step time past safe slots
@@ -291,13 +301,15 @@ def test_on_block_outside_safe_slots_but_finality(spec, state):
    # Mock justified and finalized update in state
    just_fin_state = store.block_states[last_block_root]
    new_justified = spec.Checkpoint(
        epoch=store.justified_checkpoint.epoch + 1,
        epoch=spec.compute_epoch_at_slot(just_block.slot) + 1,
        root=just_block.hash_tree_root(),
    )
    assert new_justified.epoch > store.justified_checkpoint.epoch
    new_finalized = spec.Checkpoint(
        epoch=store.finalized_checkpoint.epoch + 1,
        epoch=spec.compute_epoch_at_slot(just_block.slot),
        root=just_block.parent_root,
    )
    assert new_finalized.epoch > store.finalized_checkpoint.epoch
    just_fin_state.current_justified_checkpoint = new_justified
    just_fin_state.finalized_checkpoint = new_finalized

@@ -1,8 +1,8 @@
from eth2spec.test.context import (
    spec_state_test,
    always_bls, with_phases, with_all_phases,
    PHASE0,
)
from eth2spec.test.helpers.constants import PHASE0
from eth2spec.test.helpers.attestations import build_attestation_data, get_valid_attestation
from eth2spec.test.helpers.block import build_empty_block
from eth2spec.test.helpers.deposits import prepare_state_and_deposit

@@ -1,8 +1,8 @@
from eth2spec.test.context import (
    SHARDING,
    with_phases,
    spec_state_test,
)
from eth2spec.test.helpers.constants import SHARDING
from eth2spec.test.helpers.state import next_epoch
@@ -1,3 +1,4 @@
import inspect
from typing import Dict, Any
from eth2spec.utils.ssz.ssz_typing import View
from eth2spec.utils.ssz.ssz_impl import serialize
@@ -93,3 +94,50 @@ def with_meta_tags(tags: Dict[str, Any]):
            yield k, 'meta', v
        return entry
    return runner

def build_transition_test(fn, pre_fork_name, post_fork_name, fork_epoch=None):
    """
    Handles the inner plumbing to generate `transition_test`s.
    See that decorator in `context.py` for more information.
    """
    def _adapter(*args, **kwargs):
        post_spec = kwargs["phases"][post_fork_name]

        pre_fork_counter = 0

        def pre_tag(obj):
            nonlocal pre_fork_counter
            pre_fork_counter += 1
            return obj

        def post_tag(obj):
            return obj

        yield "post_fork", "meta", post_fork_name

        has_fork_epoch = False
        if fork_epoch:
            kwargs["fork_epoch"] = fork_epoch
            has_fork_epoch = True
            yield "fork_epoch", "meta", fork_epoch

        # massage args to handle an optional custom state using
        # `with_custom_state` decorator
        expected_args = inspect.getfullargspec(fn)
        if "phases" not in expected_args.kwonlyargs:
            kwargs.pop("phases", None)

        for part in fn(*args,
                       post_spec=post_spec,
                       pre_tag=pre_tag,
                       post_tag=post_tag,
                       **kwargs):
            if part[0] == "fork_epoch":
                has_fork_epoch = True
            yield part
        assert has_fork_epoch

        if pre_fork_counter > 0:
            yield "fork_block", "meta", pre_fork_counter - 1
    return _adapter

@@ -6,3 +6,6 @@ from remerkleable.basic import boolean, bit, uint, byte, uint8, uint16, uint32,
from remerkleable.bitfields import Bitvector, Bitlist
from remerkleable.byte_arrays import ByteVector, Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, ByteList
from remerkleable.core import BasicView, View, Path


Bytes20 = ByteVector[20]  # type: ignore
@@ -139,6 +139,8 @@ E.g. `pre.ssz_snappy`, `deposit.ssz_snappy`, `post.ssz_snappy`.
Diffing a `pre.ssz_snappy` and `post.ssz_snappy` provides all the information for testing, when decompressed and decoded.
Then the difference between pre and post can be compared to anything that changes the pre state, e.g. `deposit.ssz_snappy`.

Note that by default, the SSZ data is in the given test case's <fork or phase name> version, e.g. for an `altair` test case, use the `altair.BeaconState` container to deserialize the given state.

YAML is generally used for test metadata, and for tests that do not use SSZ: e.g. shuffling and BLS tests.
In this case, there is no point in adding special SSZ types, and the size and efficiency of YAML is acceptable.
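For illustration only (not part of the diff): a minimal sketch of decompressing and decoding such a file, assuming the `python-snappy` package and a built `eth2spec` package are available, and using `pre.ssz_snappy` from an `altair` test case as the example input:

```python
# Sketch: decode a snappy-compressed SSZ state from a test vector.
import snappy  # python-snappy
from eth2spec.altair import spec

with open("pre.ssz_snappy", "rb") as f:
    pre_state = spec.BeaconState.decode_bytes(snappy.decompress(f.read()))

print(pre_state.slot)
```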
@@ -32,9 +32,8 @@ The provided pre-state is already transitioned to just before the specific sub-t

Sub-transitions:

Sub-transitions:

- `justification_and_finalization`
- `inactivity_penalty_updates`
- `rewards_and_penalties`
- `registry_updates`
- `slashings`
@@ -44,5 +43,6 @@ Sub-transitions:
- `randao_mixes_reset`
- `historical_roots_update`
- `participation_record_updates`
- `sync_committee_updates`

The resulting state should match the expected `post` state.
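As a rough illustration of how a consumer might exercise one sub-transition (a sketch only; `spec`, `pre` and `post` stand for the loaded spec module and the decoded pre/post states):

```python
# Sketch: run the named sub-transition on the pre-state and compare roots.
sub_transition = "justification_and_finalization"  # taken from the test handler name
getattr(spec, f"process_{sub_transition}")(pre)
assert pre.hash_tree_root() == post.hash_tree_root()
```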
@@ -33,17 +33,23 @@ This excludes the other parts of the block-transition.

Operations:

| *`operation-name`*      | *`operation-object`*  | *`input name`*       | *`processing call`*                                              |
|-------------------------|-----------------------|----------------------|------------------------------------------------------------------|
| `attestation`           | `Attestation`         | `attestation`        | `process_attestation(state, attestation)`                        |
| `attester_slashing`     | `AttesterSlashing`    | `attester_slashing`  | `process_attester_slashing(state, attester_slashing)`            |
| `block_header`          | `BeaconBlock`         | **`block`**          | `process_block_header(state, block)`                             |
| `deposit`               | `Deposit`             | `deposit`            | `process_deposit(state, deposit)`                                 |
| `proposer_slashing`     | `ProposerSlashing`    | `proposer_slashing`  | `process_proposer_slashing(state, proposer_slashing)`            |
| `voluntary_exit`        | `SignedVoluntaryExit` | `voluntary_exit`     | `process_voluntary_exit(state, voluntary_exit)`                  |
| `sync_aggregate`        | `SyncAggregate`       | `sync_aggregate`     | `process_sync_committee(state, sync_aggregate)` (new in Altair)  |
| *`operation-name`*      | *`operation-object`*  | *`input name`*       | *`processing call`*                                                    |
|-------------------------|-----------------------|----------------------|------------------------------------------------------------------------|
| `attestation`           | `Attestation`         | `attestation`        | `process_attestation(state, attestation)`                              |
| `attester_slashing`     | `AttesterSlashing`    | `attester_slashing`  | `process_attester_slashing(state, attester_slashing)`                  |
| `block_header`          | `BeaconBlock`         | **`block`**          | `process_block_header(state, block)`                                   |
| `deposit`               | `Deposit`             | `deposit`            | `process_deposit(state, deposit)`                                       |
| `proposer_slashing`     | `ProposerSlashing`    | `proposer_slashing`  | `process_proposer_slashing(state, proposer_slashing)`                  |
| `voluntary_exit`        | `SignedVoluntaryExit` | `voluntary_exit`     | `process_voluntary_exit(state, voluntary_exit)`                        |
| `sync_aggregate`        | `SyncAggregate`       | `sync_aggregate`     | `process_sync_committee(state, sync_aggregate)` (new in Altair)        |
| `execution_payload`     | `ExecutionPayload`    | `execution_payload`  | `process_execution_payload(state, execution_payload)` (new in Merge)   |

Note that `block_header` is not strictly an operation (and is a full `Block`), but it is processed in the same manner, and hence included here.

The `execution_payload` processing normally requires a `verify_execution_state_transition(execution_payload)`,
a responsibility of an (external) execution engine.
During testing, this execution is mocked and an `execution.yml` is provided instead:
a dict containing an `execution_valid` boolean field with the verification result.

The resulting state should match the expected `post` state, or if the `post` state is left blank,
the handler should reject the input operation as invalid.
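A sketch of reading the mocked verification result (assuming PyYAML; the field name comes from the format description above):

```python
# Sketch: load the mocked execution engine verdict for an operations test case.
import yaml

with open("execution.yml") as f:
    execution_valid = yaml.safe_load(f)["execution_valid"]
```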
tests/formats/transition/README.md (new file, 72 lines)
@@ -0,0 +1,72 @@
# Transition testing

Transition tests to cover processing the chain across a fork boundary.

Each test case contains a `post_fork` key in the `meta.yaml` that indicates the target fork which also fixes the fork the test begins in.

Clients should assume forks happen sequentially in the following manner:

0. `phase0`
1. `altair`

For example, if a test case has `post_fork` of `altair`, the test consumer should assume the test begins in `phase0` and use that specification to process the initial state and any blocks up until the fork epoch. After the fork happens, the test consumer should use the specification according to the `altair` fork to process the remaining data.

## Test case format

### `meta.yaml`

```yaml
post_fork: string -- String name of the spec after the fork.
fork_epoch: int -- The epoch at which the fork takes place.
fork_block: int -- Optional. The `<index>` of the last block on the initial fork.
blocks_count: int -- The number of blocks processed in this test.
```

*Note*: There may be a fork transition function to run at the `fork_epoch`.
Refer to the specs for the relevant fork for further details.
### `pre.ssz_snappy`

A SSZ-snappy encoded `BeaconState` according to the specification of
the initial fork, the state before running the block transitions.

### `blocks_<index>.ssz_snappy`

A series of files, with `<index>` in range `[0, blocks_count)`.
Blocks must be processed in order, following the main transition function
(i.e. process slot and epoch transitions in between blocks as normal).

Blocks are encoded as `SignedBeaconBlock`s from the relevant spec version
as indicated by the `post_fork` and `fork_block` data in the `meta.yaml`.

As blocks span fork boundaries, a `fork_block` number is given in
the `meta.yaml` to help resolve which blocks belong to which fork.

The `fork_block` is the index in the test data of the **last** block
of the **initial** fork.

To demonstrate, the following diagram shows slots with `_` and blocks
in those slots as `x`. The fork happens at the epoch delineated by the `|`.

```
x x x x
_ _ _ _ | _ _ _ _
```

The `blocks_count` value in the `meta.yaml` in this case is `4` where the
`fork_block` value in the `meta.yaml` is `1`. If this particular example were
testing the fork from Phase 0 to Altair, blocks with indices `0, 1` represent
`SignedBeaconBlock`s defined in the Phase 0 spec and blocks with indices `2, 3`
represent `SignedBeaconBlock`s defined in the Altair spec.

*Note*: If `fork_block` is missing, then all block data should be
interpreted as belonging to the post fork.
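To make the `fork_block` semantics concrete, a small illustrative helper (not part of the test format; names are placeholders) could pick the spec used to decode each block:

```python
def spec_for_block(index, fork_block, pre_spec, post_spec):
    # Blocks up to and including `fork_block` belong to the initial fork;
    # later blocks, and all blocks when `fork_block` is absent, belong to the post fork.
    if fork_block is not None and index <= fork_block:
        return pre_spec
    return post_spec
```

In the diagram above, indices `0` and `1` would resolve to the Phase 0 spec and indices `2` and `3` to the Altair spec.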
### `post.ssz_snappy`

A SSZ-snappy encoded `BeaconState` according to the specification of
the post fork, the state after running the block transitions.

## Condition

The resulting state should match the expected `post` state.
@@ -164,7 +164,7 @@ Another example, to generate tests from pytests:
```python
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair
from eth2spec.test.context import PHASE0, ALTAIR
from eth2spec.test.helpers.constants import PHASE0, ALTAIR

from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
@@ -12,7 +12,7 @@ from eth_utils import (
import milagro_bls_binding as milagro_bls

from eth2spec.utils import bls
from eth2spec.test.context import PHASE0
from eth2spec.test.helpers.constants import PHASE0
from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing
@@ -1,10 +1,11 @@
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair
from eth2spec.test.context import PHASE0, ALTAIR
from eth2spec.merge import spec as spec_merge
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, MERGE


specs = (spec_phase0, spec_altair)
specs = (spec_phase0, spec_altair, spec_merge)


if __name__ == "__main__":
@@ -27,6 +28,10 @@ if __name__ == "__main__":
        **phase_0_mods,
    }  # also run the previous phase 0 tests

    # No epoch-processing changes in Merge and previous testing repeats with new types, so no additional tests required.
    # TODO: rebase onto Altair testing later.
    merge_mods = phase_0_mods

    # TODO Custody Game testgen is disabled for now
    # custody_game_mods = {**{key: 'eth2spec.test.custody_game.epoch_processing.test_process_' + key for key in [
    #     'reveal_deadlines',
@@ -37,6 +42,7 @@ if __name__ == "__main__":
    all_mods = {
        PHASE0: phase_0_mods,
        ALTAIR: altair_mods,
        MERGE: merge_mods,
    }

    run_state_test_generators(runner_name="epoch_processing", specs=specs, all_mods=all_mods)
@@ -1,19 +1,22 @@
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair
from eth2spec.test.context import PHASE0, ALTAIR
from eth2spec.merge import spec as spec_merge
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, MERGE


specs = (spec_phase0, spec_altair)
specs = (spec_phase0, spec_altair, spec_merge)


if __name__ == "__main__":
    phase_0_mods = {'finality': 'eth2spec.test.phase0.finality.test_finality'}
    altair_mods = phase_0_mods  # No additional altair specific finality tests
    altair_mods = phase_0_mods  # No additional Altair specific finality tests
    merge_mods = phase_0_mods  # No additional Merge specific finality tests

    all_mods = {
        PHASE0: phase_0_mods,
        ALTAIR: altair_mods,
        MERGE: merge_mods,
    }

    run_state_test_generators(runner_name="finality", specs=specs, all_mods=all_mods)
@@ -1,10 +1,11 @@
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair
from eth2spec.test.context import PHASE0, ALTAIR
from eth2spec.merge import spec as spec_merge
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, MERGE


specs = (spec_phase0, spec_altair)
specs = (spec_phase0, spec_altair, spec_merge)


if __name__ == "__main__":
@@ -13,10 +14,13 @@ if __name__ == "__main__":
    ]}
    # No additional Altair specific finality tests, yet.
    altair_mods = phase_0_mods
    # No specific Merge tests yet. TODO: rebase onto Altair testing later.
    merge_mods = phase_0_mods

    all_mods = {
        PHASE0: phase_0_mods,
        ALTAIR: altair_mods,
        MERGE: merge_mods,
    }

    run_state_test_generators(runner_name="fork_choice", specs=specs, all_mods=all_mods)
@@ -1,7 +1,7 @@
from importlib import reload
from typing import Iterable

from eth2spec.test.context import PHASE0, ALTAIR, MINIMAL, MAINNET
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, MINIMAL, MAINNET
from eth2spec.config import config_util
from eth2spec.test.altair.fork import test_fork as test_altair_forks
from eth2spec.phase0 import spec as spec_phase0
@@ -1,7 +1,7 @@
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair
from eth2spec.test.context import PHASE0, ALTAIR
from eth2spec.test.helpers.constants import PHASE0, ALTAIR


specs = (spec_phase0, spec_altair)
@@ -1,10 +1,11 @@
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair
from eth2spec.test.context import PHASE0, ALTAIR
from eth2spec.merge import spec as spec_merge
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, MERGE


specs = (spec_phase0, spec_altair)
specs = (spec_phase0, spec_altair, spec_merge)


if __name__ == "__main__":
@@ -23,6 +24,13 @@ if __name__ == "__main__":
        **phase_0_mods,
    }  # also run the previous phase 0 tests

    merge_mods = {
        **{key: 'eth2spec.test.merge.block_processing.test_process_' + key for key in [
            'execution_payload',
        ]},
        **phase_0_mods,  # TODO: runs phase0 tests. Rebase to include `altair_mods` testing later.
    }

    # TODO Custody Game testgen is disabled for now
    # custody_game_mods = {**{key: 'eth2spec.test.custody_game.block_processing.test_process_' + key for key in [
    #     'attestation',
@@ -35,6 +43,7 @@ if __name__ == "__main__":
    all_mods = {
        PHASE0: phase_0_mods,
        ALTAIR: altair_mods,
        MERGE: merge_mods,
    }

    run_state_test_generators(runner_name="operations", specs=specs, all_mods=all_mods)
@@ -1,10 +1,11 @@
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair
from eth2spec.test.context import PHASE0, ALTAIR
from eth2spec.merge import spec as spec_merge
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, MERGE


specs = (spec_phase0, spec_altair)
specs = (spec_phase0, spec_altair, spec_merge)


if __name__ == "__main__":
@@ -16,9 +17,15 @@ if __name__ == "__main__":
    # No additional altair specific rewards tests, yet.
    altair_mods = phase_0_mods

    # No additional merge specific rewards tests, yet.
    # Note: Block rewards are non-epoch rewards and are tested as part of block processing tests.
    # Transaction fees are part of the execution-layer.
    merge_mods = phase_0_mods

    all_mods = {
        PHASE0: phase_0_mods,
        ALTAIR: altair_mods,
        MERGE: merge_mods,
    }

    run_state_test_generators(runner_name="rewards", specs=specs, all_mods=all_mods)
@@ -1,11 +1,12 @@
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair
from eth2spec.test.context import PHASE0, ALTAIR
from eth2spec.merge import spec as spec_merge
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, MERGE

from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators


specs = (spec_phase0, spec_altair)
specs = (spec_phase0, spec_altair, spec_merge)


if __name__ == "__main__":
@@ -17,9 +18,15 @@ if __name__ == "__main__":
        'blocks',
    ]}, **phase_0_mods}  # also run the previous phase 0 tests

    # Altair-specific test cases are ignored, but should be included after the Merge is rebased onto Altair work.
    merge_mods = {**{key: 'eth2spec.test.merge.sanity.test_' + key for key in [
        'blocks',
    ]}, **phase_0_mods}  # TODO: Merge inherits phase0 tests for now.

    all_mods = {
        PHASE0: phase_0_mods,
        ALTAIR: altair_mods,
        MERGE: merge_mods,
    }

    run_state_test_generators(runner_name="sanity", specs=specs, all_mods=all_mods)
@@ -6,7 +6,7 @@ from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing

from eth2spec.config import config_util
from eth2spec.phase0 import spec as spec
from eth2spec.test.context import PHASE0
from eth2spec.test.helpers.constants import PHASE0


def shuffling_case_fn(seed, count):
@@ -6,7 +6,7 @@ import ssz_bitvector
import ssz_boolean
import ssz_uints
import ssz_container
from eth2spec.test.context import PHASE0
from eth2spec.test.helpers.constants import PHASE0


def create_provider(handler_name: str, suite_name: str, case_maker) -> gen_typing.TestProvider:
@@ -9,7 +9,8 @@ from eth2spec.debug import random_value, encode
from eth2spec.config import config_util
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair
from eth2spec.test.context import ALTAIR, TESTGEN_FORKS, MINIMAL, MAINNET
from eth2spec.merge import spec as spec_merge
from eth2spec.test.helpers.constants import ALTAIR, MERGE, TESTGEN_FORKS, MINIMAL, MAINNET
from eth2spec.utils.ssz.ssz_typing import Container
from eth2spec.utils.ssz.ssz_impl import (
    hash_tree_root,
@@ -64,6 +65,7 @@ def create_provider(fork_name, config_name: str, seed: int, mode: random_value.R
        config_util.prepare_config(configs_path, config_name)
        reload(spec_phase0)
        reload(spec_altair)
        reload(spec_merge)
        return config_name

    def cases_fn() -> Iterable[gen_typing.TestCase]:
@@ -71,6 +73,8 @@ def create_provider(fork_name, config_name: str, seed: int, mode: random_value.R
        spec = spec_phase0
        if fork_name == ALTAIR:
            spec = spec_altair
        elif fork_name == MERGE:
            spec = spec_merge

        for (i, (name, ssz_type)) in enumerate(get_spec_ssz_types(spec)):
            yield from ssz_static_cases(fork_name, seed * 1000 + i, name, ssz_type, mode, chaos, count)
@@ -89,7 +93,6 @@ if __name__ == "__main__":
    seed += 1
    settings.append((seed, MAINNET, random_value.RandomizationMode.mode_random, False, 5))
    seed += 1

    for fork in TESTGEN_FORKS:
        gen_runner.run_generator("ssz_static", [
            create_provider(fork, config_name, seed, mode, chaos, cases_if_random)
tests/generators/transition/main.py (new file, 42 lines)
@@ -0,0 +1,42 @@
from importlib import reload
from typing import Iterable

from eth2spec.test.helpers.constants import ALTAIR, MINIMAL, MAINNET, PHASE0
from eth2spec.config import config_util
from eth2spec.test.altair.transition import test_transition as test_altair_transition
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair

from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing
from eth2spec.gen_helpers.gen_from_tests.gen import generate_from_tests


def create_provider(tests_src, config_name: str, pre_fork_name: str, post_fork_name: str) -> gen_typing.TestProvider:

    def prepare_fn(configs_path: str) -> str:
        config_util.prepare_config(configs_path, config_name)
        reload(spec_phase0)
        reload(spec_altair)
        return config_name

    def cases_fn() -> Iterable[gen_typing.TestCase]:
        return generate_from_tests(
            runner_name='transition',
            handler_name='core',
            src=tests_src,
            fork_name=post_fork_name,
            phase=pre_fork_name,
        )

    return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)


TRANSITION_TESTS = ((PHASE0, ALTAIR, test_altair_transition),)


if __name__ == "__main__":
    for pre_fork, post_fork, transition_test_module in TRANSITION_TESTS:
        gen_runner.run_generator("transition", [
            create_provider(transition_test_module, MINIMAL, pre_fork, post_fork),
            create_provider(transition_test_module, MAINNET, pre_fork, post_fork),
        ])
tests/generators/transition/requirements.txt (new file, 2 lines)
@@ -0,0 +1,2 @@
pytest>=4.4
../../../[generator]