Merge branch 'dev'

This commit is contained in:
Danny Ryan
2021-07-13 16:48:46 -06:00
20 changed files with 896 additions and 94 deletions

View File

@@ -278,7 +278,7 @@ def get_next_sync_committee_indices(state: BeaconState) -> Sequence[ValidatorInd
#### `get_next_sync_committee`
*Note*: The function `get_next_sync_committee` should only be called at sync committee period boundaries.
*Note*: The function `get_next_sync_committee` should only be called at sync committee period boundaries and when [upgrading state to Altair](./fork.md#upgrading-the-state).
```python
def get_next_sync_committee(state: BeaconState) -> SyncCommittee:

View File

@@ -105,7 +105,7 @@ Note that the `ForkDigestValue` path segment of the topic separates the old and
#### Global topics
Altair changes the type of the global beacon block topic and adds one global topic to propagate partially aggregated sync committee signatures to all potential proposers of beacon blocks.
Altair changes the type of the global beacon block topic and adds one global topic to propagate partially aggregated sync committee messages to all potential proposers of beacon blocks.
##### `beacon_block`
@@ -117,7 +117,7 @@ See the [state transition document](./beacon-chain.md#beaconblockbody) for Altai
##### `sync_committee_contribution_and_proof`
This topic is used to propagate partially aggregated sync committee signatures to be included in future blocks.
This topic is used to propagate partially aggregated sync committee messages to be included in future blocks.
The following validations MUST pass before forwarding the `signed_contribution_and_proof` on the network; define `contribution_and_proof = signed_contribution_and_proof.message`, `contribution = contribution_and_proof.contribution`, and the following function `get_sync_subcommittee_pubkeys` for convenience:
@@ -151,18 +151,18 @@ def get_sync_subcommittee_pubkeys(state: BeaconState, subcommittee_index: uint64
#### Sync committee subnets
Sync committee subnets are used to propagate unaggregated sync committee signatures to subsections of the network.
Sync committee subnets are used to propagate unaggregated sync committee messages to subsections of the network.
##### `sync_committee_{subnet_id}`
The `sync_committee_{subnet_id}` topics are used to propagate unaggregated sync committee signatures to the subnet `subnet_id` to be aggregated before being gossiped to the global `sync_committee_contribution_and_proof` topic.
The `sync_committee_{subnet_id}` topics are used to propagate unaggregated sync committee messages to the subnet `subnet_id` to be aggregated before being gossiped to the global `sync_committee_contribution_and_proof` topic.
The following validations MUST pass before forwarding the `sync_committee_message` on the network:
- _[IGNORE]_ The signature's slot is for the current slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance), i.e. `sync_committee_message.slot == current_slot`.
- _[IGNORE]_ The message's slot is for the current slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance), i.e. `sync_committee_message.slot == current_slot`.
- _[REJECT]_ The `subnet_id` is valid for the given validator, i.e. `subnet_id in compute_subnets_for_sync_committee(state, sync_committee_message.validator_index)`.
Note this validation implies the validator is part of the broader current sync committee along with the correct subcommittee.
- _[IGNORE]_ There has been no other valid sync committee signature for the declared `slot` for the validator referenced by `sync_committee_message.validator_index`
- _[IGNORE]_ There has been no other valid sync committee message for the declared `slot` for the validator referenced by `sync_committee_message.validator_index`
(this requires maintaining a cache of size `SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT` for each subnet that can be flushed after each slot).
Note this validation is _per topic_ so that for a given `slot`, multiple messages could be forwarded with the same `validator_index` as long as the `subnet_id`s are distinct.
- _[REJECT]_ The `signature` is valid for the message `beacon_block_root` for the validator referenced by `validator_index`.
@@ -170,14 +170,14 @@ The following validations MUST pass before forwarding the `sync_committee_messag
#### Sync committees and aggregation
The aggregation scheme closely follows the design of the attestation aggregation scheme.
Sync committee signatures are broadcast into "subnets" defined by a topic.
Sync committee messages are broadcast into "subnets" defined by a topic.
The number of subnets is defined by `SYNC_COMMITTEE_SUBNET_COUNT` in the [Altair validator guide](./validator.md#constants).
Sync committee members are divided into "subcommittees" which are then assigned to a subnet for the duration of tenure in the sync committee.
Individual validators can be duplicated in the broader sync committee such that they are included multiple times in a given subcommittee or across multiple subcommittees.
Unaggregated signatures (along with metadata) are sent as `SyncCommitteeMessage`s on the `sync_committee_{subnet_id}` topics.
Unaggregated messages (along with metadata) are sent as `SyncCommitteeMessage`s on the `sync_committee_{subnet_id}` topics.
Aggregated sync committee signatures are packaged into (signed) `SyncCommitteeContribution` along with proofs and gossiped to the `sync_committee_contribution_and_proof` topic.
Aggregated sync committee messages are packaged into (signed) `SyncCommitteeContribution` along with proofs and gossiped to the `sync_committee_contribution_and_proof` topic.
### Transitioning the gossip

View File

@@ -73,7 +73,7 @@ This document is currently illustrative for early Altair testnets and some parts
| Name | Value | Unit |
| - | - | :-: |
| `TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE` | `2**2` (= 4) | validators |
| `TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE` | `2**4` (= 16) | validators |
| `SYNC_COMMITTEE_SUBNET_COUNT` | `4` | The number of sync committee subnets used in the gossipsub aggregation protocol. |
## Containers

View File

@@ -211,7 +211,7 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe
assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)
# Verify the execution payload is valid
assert execution_engine.on_payload(payload)
# Cache execution payload
# Cache execution payload header
state.latest_execution_payload_header = ExecutionPayloadHeader(
parent_hash=payload.parent_hash,
coinbase=payload.coinbase,

View File

@@ -1 +1 @@
1.1.0-alpha.8
1.1.0-beta.1

View File

@@ -17,8 +17,12 @@ from eth2spec.test.helpers.sync_committee import (
compute_committee_indices,
)
from eth2spec.test.context import (
default_activation_threshold,
expect_assertion_error,
misc_balances,
single_phase,
with_altair_and_later,
with_custom_state,
with_presets,
spec_state_test,
always_bls,
@@ -63,6 +67,29 @@ def get_committee_indices(spec, state, duplicates=False):
state.randao_mixes[randao_index] = hash(state.randao_mixes[randao_index])
@with_altair_and_later
@spec_state_test
@always_bls
def test_invalid_signature_bad_domain(spec, state):
committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
rng = random.Random(2020)
random_participant = rng.choice(committee_indices)
block = build_empty_block_for_next_slot(spec, state)
# Exclude one participant whose signature was included.
block.body.sync_aggregate = spec.SyncAggregate(
sync_committee_bits=[index != random_participant for index in committee_indices],
sync_committee_signature=compute_aggregate_sync_committee_signature(
spec,
state,
block.slot - 1,
committee_indices, # full committee signs
domain_type=spec.DOMAIN_BEACON_ATTESTER, # Incorrect domain
)
)
yield from run_sync_committee_processing(spec, state, block, expect_exception=True)
@with_altair_and_later
@spec_state_test
@always_bls
@@ -170,7 +197,7 @@ def test_sync_committee_rewards_nonduplicate_committee(spec, state):
active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
# Preconditions of this test case
assert active_validator_count >= spec.SYNC_COMMITTEE_SIZE
assert active_validator_count > spec.SYNC_COMMITTEE_SIZE
assert committee_size == len(set(committee_indices))
yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
@@ -179,7 +206,40 @@ def test_sync_committee_rewards_nonduplicate_committee(spec, state):
@with_altair_and_later
@with_presets([MAINNET], reason="to create duplicate committee")
@spec_state_test
def test_sync_committee_rewards_duplicate_committee(spec, state):
def test_sync_committee_rewards_duplicate_committee_no_participation(spec, state):
committee_indices = get_committee_indices(spec, state, duplicates=True)
committee_size = len(committee_indices)
committee_bits = [False] * committee_size
active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
# Preconditions of this test case
assert active_validator_count < spec.SYNC_COMMITTEE_SIZE
assert committee_size > len(set(committee_indices))
yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
@with_altair_and_later
@with_presets([MAINNET], reason="to create duplicate committee")
@spec_state_test
def test_sync_committee_rewards_duplicate_committee_half_participation(spec, state):
committee_indices = get_committee_indices(spec, state, duplicates=True)
committee_size = len(committee_indices)
committee_bits = [True] * (committee_size // 2) + [False] * (committee_size // 2)
assert len(committee_bits) == committee_size
active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
# Preconditions of this test case
assert active_validator_count < spec.SYNC_COMMITTEE_SIZE
assert committee_size > len(set(committee_indices))
yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
@with_altair_and_later
@with_presets([MAINNET], reason="to create duplicate committee")
@spec_state_test
def test_sync_committee_rewards_duplicate_committee_full_participation(spec, state):
committee_indices = get_committee_indices(spec, state, duplicates=True)
committee_size = len(committee_indices)
committee_bits = [True] * committee_size
@@ -219,7 +279,6 @@ def test_sync_committee_rewards_empty_participants(spec, state):
def test_invalid_signature_past_block(spec, state):
committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
blocks = []
for _ in range(2):
# NOTE: need to transition twice to move beyond the degenerate case at genesis
block = build_empty_block_for_next_slot(spec, state)
@@ -234,8 +293,7 @@ def test_invalid_signature_past_block(spec, state):
)
)
signed_block = state_transition_and_sign_block(spec, state, block)
blocks.append(signed_block)
state_transition_and_sign_block(spec, state, block)
invalid_block = build_empty_block_for_next_slot(spec, state)
# Invalid signature from a slot other than the previous
@@ -327,3 +385,223 @@ def test_valid_signature_future_committee(spec, state):
)
yield from run_sync_committee_processing(spec, state, block)
@with_altair_and_later
@spec_state_test
@always_bls
@with_presets([MINIMAL], reason="prefer short search to find matching proposer")
def test_proposer_in_committee_without_participation(spec, state):
committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
# NOTE: seem to reliably be getting a matching proposer in the first epoch w/ ``MINIMAL`` preset.
for _ in range(spec.SLOTS_PER_EPOCH):
block = build_empty_block_for_next_slot(spec, state)
proposer_index = block.proposer_index
proposer_pubkey = state.validators[proposer_index].pubkey
proposer_is_in_sync_committee = proposer_pubkey in state.current_sync_committee.pubkeys
if proposer_is_in_sync_committee:
participation = [index != proposer_index for index in committee_indices]
participants = [index for index in committee_indices if index != proposer_index]
else:
participation = [True for _ in committee_indices]
participants = committee_indices
# Valid sync committee signature here...
block.body.sync_aggregate = spec.SyncAggregate(
sync_committee_bits=participation,
sync_committee_signature=compute_aggregate_sync_committee_signature(
spec,
state,
block.slot - 1,
participants,
)
)
if proposer_is_in_sync_committee:
assert state.validators[block.proposer_index].pubkey in state.current_sync_committee.pubkeys
yield from run_sync_committee_processing(spec, state, block)
break
else:
state_transition_and_sign_block(spec, state, block)
else:
raise AssertionError("failed to find a proposer in the sync committee set; check test setup")
@with_altair_and_later
@spec_state_test
@always_bls
@with_presets([MINIMAL], reason="prefer short search to find matching proposer")
def test_proposer_in_committee_with_participation(spec, state):
committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
participation = [True for _ in committee_indices]
# NOTE: seem to reliably be getting a matching proposer in the first epoch w/ ``MINIMAL`` preset.
for _ in range(spec.SLOTS_PER_EPOCH):
block = build_empty_block_for_next_slot(spec, state)
proposer_index = block.proposer_index
proposer_pubkey = state.validators[proposer_index].pubkey
proposer_is_in_sync_committee = proposer_pubkey in state.current_sync_committee.pubkeys
# Valid sync committee signature here...
block.body.sync_aggregate = spec.SyncAggregate(
sync_committee_bits=participation,
sync_committee_signature=compute_aggregate_sync_committee_signature(
spec,
state,
block.slot - 1,
committee_indices,
)
)
if proposer_is_in_sync_committee:
assert state.validators[block.proposer_index].pubkey in state.current_sync_committee.pubkeys
yield from run_sync_committee_processing(spec, state, block)
return
else:
state_transition_and_sign_block(spec, state, block)
raise AssertionError("failed to find a proposer in the sync committee set; check test setup")
def _test_harness_for_randomized_test_case(spec, state, duplicates=False, participation_fn=None):
committee_indices = get_committee_indices(spec, state, duplicates=duplicates)
if participation_fn:
participating_indices = participation_fn(committee_indices)
else:
participating_indices = committee_indices
committee_bits = [index in participating_indices for index in committee_indices]
committee_size = len(committee_indices)
if duplicates:
assert committee_size > len(set(committee_indices))
else:
assert committee_size == len(set(committee_indices))
yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
@with_altair_and_later
@with_presets([MAINNET], reason="to create duplicate committee")
@spec_state_test
def test_random_only_one_participant_with_duplicates(spec, state):
rng = random.Random(101)
yield from _test_harness_for_randomized_test_case(
spec,
state,
duplicates=True,
participation_fn=lambda comm: [rng.choice(comm)],
)
@with_altair_and_later
@with_presets([MAINNET], reason="to create duplicate committee")
@spec_state_test
def test_random_low_participation_with_duplicates(spec, state):
rng = random.Random(201)
yield from _test_harness_for_randomized_test_case(
spec,
state,
duplicates=True,
participation_fn=lambda comm: rng.sample(comm, int(len(comm) * 0.25)),
)
@with_altair_and_later
@with_presets([MAINNET], reason="to create duplicate committee")
@spec_state_test
def test_random_high_participation_with_duplicates(spec, state):
rng = random.Random(301)
yield from _test_harness_for_randomized_test_case(
spec,
state,
duplicates=True,
participation_fn=lambda comm: rng.sample(comm, int(len(comm) * 0.75)),
)
@with_altair_and_later
@with_presets([MAINNET], reason="to create duplicate committee")
@spec_state_test
def test_random_all_but_one_participating_with_duplicates(spec, state):
rng = random.Random(401)
yield from _test_harness_for_randomized_test_case(
spec,
state,
duplicates=True,
participation_fn=lambda comm: rng.sample(comm, len(comm) - 1),
)
@with_altair_and_later
@with_presets([MAINNET], reason="to create duplicate committee")
@with_custom_state(balances_fn=misc_balances, threshold_fn=default_activation_threshold)
@single_phase
def test_random_misc_balances_and_half_participation_with_duplicates(spec, state):
rng = random.Random(1401)
yield from _test_harness_for_randomized_test_case(
spec,
state,
duplicates=True,
participation_fn=lambda comm: rng.sample(comm, len(comm) // 2),
)
@with_altair_and_later
@with_presets([MINIMAL], reason="to create nonduplicate committee")
@spec_state_test
def test_random_only_one_participant_without_duplicates(spec, state):
rng = random.Random(501)
yield from _test_harness_for_randomized_test_case(
spec,
state,
participation_fn=lambda comm: [rng.choice(comm)],
)
@with_altair_and_later
@with_presets([MINIMAL], reason="to create nonduplicate committee")
@spec_state_test
def test_random_low_participation_without_duplicates(spec, state):
rng = random.Random(601)
yield from _test_harness_for_randomized_test_case(
spec,
state,
participation_fn=lambda comm: rng.sample(comm, int(len(comm) * 0.25)),
)
@with_altair_and_later
@with_presets([MINIMAL], reason="to create nonduplicate committee")
@spec_state_test
def test_random_high_participation_without_duplicates(spec, state):
rng = random.Random(701)
yield from _test_harness_for_randomized_test_case(
spec,
state,
participation_fn=lambda comm: rng.sample(comm, int(len(comm) * 0.75)),
)
@with_altair_and_later
@with_presets([MINIMAL], reason="to create nonduplicate committee")
@spec_state_test
def test_random_all_but_one_participating_without_duplicates(spec, state):
rng = random.Random(801)
yield from _test_harness_for_randomized_test_case(
spec,
state,
participation_fn=lambda comm: rng.sample(comm, len(comm) - 1),
)
@with_altair_and_later
@with_presets([MINIMAL], reason="to create nonduplicate committee")
@with_custom_state(balances_fn=misc_balances, threshold_fn=default_activation_threshold)
@single_phase
def test_random_misc_balances_and_half_participation_without_duplicates(spec, state):
rng = random.Random(1501)
yield from _test_harness_for_randomized_test_case(
spec,
state,
participation_fn=lambda comm: rng.sample(comm, len(comm) // 2),
)

View File

@@ -1,26 +1,20 @@
from random import Random
from eth2spec.test.context import spec_state_test, with_altair_and_later
from eth2spec.test.helpers.inactivity_scores import randomize_inactivity_scores
from eth2spec.test.helpers.inactivity_scores import randomize_inactivity_scores, zero_inactivity_scores
from eth2spec.test.helpers.state import (
next_epoch_via_block,
set_full_participation,
set_empty_participation,
)
from eth2spec.test.helpers.epoch_processing import (
run_epoch_processing_with
)
from eth2spec.test.helpers.random import (
randomize_attestation_participation,
randomize_previous_epoch_participation,
)
def set_full_participation(spec, state):
full_flags = spec.ParticipationFlags(0)
for flag_index in range(len(spec.PARTICIPATION_FLAG_WEIGHTS)):
full_flags = spec.add_flag(full_flags, flag_index)
for index in range(len(state.validators)):
state.current_epoch_participation[index] = full_flags
state.previous_epoch_participation[index] = full_flags
from eth2spec.test.helpers.rewards import leaking
def run_process_inactivity_updates(spec, state):
@@ -33,58 +27,235 @@ def test_genesis(spec, state):
yield from run_process_inactivity_updates(spec, state)
@with_altair_and_later
@spec_state_test
def test_genesis_random_scores(spec, state):
rng = Random(10102)
state.inactivity_scores = [rng.randint(0, 100) for _ in state.inactivity_scores]
pre_scores = state.inactivity_scores.copy()
yield from run_process_inactivity_updates(spec, state)
assert state.inactivity_scores == pre_scores
#
# Genesis epoch processing is skipped
# Thus all of the following tests go past genesis epoch to test core functionality
#
def run_inactivity_scores_test(spec, state, participation_fn=None, inactivity_scores_fn=None, rng=Random(10101)):
next_epoch_via_block(spec, state)
if participation_fn is not None:
participation_fn(spec, state, rng=rng)
if inactivity_scores_fn is not None:
inactivity_scores_fn(spec, state, rng=rng)
yield from run_process_inactivity_updates(spec, state)
@with_altair_and_later
@spec_state_test
def test_all_zero_inactivity_scores_empty_participation(spec, state):
next_epoch_via_block(spec, state)
state.inactivity_scores = [0] * len(state.validators)
yield from run_process_inactivity_updates(spec, state)
yield from run_inactivity_scores_test(spec, state, set_empty_participation, zero_inactivity_scores)
assert set(state.inactivity_scores) == set([0])
@with_altair_and_later
@spec_state_test
@leaking()
def test_all_zero_inactivity_scores_empty_participation_leaking(spec, state):
yield from run_inactivity_scores_test(spec, state, set_empty_participation, zero_inactivity_scores)
# Should still be in leak
assert spec.is_in_inactivity_leak(state)
for score in state.inactivity_scores:
assert score > 0
@with_altair_and_later
@spec_state_test
def test_all_zero_inactivity_scores_random_participation(spec, state):
next_epoch_via_block(spec, state)
state.inactivity_scores = [0] * len(state.validators)
randomize_attestation_participation(spec, state, rng=Random(5555))
yield from run_process_inactivity_updates(spec, state)
yield from run_inactivity_scores_test(
spec, state,
randomize_attestation_participation, zero_inactivity_scores, rng=Random(5555),
)
assert set(state.inactivity_scores) == set([0])
@with_altair_and_later
@spec_state_test
@leaking()
def test_all_zero_inactivity_scores_random_participation_leaking(spec, state):
# Only randomize participation in previous epoch to remain in leak
yield from run_inactivity_scores_test(
spec, state,
randomize_previous_epoch_participation, zero_inactivity_scores, rng=Random(5555),
)
# Check still in leak
assert spec.is_in_inactivity_leak(state)
assert 0 in state.inactivity_scores
assert len(set(state.inactivity_scores)) > 1
@with_altair_and_later
@spec_state_test
def test_all_zero_inactivity_scores_full_participation(spec, state):
next_epoch_via_block(spec, state)
set_full_participation(spec, state)
state.inactivity_scores = [0] * len(state.validators)
yield from run_process_inactivity_updates(spec, state)
yield from run_inactivity_scores_test(
spec, state,
set_full_participation, zero_inactivity_scores,
)
assert set(state.inactivity_scores) == set([0])
@with_altair_and_later
@spec_state_test
@leaking()
def test_all_zero_inactivity_scores_full_participation_leaking(spec, state):
# Only set full participation in previous epoch to remain in leak
yield from run_inactivity_scores_test(
spec, state,
set_full_participation, zero_inactivity_scores,
)
# Check still in leak
assert spec.is_in_inactivity_leak(state)
assert set(state.inactivity_scores) == set([0])
@with_altair_and_later
@spec_state_test
def test_random_inactivity_scores_empty_participation(spec, state):
next_epoch_via_block(spec, state)
randomize_inactivity_scores(spec, state, rng=Random(9999))
yield from run_process_inactivity_updates(spec, state)
yield from run_inactivity_scores_test(
spec, state,
set_empty_participation, randomize_inactivity_scores, Random(9999),
)
@with_altair_and_later
@spec_state_test
@leaking()
def test_random_inactivity_scores_empty_participation_leaking(spec, state):
yield from run_inactivity_scores_test(
spec, state,
set_empty_participation, randomize_inactivity_scores, Random(9999),
)
# Check still in leak
assert spec.is_in_inactivity_leak(state)
@with_altair_and_later
@spec_state_test
def test_random_inactivity_scores_random_participation(spec, state):
next_epoch_via_block(spec, state)
randomize_attestation_participation(spec, state, rng=Random(22222))
randomize_inactivity_scores(spec, state, rng=Random(22222))
yield from run_process_inactivity_updates(spec, state)
yield from run_inactivity_scores_test(
spec, state,
randomize_attestation_participation, randomize_inactivity_scores, Random(22222),
)
@with_altair_and_later
@spec_state_test
@leaking()
def test_random_inactivity_scores_random_participation_leaking(spec, state):
# Only randomize participation in previous epoch to remain in leak
yield from run_inactivity_scores_test(
spec, state,
randomize_previous_epoch_participation, randomize_inactivity_scores, Random(22222),
)
# Check still in leak
assert spec.is_in_inactivity_leak(state)
@with_altair_and_later
@spec_state_test
def test_random_inactivity_scores_full_participation(spec, state):
next_epoch_via_block(spec, state)
set_full_participation(spec, state)
randomize_inactivity_scores(spec, state, rng=Random(33333))
yield from run_process_inactivity_updates(spec, state)
yield from run_inactivity_scores_test(
spec, state,
set_full_participation, randomize_inactivity_scores, Random(33333),
)
@with_altair_and_later
@spec_state_test
@leaking()
def test_random_inactivity_scores_full_participation_leaking(spec, state):
# Only set full participation in previous epoch to remain in leak
yield from run_inactivity_scores_test(
spec, state,
set_full_participation, randomize_inactivity_scores, Random(33333),
)
# Check still in leak
assert spec.is_in_inactivity_leak(state)
def slash_some_validators(spec, state, rng=Random(40404040)):
# Slash ~1/4 of validators
for validator_index in range(len(state.validators)):
if rng.choice(range(4)) == 0:
spec.slash_validator(state, validator_index)
@with_altair_and_later
@spec_state_test
def test_some_slashed_zero_scores_full_participation(spec, state):
slash_some_validators(spec, state, rng=Random(33429))
yield from run_inactivity_scores_test(
spec, state,
set_full_participation, zero_inactivity_scores,
)
assert set(state.inactivity_scores) == set([0])
@with_altair_and_later
@spec_state_test
@leaking()
def test_some_slashed_zero_scores_full_participation_leaking(spec, state):
slash_some_validators(spec, state, rng=Random(33221))
yield from run_inactivity_scores_test(
spec, state,
set_full_participation, zero_inactivity_scores,
)
# Check still in leak
assert spec.is_in_inactivity_leak(state)
# Ensure some zero scores (non-slashed values) and non-zero scores (slashed vals) in there
for score, validator in zip(state.inactivity_scores, state.validators):
if validator.slashed:
assert score > 0
else:
assert score == 0
@with_altair_and_later
@spec_state_test
def test_some_slashed_full_random(spec, state):
rng = Random(1010222)
slash_some_validators(spec, state, rng=rng)
yield from run_inactivity_scores_test(
spec, state,
randomize_attestation_participation, randomize_inactivity_scores, rng=rng,
)
@with_altair_and_later
@spec_state_test
@leaking()
def test_some_slashed_full_random_leaking(spec, state):
rng = Random(1102233)
slash_some_validators(spec, state, rng=rng)
yield from run_inactivity_scores_test(
spec, state,
randomize_previous_epoch_participation, randomize_inactivity_scores, rng=rng,
)
# Check still in leak
assert spec.is_in_inactivity_leak(state)

View File

@@ -12,6 +12,13 @@ from eth2spec.test.helpers.state import next_epoch_via_block
from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with
def get_full_flags(spec):
full_flags = spec.ParticipationFlags(0)
for flag_index in range(len(spec.PARTICIPATION_FLAG_WEIGHTS)):
full_flags = spec.add_flag(full_flags, flag_index)
return full_flags
def run_process_participation_flag_updates(spec, state):
old = state.current_epoch_participation.copy()
yield from run_epoch_processing_with(spec, state, 'process_participation_flag_updates')
@@ -33,12 +40,30 @@ def test_all_zeroed(spec, state):
def test_filled(spec, state):
next_epoch_via_block(spec, state)
full_flags = spec.ParticipationFlags(0)
for flag_index in range(len(spec.PARTICIPATION_FLAG_WEIGHTS)):
full_flags = spec.add_flag(full_flags, flag_index)
state.previous_epoch_participation = [get_full_flags(spec)] * len(state.validators)
state.current_epoch_participation = [get_full_flags(spec)] * len(state.validators)
state.previous_epoch_participation = [full_flags] * len(state.validators)
state.current_epoch_participation = [full_flags] * len(state.validators)
yield from run_process_participation_flag_updates(spec, state)
@with_altair_and_later
@spec_state_test
def test_previous_filled(spec, state):
next_epoch_via_block(spec, state)
state.previous_epoch_participation = [get_full_flags(spec)] * len(state.validators)
state.current_epoch_participation = [0] * len(state.validators)
yield from run_process_participation_flag_updates(spec, state)
@with_altair_and_later
@spec_state_test
def test_current_filled(spec, state):
next_epoch_via_block(spec, state)
state.previous_epoch_participation = [0] * len(state.validators)
state.current_epoch_participation = [get_full_flags(spec)] * len(state.validators)
yield from run_process_participation_flag_updates(spec, state)
@@ -55,9 +80,25 @@ def random_flags(spec, state, seed: int, previous=True, current=True):
@with_altair_and_later
@spec_state_test
def test_random(spec, state):
def test_random_0(spec, state):
next_epoch_via_block(spec, state)
random_flags(spec, state, 10)
random_flags(spec, state, 100)
yield from run_process_participation_flag_updates(spec, state)
@with_altair_and_later
@spec_state_test
def test_random_1(spec, state):
next_epoch_via_block(spec, state)
random_flags(spec, state, 101)
yield from run_process_participation_flag_updates(spec, state)
@with_altair_and_later
@spec_state_test
def test_random_2(spec, state):
next_epoch_via_block(spec, state)
random_flags(spec, state, 102)
yield from run_process_participation_flag_updates(spec, state)

View File

@@ -21,8 +21,8 @@ from eth2spec.test.helpers.epoch_processing import (
#
def run_sync_committees_progress_test(spec, state):
first_sync_committee = state.current_sync_committee
second_sync_committee = state.next_sync_committee
first_sync_committee = state.current_sync_committee.copy()
second_sync_committee = state.next_sync_committee.copy()
current_period = spec.get_current_epoch(state) // spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
next_period = current_period + 1
@@ -41,6 +41,15 @@ def run_sync_committees_progress_test(spec, state):
# of this `EPOCHS_PER_SYNC_COMMITTEE_PERIOD`
third_sync_committee = spec.get_next_sync_committee(state)
# Ensure assignments have changed:
assert state.next_sync_committee != second_sync_committee
if current_period > 0:
assert state.current_sync_committee != first_sync_committee
else:
# Current and next are duplicated in genesis period so remain stable
assert state.current_sync_committee == first_sync_committee
# Ensure expected committees were calculated
assert state.current_sync_committee == second_sync_committee
assert state.next_sync_committee == third_sync_committee
@@ -75,5 +84,42 @@ def test_sync_committees_progress_not_genesis(spec, state):
@single_phase
@always_bls
@with_presets([MINIMAL], reason="too slow")
def test_sync_committees_progress_misc_balances(spec, state):
def test_sync_committees_progress_misc_balances_genesis(spec, state):
# Genesis epoch period has an exceptional case
assert spec.get_current_epoch(state) == spec.GENESIS_EPOCH
yield from run_sync_committees_progress_test(spec, state)
@with_altair_and_later
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
@spec_test
@single_phase
@always_bls
@with_presets([MINIMAL], reason="too slow")
def test_sync_committees_progress_misc_balances_not_genesis(spec, state):
# Transition out of the genesis epoch period to test non-exceptional case
assert spec.get_current_epoch(state) == spec.GENESIS_EPOCH
slot_in_next_period = state.slot + spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
transition_to(spec, state, slot_in_next_period)
yield from run_sync_committees_progress_test(spec, state)
@with_altair_and_later
@spec_state_test
@always_bls
@with_presets([MINIMAL], reason="too slow")
def test_sync_committees_no_progress_not_boundary(spec, state):
    """Sync-committee assignments must remain unchanged when epoch processing
    runs at an epoch that is not a sync-committee period boundary."""
    assert spec.get_current_epoch(state) == spec.GENESIS_EPOCH
    # One epoch forward is strictly inside the first sync-committee period.
    transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH)
    committee_before = state.current_sync_committee.copy()
    next_committee_before = state.next_sync_committee.copy()
    yield from run_epoch_processing_with(spec, state, 'process_sync_committee_updates')
    # Off a period boundary, neither assignment may rotate.
    assert state.current_sync_committee == committee_before
    assert state.next_sync_committee == next_committee_before

View File

@@ -112,7 +112,7 @@ def test_random_high_inactivity_scores_leaking(spec, state):
@with_altair_and_later
@spec_state_test
@leaking(epochs=5)
def test_random_high_inactivity_scores_leaking_5_epochs(spec, state):
@leaking(epochs=8)
def test_random_high_inactivity_scores_leaking_8_epochs(spec, state):
randomize_inactivity_scores(spec, state, minimum=500000, maximum=5000000, rng=Random(9998))
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(9998))

View File

@@ -1,8 +1,9 @@
import random
from random import Random
from eth2spec.test.helpers.state import (
state_transition_and_sign_block,
next_epoch,
next_epoch_via_block,
set_full_participation_previous_epoch,
)
from eth2spec.test.helpers.block import (
build_empty_block_for_next_slot,
@@ -15,12 +16,14 @@ from eth2spec.test.context import (
with_altair_and_later,
spec_state_test,
)
from eth2spec.test.helpers.rewards import leaking
from eth2spec.test.helpers.inactivity_scores import randomize_inactivity_scores
def run_sync_committee_sanity_test(spec, state, fraction_full=1.0):
def run_sync_committee_sanity_test(spec, state, fraction_full=1.0, rng=Random(454545)):
all_pubkeys = [v.pubkey for v in state.validators]
committee = [all_pubkeys.index(pubkey) for pubkey in state.current_sync_committee.pubkeys]
participants = random.sample(committee, int(len(committee) * fraction_full))
participants = rng.sample(committee, int(len(committee) * fraction_full))
yield 'pre', state
@@ -51,7 +54,7 @@ def test_full_sync_committee_committee(spec, state):
@spec_state_test
def test_half_sync_committee_committee(spec, state):
next_epoch(spec, state)
yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.5)
yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.5, rng=Random(1212))
@with_altair_and_later
@@ -70,7 +73,7 @@ def test_full_sync_committee_committee_genesis(spec, state):
@with_altair_and_later
@spec_state_test
def test_half_sync_committee_committee_genesis(spec, state):
yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.5)
yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.5, rng=Random(2323))
@with_altair_and_later
@@ -81,11 +84,13 @@ def test_empty_sync_committee_committee_genesis(spec, state):
@with_altair_and_later
@spec_state_test
def test_inactivity_scores(spec, state):
for _ in range(spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY + 2):
next_epoch_via_block(spec, state)
@leaking()
def test_inactivity_scores_leaking(spec, state):
assert spec.is_in_inactivity_leak(state)
randomize_inactivity_scores(spec, state, rng=Random(5252))
assert len(set(state.inactivity_scores)) > 1
previous_inactivity_scores = state.inactivity_scores.copy()
yield 'pre', state
@@ -97,5 +102,34 @@ def test_inactivity_scores(spec, state):
yield 'blocks', [signed_block]
yield 'post', state
# No participation during a leak so all scores should increase
for pre, post in zip(previous_inactivity_scores, state.inactivity_scores):
assert post == pre + spec.config.INACTIVITY_SCORE_BIAS
@with_altair_and_later
@spec_state_test
@leaking()
def test_inactivity_scores_full_participation_leaking(spec, state):
    """During an inactivity leak, validators with full previous-epoch
    participation should each see their inactivity score decrease by 1."""
    randomize_inactivity_scores(spec, state, rng=Random(5252))
    assert len(set(state.inactivity_scores)) > 1
    # Only set full participation for previous epoch to remain in leak
    set_full_participation_previous_epoch(spec, state)
    previous_inactivity_scores = state.inactivity_scores.copy()
    yield 'pre', state
    # Block transition to next epoch
    block = build_empty_block(spec, state, slot=state.slot + spec.SLOTS_PER_EPOCH)
    signed_block = state_transition_and_sign_block(spec, state, block)
    assert spec.is_in_inactivity_leak(state)
    yield 'blocks', [signed_block]
    yield 'post', state
    # Full participation during a leak so all scores should decrease by 1
    for pre, post in zip(previous_inactivity_scores, state.inactivity_scores):
        assert post == pre - 1

View File

@@ -0,0 +1,47 @@
from eth2spec.test.context import (
with_altair_and_later,
spec_state_test,
)
from eth2spec.test.helpers.state import (
transition_to,
)
@with_altair_and_later
@spec_state_test
def test_get_sync_subcommittee_pubkeys_current_sync_committee(state, spec):
    """Inside a period, subcommittee pubkeys come from the *current* committee."""
    # Move to the first slot of the next sync-committee period.
    transition_to(spec, state, spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
    epoch_of_next_slot = spec.compute_epoch_at_slot(state.slot + 1)
    current_period = spec.compute_sync_committee_period(spec.get_current_epoch(state))
    # The next slot stays in the same period, so the current committee applies.
    assert current_period == spec.compute_sync_committee_period(epoch_of_next_slot)
    subnet_size = spec.SYNC_COMMITTEE_SIZE // spec.SYNC_COMMITTEE_SUBNET_COUNT
    subcommittee_index = 1
    start = subcommittee_index * subnet_size
    expected = state.current_sync_committee.pubkeys[start:start + subnet_size]
    assert spec.get_sync_subcommittee_pubkeys(state, subcommittee_index) == expected
@with_altair_and_later
@spec_state_test
def test_get_sync_subcommittee_pubkeys_next_sync_committee(state, spec):
    """At the last slot of a period, subcommittee pubkeys come from the *next* committee."""
    # Move to the final slot of the current sync-committee period.
    transition_to(spec, state, spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD - 1)
    epoch_of_next_slot = spec.compute_epoch_at_slot(state.slot + 1)
    current_period = spec.compute_sync_committee_period(spec.get_current_epoch(state))
    # The next slot crosses into a new period, so the next committee applies.
    assert current_period != spec.compute_sync_committee_period(epoch_of_next_slot)
    subnet_size = spec.SYNC_COMMITTEE_SIZE // spec.SYNC_COMMITTEE_SUBNET_COUNT
    subcommittee_index = 1
    start = subcommittee_index * subnet_size
    expected = state.next_sync_committee.pubkeys[start:start + subnet_size]
    assert spec.get_sync_subcommittee_pubkeys(state, subcommittee_index) == expected

View File

@@ -1,17 +1,19 @@
import random
from collections import defaultdict
from eth2spec.utils.ssz.ssz_typing import Bitvector
from eth2spec.utils import bls
from eth2spec.test.helpers.block import build_empty_block
from eth2spec.test.helpers.keys import pubkey_to_privkey
from eth2spec.test.helpers.keys import pubkey_to_privkey, privkeys, pubkeys
from eth2spec.test.helpers.state import transition_to
from eth2spec.test.helpers.sync_committee import compute_sync_committee_signature
from eth2spec.utils.bls import only_with_bls
from eth2spec.test.context import (
always_bls,
spec_state_test,
with_altair_and_later,
with_presets,
with_state,
)
from eth2spec.test.helpers.constants import (
MAINNET,
MINIMAL,
)
@@ -29,8 +31,8 @@ def ensure_assignments_in_sync_committee(
@with_altair_and_later
@with_state
def test_is_assigned_to_sync_committee(phases, spec, state):
@spec_state_test
def test_is_assigned_to_sync_committee(spec, state):
epoch = spec.get_current_epoch(state)
validator_indices = spec.get_active_validator_indices(state, epoch)
validator_count = len(validator_indices)
@@ -90,11 +92,11 @@ def _get_sync_committee_signature(
)
@only_with_bls()
@with_altair_and_later
@with_presets([MINIMAL], reason="too slow")
@with_state
def test_process_sync_committee_contributions(phases, spec, state):
@spec_state_test
@always_bls
def test_process_sync_committee_contributions(spec, state):
# skip over slots at genesis
transition_to(spec, state, state.slot + 3)
@@ -137,6 +139,28 @@ def test_process_sync_committee_contributions(phases, spec, state):
spec.process_block(state, block)
@with_altair_and_later
@spec_state_test
@always_bls
def test_get_sync_committee_message(spec, state):
    """`get_sync_committee_message` must fill in slot, block root, and validator
    index, and sign the block root under DOMAIN_SYNC_COMMITTEE."""
    validator_index = 0
    privkey = privkeys[validator_index]
    block_root = spec.Root(b'\x12' * 32)
    # Independently recompute the signature the helper is expected to produce.
    domain = spec.get_domain(state, spec.DOMAIN_SYNC_COMMITTEE, spec.get_current_epoch(state))
    expected_signature = bls.Sign(privkey, spec.compute_signing_root(block_root, domain))
    message = spec.get_sync_committee_message(
        state=state,
        block_root=block_root,
        validator_index=validator_index,
        privkey=privkey,
    )
    assert message.slot == state.slot
    assert message.beacon_block_root == block_root
    assert message.validator_index == validator_index
    assert message.signature == expected_signature
def _validator_index_for_pubkey(state, pubkey):
return list(map(lambda v: v.pubkey, state.validators)).index(pubkey)
@@ -155,8 +179,8 @@ def _get_expected_subnets_by_pubkey(sync_committee_members):
@with_altair_and_later
@with_presets([MINIMAL], reason="too slow")
@with_state
def test_compute_subnets_for_sync_committee(state, spec, phases):
@spec_state_test
def test_compute_subnets_for_sync_committee(state, spec):
# Transition to the head of the next period
transition_to(spec, state, spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
@@ -184,8 +208,8 @@ def test_compute_subnets_for_sync_committee(state, spec, phases):
@with_altair_and_later
@with_presets([MINIMAL], reason="too slow")
@with_state
def test_compute_subnets_for_sync_committee_slot_period_boundary(state, spec, phases):
@spec_state_test
def test_compute_subnets_for_sync_committee_slot_period_boundary(state, spec):
# Transition to the end of the period
transition_to(spec, state, spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD - 1)
@@ -209,3 +233,106 @@ def test_compute_subnets_for_sync_committee_slot_period_boundary(state, spec, ph
subnets = spec.compute_subnets_for_sync_committee(state, validator_index)
expected_subnets = expected_subnets_by_pubkey[pubkey]
assert subnets == expected_subnets
@with_altair_and_later
@spec_state_test
@always_bls
def test_get_sync_committee_selection_proof(spec, state):
    """A selection proof must verify as a signature over SyncAggregatorSelectionData
    under DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF."""
    slot = 1
    subcommittee_index = 0
    proof = spec.get_sync_committee_selection_proof(
        state,
        slot,
        subcommittee_index,
        privkeys[1],
    )
    # Rebuild the signing root the proof is expected to cover.
    selection_data = spec.SyncAggregatorSelectionData(
        slot=slot,
        subcommittee_index=subcommittee_index,
    )
    domain = spec.get_domain(state, spec.DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF, spec.compute_epoch_at_slot(slot))
    signing_root = spec.compute_signing_root(selection_data, domain)
    assert bls.Verify(pubkeys[1], signing_root, proof)
@with_altair_and_later
@spec_state_test
@with_presets([MAINNET], reason="to test against the mainnet SYNC_COMMITTEE_SIZE")
def test_is_sync_committee_aggregator(spec, state):
    """Statistical check: over many pseudo-random signatures, aggregator
    selection should hit roughly TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE
    per subcommittee-worth of samples."""
    sample_count = int(spec.SYNC_COMMITTEE_SIZE // spec.SYNC_COMMITTEE_SUBNET_COUNT) * 100
    hits = sum(
        1
        for i in range(sample_count)
        if spec.is_sync_committee_aggregator(spec.hash(i.to_bytes(32, byteorder="little")))
    )
    # Accept ~10% deviation around the expected aggregator count.
    expected = spec.TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE * 100
    assert expected * 0.9 <= hits <= expected * 1.1
@with_altair_and_later
@spec_state_test
def test_get_contribution_and_proof(spec, state):
    """`get_contribution_and_proof` must wrap the contribution together with the
    aggregator index and the matching selection proof."""
    aggregator_index = 10
    privkey = privkeys[3]
    contribution = spec.SyncCommitteeContribution(
        slot=10,
        beacon_block_root=b'\x12' * 32,
        subcommittee_index=1,
        aggregation_bits=spec.Bitvector[spec.SYNC_COMMITTEE_SIZE // spec.SYNC_COMMITTEE_SUBNET_COUNT](),
        signature=b'\x32' * 96,
    )
    # The helper must embed the same selection proof this direct call produces.
    expected_proof = spec.get_sync_committee_selection_proof(
        state,
        contribution.slot,
        contribution.subcommittee_index,
        privkey,
    )
    result = spec.get_contribution_and_proof(
        state,
        aggregator_index,
        contribution,
        privkey,
    )
    assert result == spec.ContributionAndProof(
        aggregator_index=aggregator_index,
        contribution=contribution,
        selection_proof=expected_proof,
    )
@with_altair_and_later
@spec_state_test
@always_bls
def test_get_contribution_and_proof_signature(spec, state):
    """The contribution-and-proof signature must verify over the whole
    ContributionAndProof object under DOMAIN_CONTRIBUTION_AND_PROOF."""
    privkey = privkeys[3]
    pubkey = pubkeys[3]
    contribution_and_proof = spec.ContributionAndProof(
        aggregator_index=10,
        contribution=spec.SyncCommitteeContribution(
            slot=10,
            beacon_block_root=b'\x12' * 32,
            subcommittee_index=1,
            aggregation_bits=spec.Bitvector[spec.SYNC_COMMITTEE_SIZE // spec.SYNC_COMMITTEE_SUBNET_COUNT](),
            signature=b'\x34' * 96,
        ),
        selection_proof=b'\x56' * 96,
    )
    signature = spec.get_contribution_and_proof_signature(
        state,
        contribution_and_proof,
        privkey,
    )
    # Verify against the domain at the epoch of the contribution's slot.
    epoch = spec.compute_epoch_at_slot(contribution_and_proof.contribution.slot)
    domain = spec.get_domain(state, spec.DOMAIN_CONTRIBUTION_AND_PROOF, epoch)
    signing_root = spec.compute_signing_root(contribution_and_proof, domain)
    assert bls.Verify(pubkey, signing_root, signature)

View File

@@ -3,3 +3,7 @@ from random import Random
def randomize_inactivity_scores(spec, state, minimum=0, maximum=50000, rng=Random(4242)):
    """Assign each validator a uniform random inactivity score in [minimum, maximum].

    NOTE: the default `rng` is evaluated once at import time, so successive
    default-argument calls continue the same deterministic stream.
    """
    state.inactivity_scores = [rng.randint(minimum, maximum) for _ in state.validators]
def zero_inactivity_scores(spec, state, rng=None):
    """Reset every validator's inactivity score to zero.

    `rng` is unused; it is kept so the signature matches the sibling
    randomization helpers.
    """
    state.inactivity_scores = [0 for _ in state.validators]

View File

@@ -100,6 +100,15 @@ def randomize_epoch_participation(spec, state, epoch, rng):
epoch_participation[index] = flags
def randomize_previous_epoch_participation(spec, state, rng=Random(8020)):
    """Randomize previous-epoch participation and clear all current-epoch participation."""
    cached_prepare_state_with_attestations(spec, state)
    randomize_epoch_participation(spec, state, spec.get_previous_epoch(state), rng)
    # Clearing the current epoch uses the fork-appropriate representation.
    if is_post_altair(spec):
        state.current_epoch_participation = [spec.ParticipationFlags(0b0000_0000) for _ in state.validators]
    else:
        state.current_epoch_attestations = []
def randomize_attestation_participation(spec, state, rng=Random(8020)):
cached_prepare_state_with_attestations(spec, state)
randomize_epoch_participation(spec, state, spec.get_previous_epoch(state), rng)

View File

@@ -260,12 +260,13 @@ def run_get_inactivity_penalty_deltas(spec, state):
def transition_state_to_leak(spec, state, epochs=None):
if epochs is None:
# +1 to trigger inactivity_score transitions
epochs = spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY + 1
assert epochs >= spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY
# +2 because finality delay is based on previous_epoch and must be more than `MIN_EPOCHS_TO_INACTIVITY_PENALTY`
epochs = spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY + 2
assert epochs > spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY
for _ in range(epochs):
next_epoch(spec, state)
assert spec.is_in_inactivity_leak(state)
_cache_dict = LRU(size=10)

View File

@@ -1,4 +1,4 @@
from eth2spec.test.context import expect_assertion_error
from eth2spec.test.context import expect_assertion_error, is_post_altair
from eth2spec.test.helpers.block import apply_empty_block, sign_block, transition_unsigned_block
@@ -92,3 +92,44 @@ def state_transition_and_sign_block(spec, state, block, expect_fail=False):
transition_unsigned_block(spec, state, block)
block.state_root = state.hash_tree_root()
return sign_block(spec, state, block)
#
# WARNING: The following functions can only be used post-Altair because they manipulate participation flags directly
#
def _set_full_participation(spec, state, current=True, previous=True):
    """Give every validator a participation value with all flags set (post-Altair only)."""
    assert is_post_altair(spec)
    # Build a flags value with every known flag index switched on.
    all_flags = spec.ParticipationFlags(0)
    for flag_index, _ in enumerate(spec.PARTICIPATION_FLAG_WEIGHTS):
        all_flags = spec.add_flag(all_flags, flag_index)
    for validator_index, _ in enumerate(state.validators):
        if current:
            state.current_epoch_participation[validator_index] = all_flags.copy()
        if previous:
            state.previous_epoch_participation[validator_index] = all_flags.copy()
def set_full_participation(spec, state, rng=None):
    """Set full participation flags for both the current and previous epoch (`rng` unused)."""
    _set_full_participation(spec, state, current=True, previous=True)
def set_full_participation_previous_epoch(spec, state, rng=None):
    """Set full participation flags for the previous epoch only (`rng` unused)."""
    _set_full_participation(spec, state, current=False, previous=True)
def _set_empty_participation(spec, state, current=True, previous=True):
    """Clear every validator's participation flags (post-Altair only)."""
    assert is_post_altair(spec)
    for validator_index, _ in enumerate(state.validators):
        if current:
            state.current_epoch_participation[validator_index] = spec.ParticipationFlags(0)
        if previous:
            state.previous_epoch_participation[validator_index] = spec.ParticipationFlags(0)
def set_empty_participation(spec, state, rng=None):
    """Clear participation flags for both epochs (`rng` unused; kept for signature parity)."""
    _set_empty_participation(spec, state, current=True, previous=True)

View File

@@ -7,8 +7,10 @@ from eth2spec.test.helpers.block import (
from eth2spec.utils import bls
def compute_sync_committee_signature(spec, state, slot, privkey, block_root=None):
domain = spec.get_domain(state, spec.DOMAIN_SYNC_COMMITTEE, spec.compute_epoch_at_slot(slot))
def compute_sync_committee_signature(spec, state, slot, privkey, block_root=None, domain_type=None):
if not domain_type:
domain_type = spec.DOMAIN_SYNC_COMMITTEE
domain = spec.get_domain(state, domain_type, spec.compute_epoch_at_slot(slot))
if block_root is None:
if slot == state.slot:
block_root = build_empty_block_for_next_slot(spec, state).parent_root
@@ -18,7 +20,7 @@ def compute_sync_committee_signature(spec, state, slot, privkey, block_root=None
return bls.Sign(privkey, signing_root)
def compute_aggregate_sync_committee_signature(spec, state, slot, participants, block_root=None):
def compute_aggregate_sync_committee_signature(spec, state, slot, participants, block_root=None, domain_type=None):
if len(participants) == 0:
return spec.G2_POINT_AT_INFINITY
@@ -32,6 +34,7 @@ def compute_aggregate_sync_committee_signature(spec, state, slot, participants,
slot,
privkey,
block_root=block_root,
domain_type=domain_type,
)
)
return bls.Aggregate(signatures)

View File

@@ -145,8 +145,8 @@ def test_full_random_leak(spec, state):
@with_all_phases
@spec_state_test
@leaking(epochs=5)
def test_full_random_five_epoch_leak(spec, state):
@leaking(epochs=7)
def test_full_random_seven_epoch_leak(spec, state):
yield from rewards_helpers.run_test_full_random(spec, state)