Merge pull request #1003 from ethereum/master

Backport (partial) v0.6.1 to dev
Danny Ryan
2019-05-01 20:19:52 -06:00
committed by GitHub
19 changed files with 352 additions and 338 deletions

View File

@@ -15,6 +15,8 @@ MAX_INDICES_PER_ATTESTATION: 4096
MIN_PER_EPOCH_CHURN_LIMIT: 4
# 2**16 (= 65,536)
CHURN_LIMIT_QUOTIENT: 65536
# Normalizes base rewards
BASE_REWARDS_PER_EPOCH: 5
# See issue 563
SHUFFLE_ROUND_COUNT: 90
@@ -36,7 +38,7 @@ MAX_EFFECTIVE_BALANCE: 32000000000
# 2**4 * 10**9 (= 16,000,000,000) Gwei
EJECTION_BALANCE: 16000000000
# 2**0 * 10**9 (= 1,000,000,000) Gwei
HIGH_BALANCE_INCREMENT: 1000000000
EFFECTIVE_BALANCE_INCREMENT: 1000000000
# Initial values
@@ -71,6 +73,8 @@ MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
PERSISTENT_COMMITTEE_PERIOD: 2048
# 2**6 (= 64) epochs ~7 hours
MAX_CROSSLINK_EPOCHS: 64
# 2**2 (= 4) epochs 25.6 minutes
MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4
# State list lengths
@@ -91,14 +95,14 @@ BASE_REWARD_QUOTIENT: 32
WHISTLEBLOWING_REWARD_QUOTIENT: 512
# 2**3 (= 8)
PROPOSER_REWARD_QUOTIENT: 8
# 2**24 (= 16,777,216)
INACTIVITY_PENALTY_QUOTIENT: 16777216
# 2**25 (= 33,554,432)
INACTIVITY_PENALTY_QUOTIENT: 33554432
# 2**5 (= 32)
MIN_SLASHING_PENALTY_QUOTIENT: 32
# Max operations per block
# ---------------------------------------------------------------
# 2**5 (= 32)
MIN_PENALTY_QUOTIENT: 32
# 2**4 (= 16)
MAX_PROPOSER_SLASHINGS: 16
# 2**0 (= 1)

View File

@@ -6,7 +6,6 @@
# [customized] Just 8 shards for testing purposes
SHARD_COUNT: 8
# [customized] insecure, but fast
TARGET_COMMITTEE_SIZE: 4
# 2**12 (= 4,096)
@@ -15,6 +14,8 @@ MAX_INDICES_PER_ATTESTATION: 4096
MIN_PER_EPOCH_CHURN_LIMIT: 4
# 2**16 (= 65,536)
CHURN_LIMIT_QUOTIENT: 65536
# Normalizes base rewards
BASE_REWARDS_PER_EPOCH: 5
# [customized] Faster, but insecure.
SHUFFLE_ROUND_COUNT: 10
@@ -36,7 +37,7 @@ MAX_EFFECTIVE_BALANCE: 32000000000
# 2**4 * 10**9 (= 16,000,000,000) Gwei
EJECTION_BALANCE: 16000000000
# 2**0 * 10**9 (= 1,000,000,000) Gwei
HIGH_BALANCE_INCREMENT: 1000000000
EFFECTIVE_BALANCE_INCREMENT: 1000000000
# Initial values
@@ -71,6 +72,8 @@ MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
PERSISTENT_COMMITTEE_PERIOD: 2048
# 2**6 (= 64) epochs ~7 hours
MAX_CROSSLINK_EPOCHS: 64
# 2**2 (= 4) epochs 25.6 minutes
MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4
# State list lengths
@@ -91,14 +94,14 @@ BASE_REWARD_QUOTIENT: 32
WHISTLEBLOWING_REWARD_QUOTIENT: 512
# 2**3 (= 8)
PROPOSER_REWARD_QUOTIENT: 8
# 2**24 (= 16,777,216)
INACTIVITY_PENALTY_QUOTIENT: 16777216
# 2**25 (= 33,554,432)
INACTIVITY_PENALTY_QUOTIENT: 33554432
# 2**5 (= 32)
MIN_SLASHING_PENALTY_QUOTIENT: 32
# Max operations per block
# ---------------------------------------------------------------
# 2**5 (= 32)
MIN_PENALTY_QUOTIENT: 32
# 2**4 (= 16)
MAX_PROPOSER_SLASHINGS: 16
# 2**0 (= 1)

View File

@@ -5,10 +5,8 @@ import function_puller
def build_phase0_spec(sourcefile, outfile):
code_lines = []
code_lines.append("""
from typing import (
Any,
Callable,
Dict,
List,
NewType,
@@ -17,19 +15,16 @@ from typing import (
from eth2spec.utils.minimal_ssz import *
from eth2spec.utils.bls_stub import *
""")
""")
for i in (1, 2, 3, 4, 8, 32, 48, 96):
code_lines.append("def int_to_bytes%d(x): return x.to_bytes(%d, 'little')" % (i, i))
code_lines.append("""
# stub, will get overwritten by real var
SLOTS_PER_EPOCH = 64
def slot_to_epoch(x): return x // SLOTS_PER_EPOCH
Slot = NewType('Slot', int) # uint64
Epoch = NewType('Epoch', int) # uint64
Shard = NewType('Shard', int) # uint64
@@ -38,31 +33,26 @@ Gwei = NewType('Gwei', int) # uint64
Bytes32 = NewType('Bytes32', bytes) # bytes32
BLSPubkey = NewType('BLSPubkey', bytes) # bytes48
BLSSignature = NewType('BLSSignature', bytes) # bytes96
Any = None
Store = None
""")
""")
code_lines += function_puller.get_spec(sourcefile)
code_lines.append("""
# Monkey patch validator get committee code
# Monkey patch validator compute committee code
_compute_committee = compute_committee
committee_cache = {}
def compute_committee(validator_indices: List[ValidatorIndex],
seed: Bytes32,
index: int,
total_committees: int) -> List[ValidatorIndex]:
param_hash = (hash_tree_root(validator_indices), seed, index, total_committees)
def compute_committee(indices: List[ValidatorIndex], seed: Bytes32, index: int, count: int) -> List[ValidatorIndex]:
param_hash = (hash_tree_root(indices), seed, index, count)
if param_hash in committee_cache:
# print("Cache hit, epoch={0}".format(epoch))
print("Cache hit, param_hash: ", param_hash)
return committee_cache[param_hash]
else:
# print("Cache miss, epoch={0}".format(epoch))
ret = _compute_committee(validator_indices, seed, index, total_committees)
print("Cache miss, param_hash: ", param_hash)
ret = _compute_committee(indices, seed, index, count)
committee_cache[param_hash] = ret
return ret
@@ -88,7 +78,7 @@ def apply_constants_preset(preset: Dict[str, Any]):
# Deal with derived constants
global_vars['GENESIS_EPOCH'] = slot_to_epoch(GENESIS_SLOT)
# Initialize SSZ types again, to account for changed lengths
init_SSZ_types()
""")

View File

@@ -62,9 +62,10 @@ def get_spec(file_name: str) -> List[str]:
code_lines.append('')
for type_line in ssz_type:
code_lines.append(' ' + type_line)
code_lines.append('')
code_lines.append('\n')
code_lines.append('ssz_types = [' + ', '.join([f'\'{ssz_type_name}\'' for (ssz_type_name, _) in type_defs]) + ']')
code_lines.append('')
code_lines.append('def get_ssz_type_by_name(name: str) -> SSZType: return globals()[name]')
code_lines.append('\n')
code_lines.append('def get_ssz_type_by_name(name: str) -> SSZType:')
code_lines.append(' return globals()[name]')
code_lines.append('')
return code_lines

View File

@@ -60,20 +60,20 @@
- [`get_active_validator_indices`](#get_active_validator_indices)
- [`increase_balance`](#increase_balance)
- [`decrease_balance`](#decrease_balance)
- [`get_permuted_index`](#get_permuted_index)
- [`get_split_offset`](#get_split_offset)
- [`get_epoch_committee_count`](#get_epoch_committee_count)
- [`get_shard_delta`](#get_shard_delta)
- [`compute_committee`](#compute_committee)
- [`get_crosslink_committees_at_slot`](#get_crosslink_committees_at_slot)
- [`get_epoch_start_shard`](#get_epoch_start_shard)
- [`get_attestation_slot`](#get_attestation_slot)
- [`get_block_root_at_slot`](#get_block_root_at_slot)
- [`get_block_root`](#get_block_root)
- [`get_state_root`](#get_state_root)
- [`get_randao_mix`](#get_randao_mix)
- [`get_active_index_root`](#get_active_index_root)
- [`generate_seed`](#generate_seed)
- [`get_beacon_proposer_index`](#get_beacon_proposer_index)
- [`verify_merkle_branch`](#verify_merkle_branch)
- [`get_shuffled_index`](#get_shuffled_index)
- [`compute_committee`](#compute_committee)
- [`get_crosslink_committee`](#get_crosslink_committee)
- [`get_attesting_indices`](#get_attesting_indices)
- [`int_to_bytes1`, `int_to_bytes2`, ...](#int_to_bytes1-int_to_bytes2-)
- [`bytes_to_int`](#bytes_to_int)
@@ -83,8 +83,7 @@
- [`verify_bitfield`](#verify_bitfield)
- [`convert_to_indexed`](#convert_to_indexed)
- [`verify_indexed_attestation`](#verify_indexed_attestation)
- [`is_double_vote`](#is_double_vote)
- [`is_surround_vote`](#is_surround_vote)
- [`is_slashable_attestation_data`](#is_slashable_attestation_data)
- [`integer_squareroot`](#integer_squareroot)
- [`get_delayed_activation_exit_epoch`](#get_delayed_activation_exit_epoch)
- [`get_churn_limit`](#get_churn_limit)
@@ -307,12 +306,12 @@ The types are defined topologically to aid in facilitating an executable version
```python
{
# LMD GHOST vote
'slot': 'uint64',
'beacon_block_root': 'bytes32',
# FFG vote
'source_epoch': 'uint64',
'source_root': 'bytes32',
'target_epoch': 'uint64',
'target_root': 'bytes32',
# Crosslink vote
@@ -404,8 +403,8 @@ The types are defined topologically to aid in facilitating an executable version
'aggregation_bitfield': 'bytes',
# Attestation data
'data': AttestationData,
# Inclusion slot
'inclusion_slot': 'uint64',
# Inclusion delay
'inclusion_delay': 'uint64',
# Proposer index
'proposer_index': 'uint64',
}
@@ -518,6 +517,7 @@ The types are defined topologically to aid in facilitating an executable version
{
'randao_reveal': 'bytes96',
'eth1_data': Eth1Data,
'graffiti': 'bytes32',
'proposer_slashings': [ProposerSlashing],
'attester_slashings': [AttesterSlashing],
'attestations': [Attestation],
@@ -687,7 +687,6 @@ def is_slashable_validator(validator: Validator, epoch: Epoch) -> bool:
Check if ``validator`` is slashable.
"""
return validator.slashed is False and (validator.activation_epoch <= epoch < validator.withdrawable_epoch)
```
### `get_active_validator_indices`
@@ -720,43 +719,6 @@ def decrease_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) ->
state.balances[index] = 0 if delta > state.balances[index] else state.balances[index] - delta
```
### `get_permuted_index`
```python
def get_permuted_index(index: int, list_size: int, seed: Bytes32) -> int:
"""
Return `p(index)` in a pseudorandom permutation `p` of `0...list_size - 1` with ``seed`` as entropy.
Utilizes 'swap or not' shuffling found in
https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf
See the 'generalized domain' algorithm on page 3.
"""
assert index < list_size
assert list_size <= 2**40
for round in range(SHUFFLE_ROUND_COUNT):
pivot = bytes_to_int(hash(seed + int_to_bytes1(round))[0:8]) % list_size
flip = (pivot - index) % list_size
position = max(index, flip)
source = hash(seed + int_to_bytes1(round) + int_to_bytes4(position // 256))
byte = source[(position % 256) // 8]
bit = (byte >> (position % 8)) % 2
index = flip if bit else index
return index
```
### `get_split_offset`
```python
def get_split_offset(list_size: int, chunks: int, index: int) -> int:
"""
Returns a value such that for a list L, chunk count k and index i,
split(L, k)[i] == L[get_split_offset(len(L), k, i): get_split_offset(len(L), k, i+1)]
"""
return (list_size * index) // chunks
```
### `get_epoch_committee_count`
```python
@@ -784,65 +746,27 @@ def get_shard_delta(state: BeaconState, epoch: Epoch) -> int:
return min(get_epoch_committee_count(state, epoch), SHARD_COUNT - SHARD_COUNT // SLOTS_PER_EPOCH)
```
### `compute_committee`
### `get_epoch_start_shard`
```python
def compute_committee(validator_indices: List[ValidatorIndex],
seed: Bytes32,
index: int,
total_committees: int) -> List[ValidatorIndex]:
"""
Return the ``index``'th shuffled committee out of a total ``total_committees``
using ``validator_indices`` and ``seed``.
"""
start_offset = get_split_offset(len(validator_indices), total_committees, index)
end_offset = get_split_offset(len(validator_indices), total_committees, index + 1)
return [
validator_indices[get_permuted_index(i, len(validator_indices), seed)]
for i in range(start_offset, end_offset)
]
def get_epoch_start_shard(state: BeaconState, epoch: Epoch) -> Shard:
assert epoch <= get_current_epoch(state) + 1
check_epoch = get_current_epoch(state) + 1
shard = (state.latest_start_shard + get_shard_delta(state, get_current_epoch(state))) % SHARD_COUNT
while check_epoch > epoch:
check_epoch -= 1
shard = (shard + SHARD_COUNT - get_shard_delta(state, check_epoch)) % SHARD_COUNT
return shard
```
Note: this definition and the next few definitions are highly inefficient as algorithms, as they re-calculate many sub-expressions. Production implementations are expected to appropriately use caching/memoization to avoid redoing work.
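As an illustration of such caching (a sketch only, not spec code; it relies on `get_shuffled_index` and matches `compute_committee` as defined later in this document), all committees for a given seed are slices of a single shuffling of the active indices, so that shuffling can be computed once and reused:
```python
# Illustrative memoization sketch, not part of the specification.
_shuffling_cache = {}

def _get_shuffling(indices: List[ValidatorIndex], seed: Bytes32) -> List[ValidatorIndex]:
    key = (tuple(indices), seed)
    if key not in _shuffling_cache:
        _shuffling_cache[key] = [
            indices[get_shuffled_index(i, len(indices), seed)] for i in range(len(indices))
        ]
    return _shuffling_cache[key]

def compute_committee_cached(indices: List[ValidatorIndex], seed: Bytes32, index: int, count: int) -> List[ValidatorIndex]:
    # Same result as ``compute_committee`` below, but reuses the cached shuffling.
    start = (len(indices) * index) // count
    end = (len(indices) * (index + 1)) // count
    return _get_shuffling(indices, seed)[start:end]
```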
### `get_crosslink_committees_at_slot`
### `get_attestation_slot`
```python
def get_crosslink_committees_at_slot(state: BeaconState,
slot: Slot) -> List[Tuple[List[ValidatorIndex], Shard]]:
"""
Return the list of ``(committee, shard)`` tuples for the ``slot``.
"""
epoch = slot_to_epoch(slot)
current_epoch = get_current_epoch(state)
previous_epoch = get_previous_epoch(state)
next_epoch = current_epoch + 1
assert previous_epoch <= epoch <= next_epoch
indices = get_active_validator_indices(state, epoch)
if epoch == current_epoch:
start_shard = state.latest_start_shard
elif epoch == previous_epoch:
previous_shard_delta = get_shard_delta(state, previous_epoch)
start_shard = (state.latest_start_shard - previous_shard_delta) % SHARD_COUNT
elif epoch == next_epoch:
current_shard_delta = get_shard_delta(state, current_epoch)
start_shard = (state.latest_start_shard + current_shard_delta) % SHARD_COUNT
committees_per_epoch = get_epoch_committee_count(state, epoch)
committees_per_slot = committees_per_epoch // SLOTS_PER_EPOCH
offset = slot % SLOTS_PER_EPOCH
slot_start_shard = (start_shard + committees_per_slot * offset) % SHARD_COUNT
seed = generate_seed(state, epoch)
return [
(
compute_committee(indices, seed, committees_per_slot * offset + i, committees_per_epoch),
(slot_start_shard + i) % SHARD_COUNT,
)
for i in range(committees_per_slot)
]
def get_attestation_slot(state: BeaconState, attestation: Attestation) -> Slot:
epoch = attestation.data.target_epoch
committee_count = get_epoch_committee_count(state, epoch)
offset = (attestation.data.shard + SHARD_COUNT - get_epoch_start_shard(state, epoch)) % SHARD_COUNT
return get_epoch_start_slot(epoch) + offset // (committee_count // SLOTS_PER_EPOCH)
```
### `get_block_root_at_slot`
@@ -868,18 +792,6 @@ def get_block_root(state: BeaconState,
return get_block_root_at_slot(state, get_epoch_start_slot(epoch))
```
### `get_state_root`
```python
def get_state_root(state: BeaconState,
slot: Slot) -> Bytes32:
"""
Return the state root at a recent ``slot``.
"""
assert slot < state.slot <= slot + SLOTS_PER_HISTORICAL_ROOT
return state.latest_state_roots[slot % SLOTS_PER_HISTORICAL_ROOT]
```
### `get_randao_mix`
```python
@@ -924,15 +836,19 @@ def generate_seed(state: BeaconState,
```python
def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex:
"""
Return the beacon proposer index at ``state.slot``.
Return the current beacon proposer index.
"""
current_epoch = get_current_epoch(state)
first_committee, _ = get_crosslink_committees_at_slot(state, state.slot)[0]
epoch = get_current_epoch(state)
committees_per_slot = get_epoch_committee_count(state, epoch) // SLOTS_PER_EPOCH
offset = committees_per_slot * (state.slot % SLOTS_PER_EPOCH)
shard = (get_epoch_start_shard(state, epoch) + offset) % SHARD_COUNT
first_committee = get_crosslink_committee(state, epoch, shard)
MAX_RANDOM_BYTE = 2**8 - 1
seed = generate_seed(state, epoch)
i = 0
while True:
candidate_index = first_committee[(current_epoch + i) % len(first_committee)]
random_byte = hash(generate_seed(state, current_epoch) + int_to_bytes8(i // 32))[i % 32]
candidate_index = first_committee[(epoch + i) % len(first_committee)]
random_byte = hash(seed + int_to_bytes8(i // 32))[i % 32]
effective_balance = state.validator_registry[candidate_index].effective_balance
if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
return candidate_index
@@ -956,6 +872,51 @@ def verify_merkle_branch(leaf: Bytes32, proof: List[Bytes32], depth: int, index:
return value == root
```
### `get_shuffled_index`
```python
def get_shuffled_index(index: ValidatorIndex, index_count: int, seed: Bytes32) -> ValidatorIndex:
"""
Return the shuffled validator index corresponding to ``seed`` (and ``index_count``).
"""
assert index < index_count
assert index_count <= 2**40
# Swap or not (https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf)
# See the 'generalized domain' algorithm on page 3
for round in range(SHUFFLE_ROUND_COUNT):
pivot = bytes_to_int(hash(seed + int_to_bytes1(round))[0:8]) % index_count
flip = (pivot - index) % index_count
position = max(index, flip)
source = hash(seed + int_to_bytes1(round) + int_to_bytes4(position // 256))
byte = source[(position % 256) // 8]
bit = (byte >> (position % 8)) % 2
index = flip if bit else index
return index
```
### `compute_committee`
```python
def compute_committee(indices: List[ValidatorIndex], seed: Bytes32, index: int, count: int) -> List[ValidatorIndex]:
start = (len(indices) * index) // count
end = (len(indices) * (index + 1)) // count
return [indices[get_shuffled_index(i, len(indices), seed)] for i in range(start, end)]
```
### `get_crosslink_committee`
```python
def get_crosslink_committee(state: BeaconState, epoch: Epoch, shard: Shard) -> List[ValidatorIndex]:
return compute_committee(
indices=get_active_validator_indices(state, epoch),
seed=generate_seed(state, epoch),
index=(shard + SHARD_COUNT - get_epoch_start_shard(state, epoch)) % SHARD_COUNT,
count=get_epoch_committee_count(state, epoch),
)
```
### `get_attesting_indices`
```python
@@ -965,10 +926,9 @@ def get_attesting_indices(state: BeaconState,
"""
Return the sorted attesting indices corresponding to ``attestation_data`` and ``bitfield``.
"""
crosslink_committees = get_crosslink_committees_at_slot(state, attestation_data.slot)
crosslink_committee = [committee for committee, shard in crosslink_committees if shard == attestation_data.shard][0]
assert verify_bitfield(bitfield, len(crosslink_committee))
return sorted([index for i, index in enumerate(crosslink_committee) if get_bitfield_bit(bitfield, i) == 0b1])
committee = get_crosslink_committee(state, attestation_data.target_epoch, attestation_data.shard)
assert verify_bitfield(bitfield, len(committee))
return sorted([index for i, index in enumerate(committee) if get_bitfield_bit(bitfield, i) == 0b1])
```
### `int_to_bytes1`, `int_to_bytes2`, ...
@@ -1088,37 +1048,23 @@ def verify_indexed_attestation(state: BeaconState, indexed_attestation: IndexedA
hash_tree_root(AttestationDataAndCustodyBit(data=indexed_attestation.data, custody_bit=0b1)),
],
signature=indexed_attestation.signature,
domain=get_domain(state, DOMAIN_ATTESTATION, slot_to_epoch(indexed_attestation.data.slot)),
domain=get_domain(state, DOMAIN_ATTESTATION, indexed_attestation.data.target_epoch),
)
```
### `is_double_vote`
### `is_slashable_attestation_data`
```python
def is_double_vote(attestation_data_1: AttestationData,
attestation_data_2: AttestationData) -> bool:
def is_slashable_attestation_data(data_1: AttestationData, data_2: AttestationData) -> bool:
"""
Check if ``attestation_data_1`` and ``attestation_data_2`` have the same target.
Check if ``data_1`` and ``data_2`` are slashable according to Casper FFG rules.
"""
target_epoch_1 = slot_to_epoch(attestation_data_1.slot)
target_epoch_2 = slot_to_epoch(attestation_data_2.slot)
return target_epoch_1 == target_epoch_2
```
### `is_surround_vote`
```python
def is_surround_vote(attestation_data_1: AttestationData,
attestation_data_2: AttestationData) -> bool:
"""
Check if ``attestation_data_1`` surrounds ``attestation_data_2``.
"""
source_epoch_1 = attestation_data_1.source_epoch
source_epoch_2 = attestation_data_2.source_epoch
target_epoch_1 = slot_to_epoch(attestation_data_1.slot)
target_epoch_2 = slot_to_epoch(attestation_data_2.slot)
return source_epoch_1 < source_epoch_2 and target_epoch_2 < target_epoch_1
return (
# Double vote
(data_1 != data_2 and data_1.target_epoch == data_2.target_epoch) or
# Surround vote
(data_1.source_epoch < data_2.source_epoch and data_2.target_epoch < data_1.target_epoch)
)
```
### `integer_squareroot`
@@ -1179,7 +1125,6 @@ Note: All functions in this section mutate `state`.
def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None:
"""
Initiate the exit of the validator of the given ``index``.
Note that this function mutates ``state``.
"""
# Return if validator already initiated exit
validator = state.validator_registry[index]
@@ -1204,7 +1149,6 @@ def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None:
def slash_validator(state: BeaconState, slashed_index: ValidatorIndex, whistleblower_index: ValidatorIndex=None) -> None:
"""
Slash the validator with index ``slashed_index``.
Note that this function mutates ``state``.
"""
current_epoch = get_current_epoch(state)
initiate_validator_exit(state, slashed_index)
@@ -1250,7 +1194,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit],
process_deposit(state, deposit)
# Process genesis activations
for index, validator in enumerate(state.validator_registry):
for validator in state.validator_registry:
if validator.effective_balance >= MAX_EFFECTIVE_BALANCE:
validator.activation_eligibility_epoch = GENESIS_EPOCH
validator.activation_epoch = GENESIS_EPOCH
@@ -1331,7 +1275,7 @@ def get_matching_target_attestations(state: BeaconState, epoch: Epoch) -> List[P
def get_matching_head_attestations(state: BeaconState, epoch: Epoch) -> List[PendingAttestation]:
return [
a for a in get_matching_source_attestations(state, epoch)
if a.data.beacon_block_root == get_block_root_at_slot(state, a.data.slot)
if a.data.beacon_block_root == get_block_root_at_slot(state, get_attestation_slot(state, a))
]
```
@@ -1351,14 +1295,14 @@ def get_attesting_balance(state: BeaconState, attestations: List[PendingAttestat
```python
def get_crosslink_from_attestation_data(state: BeaconState, data: AttestationData) -> Crosslink:
return Crosslink(
epoch=min(slot_to_epoch(data.slot), state.current_crosslinks[data.shard].epoch + MAX_CROSSLINK_EPOCHS),
epoch=min(data.target_epoch, state.current_crosslinks[data.shard].epoch + MAX_CROSSLINK_EPOCHS),
previous_crosslink_root=data.previous_crosslink_root,
crosslink_data_root=data.crosslink_data_root,
)
```
```python
def get_winning_crosslink_and_attesting_indices(state: BeaconState, shard: Shard, epoch: Epoch) -> Tuple[Crosslink, List[ValidatorIndex]]:
def get_winning_crosslink_and_attesting_indices(state: BeaconState, epoch: Epoch, shard: Shard) -> Tuple[Crosslink, List[ValidatorIndex]]:
shard_attestations = [a for a in get_matching_source_attestations(state, epoch) if a.data.shard == shard]
shard_crosslinks = [get_crosslink_from_attestation_data(state, a.data) for a in shard_attestations]
candidate_crosslinks = [
@@ -1366,7 +1310,7 @@ def get_winning_crosslink_and_attesting_indices(state: BeaconState, shard: Shard
if hash_tree_root(state.current_crosslinks[shard]) in (c.previous_crosslink_root, hash_tree_root(c))
]
if len(candidate_crosslinks) == 0:
return Crosslink(epoch=GENESIS_EPOCH, previous_crosslink_root=ZERO_HASH, crosslink_data_root=ZERO_HASH), []
return Crosslink(), []
def get_attestations_for(crosslink: Crosslink) -> List[PendingAttestation]:
return [a for a in shard_attestations if get_crosslink_from_attestation_data(state, a.data) == crosslink]
@@ -1378,13 +1322,6 @@ def get_winning_crosslink_and_attesting_indices(state: BeaconState, shard: Shard
return winning_crosslink, get_unslashed_attesting_indices(state, get_attestations_for(winning_crosslink))
```
```python
def get_earliest_attestation(state: BeaconState, attestations: List[PendingAttestation], index: ValidatorIndex) -> PendingAttestation:
return min([
a for a in attestations if index in get_attesting_indices(state, a.data, a.aggregation_bitfield)
], key=lambda a: a.inclusion_slot)
```
#### Justification and finalization
Run the following function:
@@ -1441,12 +1378,11 @@ Run the following function:
```python
def process_crosslinks(state: BeaconState) -> None:
state.previous_crosslinks = [c for c in state.current_crosslinks]
previous_epoch = get_previous_epoch(state)
next_epoch = get_current_epoch(state) + 1
for slot in range(get_epoch_start_slot(previous_epoch), get_epoch_start_slot(next_epoch)):
epoch = slot_to_epoch(slot)
for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot):
winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, shard, epoch)
for epoch in (get_previous_epoch(state), get_current_epoch(state)):
for offset in range(get_epoch_committee_count(state, epoch)):
shard = (get_epoch_start_shard(state, epoch) + offset) % SHARD_COUNT
crosslink_committee = get_crosslink_committee(state, epoch, shard)
winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, epoch, shard)
if 3 * get_total_balance(state, attesting_indices) >= 2 * get_total_balance(state, crosslink_committee):
state.current_crosslinks[shard] = winning_crosslink
```
@@ -1461,15 +1397,14 @@ def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei:
if adjusted_quotient == 0:
return 0
return state.validator_registry[index].effective_balance // adjusted_quotient // BASE_REWARDS_PER_EPOCH
```
```python
def get_attestation_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]:
previous_epoch = get_previous_epoch(state)
total_balance = get_total_active_balance(state)
rewards = [0 for index in range(len(state.validator_registry))]
penalties = [0 for index in range(len(state.validator_registry))]
rewards = [0 for _ in range(len(state.validator_registry))]
penalties = [0 for _ in range(len(state.validator_registry))]
eligible_validator_indices = [
index for index, v in enumerate(state.validator_registry)
if is_active_validator(v, previous_epoch) or (v.slashed and previous_epoch + 1 < v.withdrawable_epoch)
@@ -1490,10 +1425,11 @@ def get_attestation_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]:
# Proposer and inclusion delay micro-rewards
for index in get_unslashed_attesting_indices(state, matching_source_attestations):
earliest_attestation = get_earliest_attestation(state, matching_source_attestations, index)
rewards[earliest_attestation.proposer_index] += get_base_reward(state, index) // PROPOSER_REWARD_QUOTIENT
inclusion_delay = earliest_attestation.inclusion_slot - earliest_attestation.data.slot
rewards[index] += get_base_reward(state, index) * MIN_ATTESTATION_INCLUSION_DELAY // inclusion_delay
attestation = min([
a for a in attestations if index in get_attesting_indices(state, a.data, a.aggregation_bitfield)
], key=lambda a: a.inclusion_delay)
rewards[attestation.proposer_index] += get_base_reward(state, index) // PROPOSER_REWARD_QUOTIENT
rewards[index] += get_base_reward(state, index) * MIN_ATTESTATION_INCLUSION_DELAY // attestation.inclusion_delay
# Inactivity penalty
finality_delay = previous_epoch - state.finalized_epoch
@@ -1504,26 +1440,27 @@ def get_attestation_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]:
if index not in matching_target_attesting_indices:
penalties[index] += state.validator_registry[index].effective_balance * finality_delay // INACTIVITY_PENALTY_QUOTIENT
return [rewards, penalties]
return rewards, penalties
```
```python
def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]:
rewards = [0 for index in range(len(state.validator_registry))]
penalties = [0 for index in range(len(state.validator_registry))]
for slot in range(get_epoch_start_slot(get_previous_epoch(state)), get_epoch_start_slot(get_current_epoch(state))):
epoch = slot_to_epoch(slot)
for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot):
winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, shard, epoch)
attesting_balance = get_total_balance(state, attesting_indices)
committee_balance = get_total_balance(state, crosslink_committee)
for index in crosslink_committee:
base_reward = get_base_reward(state, index)
if index in attesting_indices:
rewards[index] += base_reward * attesting_balance // committee_balance
else:
penalties[index] += base_reward
return [rewards, penalties]
epoch = get_previous_epoch(state)
for offset in range(get_epoch_committee_count(state, epoch)):
shard = (get_epoch_start_shard(state, epoch) + offset) % SHARD_COUNT
crosslink_committee = get_crosslink_committee(state, epoch, shard)
winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, epoch, shard)
attesting_balance = get_total_balance(state, attesting_indices)
committee_balance = get_total_balance(state, crosslink_committee)
for index in crosslink_committee:
base_reward = get_base_reward(state, index)
if index in attesting_indices:
rewards[index] += base_reward * attesting_balance // committee_balance
else:
penalties[index] += base_reward
return rewards, penalties
```
Run the following function:
@@ -1562,6 +1499,7 @@ def process_registry_updates(state: BeaconState) -> None:
], key=lambda index: state.validator_registry[index].activation_eligibility_epoch)
# Dequeue validators for activation up to churn limit (without resetting activation epoch)
for index in activation_queue[:get_churn_limit(state)]:
validator = state.validator_registry[index]
if validator.activation_epoch == FAR_FUTURE_EPOCH:
validator.activation_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state))
```
@@ -1603,10 +1541,10 @@ def process_final_updates(state: BeaconState) -> None:
state.eth1_data_votes = []
# Update effective balances with hysteresis
for index, validator in enumerate(state.validator_registry):
balance = min(state.balances[index], MAX_EFFECTIVE_BALANCE)
balance = state.balances[index]
HALF_INCREMENT = EFFECTIVE_BALANCE_INCREMENT // 2
if balance < validator.effective_balance or validator.effective_balance + 3 * HALF_INCREMENT < balance:
validator.effective_balance = balance - balance % EFFECTIVE_BALANCE_INCREMENT
validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
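# Illustrative worked example (not spec text): with EFFECTIVE_BALANCE_INCREMENT = 10**9 Gwei,
# HALF_INCREMENT is 0.5 * 10**9 Gwei. A validator with effective_balance = 31 * 10**9 Gwei is
# only bumped up once its balance exceeds 32.5 * 10**9 Gwei, and only bumped down once its
# balance falls below 31 * 10**9 Gwei, so small fluctuations around a boundary do not
# repeatedly change the effective balance.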
# Update start shard
state.latest_start_shard = (state.latest_start_shard + get_shard_delta(state, current_epoch)) % SHARD_COUNT
# Set active index root
@@ -1691,6 +1629,8 @@ def process_eth1_data(state: BeaconState, block: BeaconBlock) -> None:
#### Operations
Note: All functions in this section mutate `state`.
##### Proposer slashings
Verify that `len(block.body.proposer_slashings) <= MAX_PROPOSER_SLASHINGS`.
@@ -1702,7 +1642,6 @@ def process_proposer_slashing(state: BeaconState,
proposer_slashing: ProposerSlashing) -> None:
"""
Process ``ProposerSlashing`` operation.
Note that this function mutates ``state``.
"""
proposer = state.validator_registry[proposer_slashing.proposer_index]
# Verify that the epoch is the same
@@ -1730,31 +1669,21 @@ def process_attester_slashing(state: BeaconState,
attester_slashing: AttesterSlashing) -> None:
"""
Process ``AttesterSlashing`` operation.
Note that this function mutates ``state``.
"""
attestation1 = attester_slashing.attestation_1
attestation2 = attester_slashing.attestation_2
# Check that the attestations are conflicting
assert attestation1.data != attestation2.data
assert (
is_double_vote(attestation1.data, attestation2.data) or
is_surround_vote(attestation1.data, attestation2.data)
)
attestation_1 = attester_slashing.attestation_1
attestation_2 = attester_slashing.attestation_2
assert is_slashable_attestation_data(attestation_1.data, attestation_2.data)
assert verify_indexed_attestation(state, attestation_1)
assert verify_indexed_attestation(state, attestation_2)
assert verify_indexed_attestation(state, attestation1)
assert verify_indexed_attestation(state, attestation2)
attesting_indices_1 = attestation1.custody_bit_0_indices + attestation1.custody_bit_1_indices
attesting_indices_2 = attestation2.custody_bit_0_indices + attestation2.custody_bit_1_indices
slashable_indices = [
index for index in attesting_indices_1
if (
index in attesting_indices_2 and
is_slashable_validator(state.validator_registry[index], get_current_epoch(state))
)
]
assert len(slashable_indices) >= 1
for index in slashable_indices:
slash_validator(state, index)
slashed_any = False
attesting_indices_1 = attestation_1.custody_bit_0_indices + attestation_1.custody_bit_1_indices
attesting_indices_2 = attestation_2.custody_bit_0_indices + attestation_2.custody_bit_1_indices
for index in set(attesting_indices_1).intersection(attesting_indices_2):
if is_slashable_validator(state.validator_registry[index], get_current_epoch(state)):
slash_validator(state, index)
slashed_any = True
assert slashed_any
```
##### Attestations
@@ -1767,15 +1696,13 @@ For each `attestation` in `block.body.attestations`, run the following function:
def process_attestation(state: BeaconState, attestation: Attestation) -> None:
"""
Process ``Attestation`` operation.
Note that this function mutates ``state``.
"""
data = attestation.data
min_slot = state.slot - SLOTS_PER_EPOCH if get_current_epoch(state) > GENESIS_EPOCH else GENESIS_SLOT
assert min_slot <= data.slot <= state.slot - MIN_ATTESTATION_INCLUSION_DELAY
attestation_slot = get_attestation_slot(state, attestation)
assert attestation_slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= attestation_slot + SLOTS_PER_EPOCH
# Check target epoch, source epoch, source root, and source crosslink
target_epoch = slot_to_epoch(data.slot)
assert (target_epoch, data.source_epoch, data.source_root, data.previous_crosslink_root) in {
data = attestation.data
assert (data.target_epoch, data.source_epoch, data.source_root, data.previous_crosslink_root) in {
(get_current_epoch(state), state.current_justified_epoch, state.current_justified_root, hash_tree_root(state.current_crosslinks[data.shard])),
(get_previous_epoch(state), state.previous_justified_epoch, state.previous_justified_root, hash_tree_root(state.previous_crosslinks[data.shard])),
}
@@ -1790,10 +1717,10 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
pending_attestation = PendingAttestation(
data=data,
aggregation_bitfield=attestation.aggregation_bitfield,
inclusion_slot=state.slot,
inclusion_delay=state.slot - attestation_slot,
proposer_index=get_beacon_proposer_index(state),
)
if target_epoch == get_current_epoch(state):
if data.target_epoch == get_current_epoch(state):
state.current_epoch_attestations.append(pending_attestation)
else:
state.previous_epoch_attestations.append(pending_attestation)
@@ -1809,7 +1736,6 @@ For each `deposit` in `block.body.deposits`, run the following function:
def process_deposit(state: BeaconState, deposit: Deposit) -> None:
"""
Process an Eth1 deposit, registering a validator or increasing its balance.
Note that this function mutates ``state``.
"""
# Verify the Merkle branch
assert verify_merkle_branch(
@@ -1840,7 +1766,7 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None:
activation_epoch=FAR_FUTURE_EPOCH,
exit_epoch=FAR_FUTURE_EPOCH,
withdrawable_epoch=FAR_FUTURE_EPOCH,
effective_balance=amount - amount % EFFECTIVE_BALANCE_INCREMENT
effective_balance=min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
))
state.balances.append(amount)
else:
@@ -1859,7 +1785,6 @@ For each `exit` in `block.body.voluntary_exits`, run the following function:
def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None:
"""
Process ``VoluntaryExit`` operation.
Note that this function mutates ``state``.
"""
validator = state.validator_registry[exit.validator_index]
# Verify the validator is active
@@ -1887,7 +1812,6 @@ For each `transfer` in `block.body.transfers`, run the following function:
def process_transfer(state: BeaconState, transfer: Transfer) -> None:
"""
Process ``Transfer`` operation.
Note that this function mutates ``state``.
"""
# Verify the amount and fee are not individually too big (for anti-overflow purposes)
assert state.balances[transfer.sender] >= max(transfer.amount, transfer.fee)

View File

@@ -9,13 +9,16 @@
- [Table of contents](#table-of-contents)
- [Introduction](#introduction)
- [Constants](#constants)
- [Deposit contract](#time-parameters)
- [Gwei values](#gwei-values)
- [Contract](#contract)
- [Ethereum 1.0 deposit contract](#ethereum-10-deposit-contract)
- [Deposit arguments](#deposit-arguments)
- [Withdrawal credentials](#withdrawal-credentials)
- [Arguments](#arguments)
- [Withdrawal credentials](#withdrawal-credentials)
- [Amount](#amount)
- [Event logs](#event-logs)
- [`Deposit` logs](#deposit-logs)
- [`Eth2Genesis` log](#eth2genesis-log)
- [Vyper code](#vyper-code)
- [Vyper code](#vyper-code)
<!-- /TOC -->
@@ -25,22 +28,29 @@ This document is the specification for the beacon chain deposit contr
## Constants
### Deposit contract
### Gwei values
| Name | Value | Unit |
| - | - | - |
| `FULL_DEPOSIT_AMOUNT` | `32 * 10**9` | Gwei |
### Contract
| Name | Value |
| - | - |
| `DEPOSIT_CONTRACT_ADDRESS` | **TBD** |
| `DEPOSIT_CONTRACT_TREE_DEPTH` | `2**5` (= 32) |
| `CHAIN_START_FULL_DEPOSIT_THRESHOLD` | `2**16` (=65,536) |
## Ethereum 1.0 deposit contract
The initial deployment phases of Ethereum 2.0 are implemented without consensus changes to Ethereum 1.0. A deposit contract at address `DEPOSIT_CONTRACT_ADDRESS` is added to Ethereum 1.0 for deposits of ETH to the beacon chain. Validator balances will be withdrawable to the shards in phase 2, i.e. when the EVM2.0 is deployed and the shards have state.
### Deposit arguments
### Arguments
The deposit contract has a single `deposit` function which takes as argument a SimpleSerialize'd `DepositData`.
The deposit contract has a `deposit` function which takes the deposit amount from the Ethereum 1.0 transaction value, along with arguments `pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96]` corresponding to `DepositData`.
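As a hypothetical illustration only (web3.py is assumed; the provider URL, contract address, ABI, and key material below are placeholders, not values defined by this specification), a client-side deposit call could look like:
```python
from web3 import Web3

# Placeholder values; a real deployment must use DEPOSIT_CONTRACT_ADDRESS, the
# contract's actual ABI, and the validator's real key material.
DEPOSIT_ABI = [{
    "name": "deposit", "type": "function", "stateMutability": "payable", "payable": True,
    "inputs": [
        {"name": "pubkey", "type": "bytes"},
        {"name": "withdrawal_credentials", "type": "bytes"},
        {"name": "signature", "type": "bytes"},
    ],
    "outputs": [],
}]
pubkey = bytes(48)                  # bytes[48]: BLS public key
withdrawal_credentials = bytes(32)  # bytes[32]: withdrawal credentials commitment
signature = bytes(96)               # bytes[96]: BLS signature over the deposit data

w3 = Web3(Web3.HTTPProvider("http://localhost:8545"))
contract = w3.eth.contract(address=Web3.toChecksumAddress("0x" + "00" * 20), abi=DEPOSIT_ABI)
tx_hash = contract.functions.deposit(pubkey, withdrawal_credentials, signature).transact({
    "from": w3.eth.accounts[0],
    "value": Web3.toWei(32, "ether"),  # 32 ETH = FULL_DEPOSIT_AMOUNT (32 * 10**9 Gwei)
})
```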
### Withdrawal credentials
#### Withdrawal credentials
One of the `DepositData` fields is `withdrawal_credentials`. It is a commitment to credentials for withdrawals to shards. The first byte of `withdrawal_credentials` is a version number. As of now the only expected format is as follows:
@@ -49,13 +59,20 @@ One of the `DepositData` fields is `withdrawal_credentials`. It is a commitment
The private key corresponding to `withdrawal_pubkey` will be required to initiate a withdrawal. It can be stored separately until a withdrawal is required, e.g. in cold storage.
#### Amount
* A valid deposit amount should be at least `MIN_DEPOSIT_AMOUNT` in Gwei.
* A deposit with an amount greater than or equal to `FULL_DEPOSIT_AMOUNT` in Gwei is considered as a full deposit.
## Event logs
### `Deposit` logs
Every Ethereum 1.0 deposit, of size between `MIN_DEPOSIT_AMOUNT` and `MAX_DEPOSIT_AMOUNT`, emits a `Deposit` log for consumption by the beacon chain. The deposit contract does little validation, pushing most of the validator onboarding logic to the beacon chain. In particular, the proof of possession (a BLS12 signature) is not verified by the deposit contract.
Every Ethereum 1.0 deposit, of size at least `MIN_DEPOSIT_AMOUNT`, emits a `Deposit` log for consumption by the beacon chain. The deposit contract does little validation, pushing most of the validator onboarding logic to the beacon chain. In particular, the proof of possession (a BLS12-381 signature) is not verified by the deposit contract.
### `Eth2Genesis` log
When a sufficient amount of full deposits have been made, the deposit contract emits the `Eth2Genesis` log. The beacon chain state may then be initialized by calling the `get_genesis_beacon_state` function (defined below) where:
When `CHAIN_START_FULL_DEPOSIT_THRESHOLD` of full deposits have been made, the deposit contract emits the `Eth2Genesis` log. The beacon chain state may then be initialized by calling the `get_genesis_beacon_state` function (defined below) where:
* `genesis_time` equals `time` in the `Eth2Genesis` log
* `latest_eth1_data.deposit_root` equals `deposit_root` in the `Eth2Genesis` log
@@ -63,7 +80,7 @@ When a sufficient amount of full deposits have been made, the deposit contract e
* `latest_eth1_data.block_hash` equals the hash of the block that included the log
* `genesis_validator_deposits` is a list of `Deposit` objects built according to the `Deposit` logs up to the deposit that triggered the `Eth2Genesis` log, processed in the order in which they were emitted (oldest to newest)
### Vyper code
## Vyper code
The source for the Vyper contract lives in a [separate repository](https://github.com/ethereum/deposit_contract) at [https://github.com/ethereum/deposit_contract/blob/master/deposit_contract/contracts/validator_registration.v.py](https://github.com/ethereum/deposit_contract/blob/master/deposit_contract/contracts/validator_registration.v.py).
@@ -73,4 +90,4 @@ For convenience, we provide the interface to the contract here:
* `__init__()`: initializes the contract
* `get_deposit_root() -> bytes32`: returns the current root of the deposit tree
* `deposit(bytes[512])`: adds a deposit instance to the deposit tree, incorporating the input argument and the value transferred in the given call. Note: the amount of value transferred *must* be within `MIN_DEPOSIT_AMOUNT` and `MAX_DEPOSIT_AMOUNT`, inclusive. Each of these constants are specified in units of Gwei.
* `deposit(pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96])`: adds a deposit instance to the deposit tree, incorporating the input arguments and the value transferred in the given call. Note: the amount of value transferred *must* be at least `MIN_DEPOSIT_AMOUNT`. Each of these constants is specified in units of Gwei.

View File

@@ -46,9 +46,9 @@ Note: Nodes need to have a clock that is roughly (i.e. within `SECONDS_PER_SLOT
### Beacon chain fork choice rule
The beacon chain fork choice rule is a hybrid that combines justification and finality with Latest Message Driven (LMD) Greedy Heaviest Observed SubTree (GHOST). At any point in time a [validator](#dfn-validator) `v` subjectively calculates the beacon chain head as follows.
The beacon chain fork choice rule is a hybrid that combines justification and finality with Latest Message Driven (LMD) Greedy Heaviest Observed SubTree (GHOST). At any point in time a validator `v` subjectively calculates the beacon chain head as follows.
* Abstractly define `Store` as the type of storage object for the chain data and `store` be the set of attestations and blocks that the [validator](#dfn-validator) `v` has observed and verified (in particular, block ancestors must be recursively verified). Attestations not yet included in any chain are still included in `store`.
* Abstractly define `Store` as the type of storage object for the chain data and `store` be the set of attestations and blocks that the validator `v` has observed and verified (in particular, block ancestors must be recursively verified). Attestations not yet included in any chain are still included in `store`.
* Let `finalized_head` be the finalized block with the highest epoch. (A block `B` is finalized if there is a descendant of `B` in `store` the processing of which sets `B` as finalized.)
* Let `justified_head` be the descendant of `finalized_head` with the highest epoch that has been justified for at least 1 epoch. (A block `B` is justified if there is a descendant of `B` in `store` the processing of which sets `B` as justified.) If no such descendant exists set `justified_head` to `finalized_head`.
* Let `get_ancestor(store: Store, block: BeaconBlock, slot: Slot) -> BeaconBlock` be the ancestor of `block` with slot number `slot`. The `get_ancestor` function can be defined recursively as:
@@ -66,7 +66,7 @@ def get_ancestor(store: Store, block: BeaconBlock, slot: Slot) -> BeaconBlock:
return get_ancestor(store, store.get_parent(block), slot)
```
* Let `get_latest_attestation(store: Store, index: ValidatorIndex) -> Attestation` be the attestation with the highest slot number in `store` from the validator with the given `index`. If several such attestations exist, use the one the [validator](#dfn-validator) `v` observed first.
* Let `get_latest_attestation(store: Store, index: ValidatorIndex) -> Attestation` be the attestation with the highest slot number in `store` from the validator with the given `index`. If several such attestations exist, use the one the validator `v` observed first.
* Let `get_latest_attestation_target(store: Store, index: ValidatorIndex) -> BeaconBlock` be the target block in the attestation `get_latest_attestation(store, index)`.
* Let `get_children(store: Store, block: BeaconBlock) -> List[BeaconBlock]` return the child blocks of the given `block`.
* Let `justified_head_state` be the resulting `BeaconState` object from processing the chain up to the `justified_head`.
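Putting these helpers together, the head computation is a greedy walk from `justified_head` toward the leaves. The sketch below is illustrative only, not the spec's definition: it counts one vote per validator instead of weighting votes by effective balance, omits any tie-breaking rule, and assumes every active validator has a latest attestation in `store`.
```python
def lmd_ghost_head(store: Store, justified_head: BeaconBlock, justified_head_state: BeaconState) -> BeaconBlock:
    # Simplified sketch of the fork choice walk described above.
    active_indices = get_active_validator_indices(justified_head_state, get_current_epoch(justified_head_state))

    def vote_count(block: BeaconBlock) -> int:
        # One (unweighted) vote per validator whose latest attestation target
        # has ``block`` as its ancestor at ``block.slot``.
        return len([
            index for index in active_indices
            if get_ancestor(store, get_latest_attestation_target(store, index), block.slot) == block
        ])

    head = justified_head
    while True:
        children = get_children(store, head)
        if len(children) == 0:
            return head
        head = max(children, key=vote_count)
```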

View File

@@ -120,21 +120,15 @@ This document describes the shard data layer and the shard fork choice rule in P
### `get_period_committee`
```python
def get_period_committee(state: BeaconState,
shard: Shard,
committee_start_epoch: Epoch,
index: int,
committee_count: int) -> List[ValidatorIndex]:
def get_period_committee(state: BeaconState, epoch: Epoch, shard: Shard, index: int, count: int) -> List[ValidatorIndex]:
"""
Return committee for a period. Used to construct persistent committees.
"""
active_validator_indices = get_active_validator_indices(state.validator_registry, committee_start_epoch)
seed = generate_seed(state, committee_start_epoch)
return compute_committee(
validator_indices=active_validator_indices,
seed=seed,
index=shard * committee_count + index,
total_committees=SHARD_COUNT * committee_count,
indices=get_active_validator_indices(state, epoch),
seed=generate_seed(state, epoch),
index=shard * count + index,
count=SHARD_COUNT * count,
)
```
@@ -165,7 +159,7 @@ def get_persistent_committee(state: BeaconState,
len(get_active_validator_indices(state.validator_registry, later_start_epoch)) //
(SHARD_COUNT * TARGET_COMMITTEE_SIZE),
) + 1
index = slot % committee_count
earlier_committee = get_period_committee(state, shard, earlier_start_epoch, index, committee_count)
later_committee = get_period_committee(state, shard, later_start_epoch, index, committee_count)

View File

@@ -224,7 +224,7 @@ Up to `MAX_ATTESTATIONS` aggregate attestations can be included in the `block`.
##### Deposits
If there are any unprocessed deposits for the existing `state.latest_eth1_data` (i.e. `state.latest_eth1_data.deposit_count > state.deposit_index`), then pending deposits _must_ be added to the block. The expected number of deposits is exactly `min(MAX_DEPOSITS, latest_eth1_data.deposit_count - state.deposit_index)`. These [`deposits`](../core/0_beacon-chain.md#deposit) are constructed from the `Deposit` logs from the [Eth1.0 deposit contract](../core/0_deposit-contract.md) and must be processed in sequential order. The deposits included in the `block` must satisfy the verification conditions found in [deposits processing](../core/0_beacon-chain.md#deposits).
If there are any unprocessed deposits for the existing `state.latest_eth1_data` (i.e. `state.latest_eth1_data.deposit_count > state.deposit_index`), then pending deposits _must_ be added to the block. The expected number of deposits is exactly `min(MAX_DEPOSITS, latest_eth1_data.deposit_count - state.deposit_index)`. These [`deposits`](../core/0_beacon-chain.md#deposit) are constructed from the `Deposit` logs from the [Eth1.0 deposit contract](../core/0_deposit-contract) and must be processed in sequential order. The deposits included in the `block` must satisfy the verification conditions found in [deposits processing](../core/0_beacon-chain.md#deposits).
The `proof` for each deposit must be constructed against the deposit root contained in `state.latest_eth1_data` rather than the deposit root at the time the deposit was initially logged from the 1.0 chain. This entails storing a full deposit merkle tree locally and computing updated proofs against the `latest_eth1_data.deposit_root` as needed. See [`minimal_merkle.py`](https://github.com/ethereum/research/blob/master/spec_pythonizer/utils/merkle_minimal.py) for a sample implementation.
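As a self-contained illustration (not the referenced `merkle_minimal.py`, and simplified relative to a production deposit tree), a branch over zero-padded leaves that verifies with the spec's `verify_merkle_branch` could be computed as follows, where `depth` would be `DEPOSIT_CONTRACT_TREE_DEPTH` and `leaves` the 32-byte roots of the deposit data up to `latest_eth1_data.deposit_count`:
```python
from hashlib import sha256

def hash_pair(a: bytes, b: bytes) -> bytes:
    return sha256(a + b).digest()

def compute_merkle_branch(leaves: list, index: int, depth: int) -> list:
    """Illustrative only: branch for ``leaves[index]`` in a zero-padded tree of ``depth``."""
    assert index < len(leaves) <= 2**depth
    branch = []
    layer = list(leaves)
    zero = b'\x00' * 32
    for _ in range(depth):
        if len(layer) % 2 == 1:
            layer.append(zero)        # pad the layer so every node has a sibling
        branch.append(layer[index ^ 1])
        layer = [hash_pair(layer[i], layer[i + 1]) for i in range(0, len(layer), 2)]
        index //= 2
        zero = hash_pair(zero, zero)  # zero subtree hash for the next layer up
    return branch
```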
@@ -410,7 +410,7 @@ If the software crashes at some point within this routine, then when the validat
### Attester slashing
To avoid "attester slashings", a validator must not sign two conflicting [`AttestationData`](../core/0_beacon-chain.md#attestationdata) objects where conflicting is defined as a set of two attestations that satisfy either [`is_double_vote`](../core/0_beacon-chain.md#is_double_vote) or [`is_surround_vote`](../core/0_beacon-chain.md#is_surround_vote).
To avoid "attester slashings", a validator must not sign two conflicting [`AttestationData`](../core/0_beacon-chain.md#attestationdata) objects, i.e. two attestations that satisfy [`is_slashable_attestation_data`](../core/0_beacon-chain.md#is_slashable_attestation_data).
Specifically, when signing an `Attestation`, a validator should perform the following steps in the following order:
1. Save a record to hard disk that an attestation has been signed for source -- `attestation_data.source_epoch` -- and target -- `slot_to_epoch(attestation_data.slot)`.

View File

@@ -1,23 +1,22 @@
# Eth2.0 Test Generators
# Eth 2.0 Test Generators
This directory contains all the generators for YAML tests, consumed by Eth 2.0 client implementations.
Any issues with the generators and/or generated tests should be filed
in the repository that hosts the generator outputs, here: [ethereum/eth2.0-tests](https://github.com/ethereum/eth2.0-tests/).
Any issues with the generators and/or generated tests should be filed in the repository that hosts the generator outputs, here: [ethereum/eth2.0-tests](https://github.com/ethereum/eth2.0-tests).
Whenever a release is made, the new tests are automatically built and
Whenever a release is made, the new tests are automatically built, and
[eth2TestGenBot](https://github.com/eth2TestGenBot) commits the changes to the test repository.
## How to run generators
pre-requisites:
Prerequisites:
- Python 3 installed
- PIP 3
- GNU make
### Cleaning
This removes the existing virtual environments (`/test_generators/<generator>/venv`), and generated tests (`/yaml_tests/`).
This removes the existing virtual environments (`/test_generators/<generator>/venv`) and generated tests (`/yaml_tests/`).
```bash
make clean
@@ -25,7 +24,7 @@ make clean
### Running all test generators
This runs all the generators.
This runs all of the generators.
```bash
make -j 4 gen_yaml_tests
@@ -36,8 +35,7 @@ The `-j N` flag makes the generators run in parallel, with `N` being the amount
### Running a single generator
The make file auto-detects generators in the `test_generators/` directory,
and provides a tests-gen target for each generator, see example.
The makefile auto-detects generators in the `test_generators` directory and provides a tests-gen target for each generator. See example:
```bash
make ./yaml_tests/shuffling/
@@ -45,7 +43,7 @@ make ./yaml_tests/shuffling/
## Developing a generator
Simply open up the generator (not all at once) of choice in your favorite IDE/editor, and run:
Simply open up the generator (not all at once) of choice in your favorite IDE/editor and run:
```bash
# From the root of the generator directory:
@@ -65,10 +63,10 @@ eth-utils==1.4.1
../../test_libs/config_helpers
../../test_libs/pyspec
```
The config helper and pyspec is optional, but preferred. We encourage generators to derive tests from the spec itself, to prevent code duplication and outdated tests.
Applying configurations to the spec is simple, and enables you to create test suites with different contexts.
The config helper and pyspec is optional, but preferred. We encourage generators to derive tests from the spec itself in order to prevent code duplication and outdated tests.
Applying configurations to the spec is simple and enables you to create test suites with different contexts.
Note: make sure to run `make pyspec` from the root of the specs repository, to build the pyspec requirement.
Note: make sure to run `make pyspec` from the root of the specs repository in order to build the pyspec requirement.
Install all the necessary requirements (re-run when you add more):
```bash
@@ -77,7 +75,7 @@ pip3 install -r requirements.txt
And write your initial test generator, extending the base generator:
Write a `main.py` file, here's an example:
Write a `main.py` file. See example:
```python
from gen_base import gen_runner, gen_suite, gen_typing
@@ -134,26 +132,26 @@ if __name__ == "__main__":
```
Recommendations:
- you can have more than just 1 suite creator, e.g. ` gen_runner.run_generator("foo", [bar_test_suite, abc_test_suite, example_test_suite])`
- you can concatenate lists of test cases, if you don't want to split it up in suites, however make sure they could be run with one handler.
- you can split your suite creators into different python files/packages, good for code organization.
- use config "minimal" for performance. But also implement a suite with the default config where necessary.
- you may be able to write your test suite creator in a way where it does not make assumptions on constants.
- You can have more than just one suite creator, e.g. ` gen_runner.run_generator("foo", [bar_test_suite, abc_test_suite, example_test_suite])`.
- You can concatenate lists of test cases if you don't want to split it up in suites, however, make sure they can be run with one handler.
- You can split your suite creators into different python files/packages; this is good for code organization.
- Use config "minimal" for performance, but also implement a suite with the default config where necessary.
- You may be able to write your test suite creator in a way where it does not make assumptions on constants.
If so, you can generate test suites with different configurations for the same scenario (see example).
- the test-generator accepts `--output` and `--force` (overwrite output)
- The test-generator accepts `--output` and `--force` (overwrite output).
## How to add a new test generator
In order to add a new test generator that builds `New Tests`:
To add a new test generator that builds `New Tests`:
1. Create a new directory `new_tests`, within the `test_generators` directory.
1. Create a new directory `new_tests` within the `test_generators` directory.
Note that `new_tests` is also the name of the directory in which the tests will appear in the tests repository later.
2. Your generator is assumed to have a `requirements.txt` file,
with any dependencies it may need. Leave it empty if your generator has none.
3. Your generator is assumed to have a `main.py` file in its root.
By adding the base generator to your requirements, you can make a generator really easily. See docs below.
4. Your generator is called with `-o some/file/path/for_testing/can/be_anything -c some/other/path/to_configs/`.
The base generator helps you handle this; you only have to define suite headers,
The base generator helps you handle this; you only have to define suite headers
and a list of tests for each suite you generate.
5. Finally, add any linting or testing commands to the
[circleci config file](https://github.com/ethereum/eth2.0-test-generators/blob/master/.circleci/config.yml)
@@ -168,6 +166,6 @@ Do note that generators should be easy to maintain, lean, and based on the spec.
If a test generator is not needed anymore, undo the steps described above and make a new release:
1. remove the generator directory
2. remove the generated tests in the `eth2.0-tests` repository by opening a PR there.
3. make a new release
1. Remove the generator directory.
2. Remove the generated tests in the [`eth2.0-tests`](https://github.com/ethereum/eth2.0-tests) repository by opening a PR there.
3. Make a new release.

View File

@@ -77,7 +77,7 @@ def build_deposit_for_index(initial_validator_count: int, index: int) -> Tuple[s
keys.pubkeys[index],
keys.withdrawal_creds[index],
keys.privkeys[index],
spec.MAX_DEPOSIT_AMOUNT,
spec.MAX_EFFECTIVE_BALANCE,
)
state.latest_eth1_data.deposit_root = get_merkle_root(tuple(deposit_data_leaves))

View File

@@ -26,7 +26,7 @@ def create_deposits(pubkeys: List[spec.BLSPubkey], withdrawal_cred: List[spec.By
spec.DepositData(
pubkey=pubkeys[i],
withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + withdrawal_cred[i][1:],
amount=spec.MAX_DEPOSIT_AMOUNT,
amount=spec.MAX_EFFECTIVE_BALANCE,
proof_of_possession=proof_of_possession,
) for i in range(len(pubkeys))
]

View File

@@ -1,4 +1,10 @@
from typing import Callable, Dict, Tuple, Any
from typing import (
Any,
Callable,
Dict,
Tuple,
)
TestCase = Dict[str, Any]
TestSuite = Dict[str, Any]

View File

@@ -38,8 +38,7 @@ def run_attestation_processing(state, attestation, valid=True):
process_attestation(post_state, attestation)
current_epoch = get_current_epoch(state)
target_epoch = slot_to_epoch(attestation.data.slot)
if target_epoch == current_epoch:
if attestation.data.target_epoch == current_epoch:
assert len(post_state.current_epoch_attestations) == len(state.current_epoch_attestations) + 1
else:
assert len(post_state.previous_epoch_attestations) == len(state.previous_epoch_attestations) + 1

View File

@@ -65,7 +65,7 @@ def test_success_surround(state):
# set attestation 1 to surround attestation 2
attester_slashing.attestation_1.data.source_epoch = attester_slashing.attestation_2.data.source_epoch - 1
attester_slashing.attestation_1.data.slot = attester_slashing.attestation_2.data.slot + spec.SLOTS_PER_EPOCH
attester_slashing.attestation_1.data.target_epoch = attester_slashing.attestation_2.data.target_epoch + 1
pre_state, post_state = run_attester_slashing_processing(state, attester_slashing)
@@ -85,7 +85,7 @@ def test_same_data(state):
def test_no_double_or_surround(state):
attester_slashing = get_valid_attester_slashing(state)
attester_slashing.attestation_1.data.slot += spec.SLOTS_PER_EPOCH
attester_slashing.attestation_1.data.target_epoch += 1
pre_state, post_state = run_attester_slashing_processing(state, attester_slashing, False)

View File

@@ -15,7 +15,7 @@ from tests.helpers import (
add_attestation_to_state,
build_empty_block_for_next_slot,
fill_aggregate_attestation,
get_crosslink_committee_for_attestation,
get_crosslink_committee,
get_valid_attestation,
next_epoch,
next_slot,
@@ -88,7 +88,7 @@ def test_single_crosslink_update_from_previous_epoch(state):
assert post_state.previous_crosslinks[shard] != post_state.current_crosslinks[shard]
assert pre_state.current_crosslinks[shard] != post_state.current_crosslinks[shard]
# ensure rewarded
for index in get_crosslink_committee_for_attestation(state, attestation.data):
for index in get_crosslink_committee(state, attestation.data.target_epoch, attestation.data.shard):
assert crosslink_deltas[0][index] > 0
assert crosslink_deltas[1][index] == 0
@@ -129,7 +129,7 @@ def test_double_late_crosslink(state):
# ensure that the current crosslinks were not updated by the second attestation
assert post_state.previous_crosslinks[shard] == post_state.current_crosslinks[shard]
# ensure no reward, only penalties for the failed crosslink
for index in get_crosslink_committee_for_attestation(state, attestation_2.data):
for index in get_crosslink_committee(state, attestation_2.data.target_epoch, attestation_2.data.shard):
assert crosslink_deltas[0][index] == 0
assert crosslink_deltas[1][index] > 0

View File

@@ -0,0 +1,67 @@
from copy import deepcopy
import pytest
import eth2spec.phase0.spec as spec
from eth2spec.phase0.spec import (
get_current_epoch,
is_active_validator,
)
from tests.helpers import (
next_epoch,
)
# mark entire file as 'state'
pytestmark = pytest.mark.state
def test_activation(state):
index = 0
assert is_active_validator(state.validator_registry[index], get_current_epoch(state))
# Mock a new deposit
state.validator_registry[index].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
state.validator_registry[index].activation_epoch = spec.FAR_FUTURE_EPOCH
state.validator_registry[index].effective_balance = spec.MAX_EFFECTIVE_BALANCE
assert not is_active_validator(state.validator_registry[index], get_current_epoch(state))
pre_state = deepcopy(state)
blocks = []
for _ in range(spec.ACTIVATION_EXIT_DELAY + 1):
block = next_epoch(state)
blocks.append(block)
assert state.validator_registry[index].activation_eligibility_epoch != spec.FAR_FUTURE_EPOCH
assert state.validator_registry[index].activation_epoch != spec.FAR_FUTURE_EPOCH
assert is_active_validator(
state.validator_registry[index],
get_current_epoch(state),
)
return pre_state, blocks, state
def test_ejection(state):
index = 0
assert is_active_validator(state.validator_registry[index], get_current_epoch(state))
assert state.validator_registry[index].exit_epoch == spec.FAR_FUTURE_EPOCH
# Mock an ejection
state.validator_registry[index].effective_balance = spec.EJECTION_BALANCE
pre_state = deepcopy(state)
blocks = []
for _ in range(spec.ACTIVATION_EXIT_DELAY + 1):
block = next_epoch(state)
blocks.append(block)
assert state.validator_registry[index].exit_epoch != spec.FAR_FUTURE_EPOCH
assert not is_active_validator(
state.validator_registry[index],
get_current_epoch(state),
)
return pre_state, blocks, state

View File

@@ -29,7 +29,7 @@ from eth2spec.phase0.spec import (
get_attesting_indices,
get_block_root,
get_block_root_at_slot,
get_crosslink_committees_at_slot,
get_crosslink_committee,
get_current_epoch,
get_domain,
get_epoch_start_slot,
@@ -174,11 +174,11 @@ def build_attestation_data(state, slot, shard):
crosslinks = state.current_crosslinks if slot_to_epoch(slot) == get_current_epoch(state) else state.previous_crosslinks
return AttestationData(
slot=slot,
shard=shard,
beacon_block_root=block_root,
source_epoch=justified_epoch,
source_root=justified_block_root,
target_epoch=slot_to_epoch(slot),
target_root=epoch_boundary_root,
crosslink_data_root=spec.ZERO_HASH,
previous_crosslink_root=hash_tree_root(crosslinks[shard]),
@@ -276,14 +276,6 @@ def get_valid_attester_slashing(state):
)
def get_crosslink_committee_for_attestation(state, attestation_data):
"""
Return the crosslink committee corresponding to ``attestation_data``.
"""
crosslink_committees = get_crosslink_committees_at_slot(state, attestation_data.slot)
return [committee for committee, shard in crosslink_committees if shard == attestation_data.shard][0]
def get_valid_attestation(state, slot=None):
if slot is None:
slot = state.slot
@@ -296,7 +288,7 @@ def get_valid_attestation(state, slot=None):
attestation_data = build_attestation_data(state, slot, shard)
crosslink_committee = get_crosslink_committee_for_attestation(state, attestation_data)
crosslink_committee = get_crosslink_committee(state, attestation_data.target_epoch, attestation_data.shard)
committee_size = len(crosslink_committee)
bitfield_length = (committee_size + 7) // 8
@@ -383,13 +375,13 @@ def get_attestation_signature(state, attestation_data, privkey, custody_bit=0b0)
domain=get_domain(
state=state,
domain_type=spec.DOMAIN_ATTESTATION,
message_epoch=slot_to_epoch(attestation_data.slot),
message_epoch=attestation_data.target_epoch,
)
)
def fill_aggregate_attestation(state, attestation):
crosslink_committee = get_crosslink_committee_for_attestation(state, attestation.data)
crosslink_committee = get_crosslink_committee(state, attestation.data.target_epoch, attestation.data.shard)
for i in range(len(crosslink_committee)):
attestation.aggregation_bitfield = set_bitfield_bit(attestation.aggregation_bitfield, i)
@@ -402,11 +394,29 @@ def add_attestation_to_state(state, attestation, slot):
def next_slot(state):
"""
Transition to the next slot via an empty block.
Return the empty block that triggered the transition.
"""
block = build_empty_block_for_next_slot(state)
state_transition(state, block)
return block
def next_epoch(state):
"""
Transition to the start slot of the next epoch via an empty block.
Return the empty block that triggered the transition.
"""
block = build_empty_block_for_next_slot(state)
block.slot += spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH)
state_transition(state, block)
return block
def get_state_root(state, slot) -> bytes:
"""
Return the state root at a recent ``slot``.
"""
assert slot < state.slot <= slot + spec.SLOTS_PER_HISTORICAL_ROOT
return state.latest_state_roots[slot % spec.SLOTS_PER_HISTORICAL_ROOT]

View File

@@ -9,6 +9,7 @@ from eth2spec.utils.minimal_ssz import signing_root
from eth2spec.phase0.spec import (
# constants
ZERO_HASH,
SLOTS_PER_HISTORICAL_ROOT,
# SSZ
Deposit,
Transfer,
@@ -17,7 +18,6 @@ from eth2spec.phase0.spec import (
get_active_validator_indices,
get_beacon_proposer_index,
get_block_root_at_slot,
get_state_root,
get_current_epoch,
get_domain,
advance_slot,
@@ -38,6 +38,7 @@ from .helpers import (
build_deposit_data,
build_empty_block_for_next_slot,
fill_aggregate_attestation,
get_state_root,
get_valid_attestation,
get_valid_attester_slashing,
get_valid_proposer_slashing,